1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * VM - Hardware Address Translation management for Spitfire MMU. 28 * 29 * This file implements the machine specific hardware translation 30 * needed by the VM system. The machine independent interface is 31 * described in <vm/hat.h> while the machine dependent interface 32 * and data structures are described in <vm/hat_sfmmu.h>. 33 * 34 * The hat layer manages the address translation hardware as a cache 35 * driven by calls from the higher levels in the VM system. 36 */ 37 38 #include <sys/types.h> 39 #include <sys/kstat.h> 40 #include <vm/hat.h> 41 #include <vm/hat_sfmmu.h> 42 #include <vm/page.h> 43 #include <sys/pte.h> 44 #include <sys/systm.h> 45 #include <sys/mman.h> 46 #include <sys/sysmacros.h> 47 #include <sys/machparam.h> 48 #include <sys/vtrace.h> 49 #include <sys/kmem.h> 50 #include <sys/mmu.h> 51 #include <sys/cmn_err.h> 52 #include <sys/cpu.h> 53 #include <sys/cpuvar.h> 54 #include <sys/debug.h> 55 #include <sys/lgrp.h> 56 #include <sys/archsystm.h> 57 #include <sys/machsystm.h> 58 #include <sys/vmsystm.h> 59 #include <vm/as.h> 60 #include <vm/seg.h> 61 #include <vm/seg_kp.h> 62 #include <vm/seg_kmem.h> 63 #include <vm/seg_kpm.h> 64 #include <vm/rm.h> 65 #include <sys/t_lock.h> 66 #include <sys/obpdefs.h> 67 #include <sys/vm_machparam.h> 68 #include <sys/var.h> 69 #include <sys/trap.h> 70 #include <sys/machtrap.h> 71 #include <sys/scb.h> 72 #include <sys/bitmap.h> 73 #include <sys/machlock.h> 74 #include <sys/membar.h> 75 #include <sys/atomic.h> 76 #include <sys/cpu_module.h> 77 #include <sys/prom_debug.h> 78 #include <sys/ksynch.h> 79 #include <sys/mem_config.h> 80 #include <sys/mem_cage.h> 81 #include <vm/vm_dep.h> 82 #include <vm/xhat_sfmmu.h> 83 #include <sys/fpu/fpusystm.h> 84 #include <vm/mach_kpm.h> 85 #include <sys/callb.h> 86 87 #ifdef DEBUG 88 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 89 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 90 caddr_t _eaddr = (saddr) + (len); \ 91 sf_srd_t *_srdp; \ 92 sf_region_t *_rgnp; \ 93 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 94 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 95 ASSERT((hat) != ksfmmup); \ 96 _srdp = (hat)->sfmmu_srdp; \ 97 ASSERT(_srdp != NULL); \ 98 ASSERT(_srdp->srd_refcnt != 0); \ 99 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 100 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 101 ASSERT(_rgnp->rgn_refcnt != 0); \ 102 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 103 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 104 SFMMU_REGION_HME); \ 105 ASSERT((saddr) >= _rgnp->rgn_saddr); \ 106 ASSERT((saddr) < _rgnp->rgn_saddr + 
_rgnp->rgn_size); \ 107 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 108 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 109 } 110 111 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 112 { \ 113 caddr_t _hsva; \ 114 caddr_t _heva; \ 115 caddr_t _rsva; \ 116 caddr_t _reva; \ 117 int _ttesz = get_hblk_ttesz(hmeblkp); \ 118 int _flagtte; \ 119 ASSERT((srdp)->srd_refcnt != 0); \ 120 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 121 ASSERT((rgnp)->rgn_id == rid); \ 122 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 123 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 124 SFMMU_REGION_HME); \ 125 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 126 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 127 _heva = get_hblk_endaddr(hmeblkp); \ 128 _rsva = (caddr_t)P2ALIGN( \ 129 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 130 _reva = (caddr_t)P2ROUNDUP( \ 131 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 132 HBLK_MIN_BYTES); \ 133 ASSERT(_hsva >= _rsva); \ 134 ASSERT(_hsva < _reva); \ 135 ASSERT(_heva > _rsva); \ 136 ASSERT(_heva <= _reva); \ 137 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 138 _ttesz; \ 139 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 140 } 141 142 #else /* DEBUG */ 143 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 144 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 145 #endif /* DEBUG */ 146 147 #if defined(SF_ERRATA_57) 148 extern caddr_t errata57_limit; 149 #endif 150 151 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 152 (sizeof (int64_t))) 153 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 154 155 #define HBLK_RESERVE_CNT 128 156 #define HBLK_RESERVE_MIN 20 157 158 static struct hme_blk *freehblkp; 159 static kmutex_t freehblkp_lock; 160 static int freehblkcnt; 161 162 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 163 static kmutex_t hblk_reserve_lock; 164 static kthread_t *hblk_reserve_thread; 165 166 static nucleus_hblk8_info_t nucleus_hblk8; 167 static nucleus_hblk1_info_t nucleus_hblk1; 168 169 /* 170 * SFMMU specific hat functions 171 */ 172 void hat_pagecachectl(struct page *, int); 173 174 /* flags for hat_pagecachectl */ 175 #define HAT_CACHE 0x1 176 #define HAT_UNCACHE 0x2 177 #define HAT_TMPNC 0x4 178 179 /* 180 * Flag to allow the creation of non-cacheable translations 181 * to system memory. It is off by default. At the moment this 182 * flag is used by the ecache error injector. The error injector 183 * will turn it on when creating such a translation then shut it 184 * off when it's finished. 185 */ 186 187 int sfmmu_allow_nc_trans = 0; 188 189 /* 190 * Flag to disable large page support. 191 * value of 1 => disable all large pages. 192 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 193 * 194 * For example, use the value 0x4 to disable 512K pages. 195 * 196 */ 197 #define LARGE_PAGES_OFF 0x1 198 199 /* 200 * The disable_large_pages and disable_ism_large_pages variables control 201 * hat_memload_array and the page sizes to be used by ISM and the kernel. 202 * 203 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 204 * are only used to control which OOB pages to use at upper VM segment creation 205 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 206 * Their values may come from platform or CPU specific code to disable page 207 * sizes that should not be used. 208 * 209 * WARNING: 512K pages are currently not supported for ISM/DISM. 
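 *
 * Worked example (added note; assumes the usual TTE size codes where
 * TTE64K == 1, TTE512K == 2 and TTE4M == 3): a value of
 * (1 << TTE512K) == 0x4 disables 512K pages only, which is exactly the
 * default given to disable_ism_large_pages below to enforce the ISM/DISM
 * restriction in the WARNING above.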
210 */ 211 uint_t disable_large_pages = 0; 212 uint_t disable_ism_large_pages = (1 << TTE512K); 213 uint_t disable_auto_data_large_pages = 0; 214 uint_t disable_auto_text_large_pages = 0; 215 216 /* 217 * Private sfmmu data structures for hat management 218 */ 219 static struct kmem_cache *sfmmuid_cache; 220 static struct kmem_cache *mmuctxdom_cache; 221 222 /* 223 * Private sfmmu data structures for tsb management 224 */ 225 static struct kmem_cache *sfmmu_tsbinfo_cache; 226 static struct kmem_cache *sfmmu_tsb8k_cache; 227 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 228 static vmem_t *kmem_bigtsb_arena; 229 static vmem_t *kmem_tsb_arena; 230 231 /* 232 * sfmmu static variables for hmeblk resource management. 233 */ 234 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 235 static struct kmem_cache *sfmmu8_cache; 236 static struct kmem_cache *sfmmu1_cache; 237 static struct kmem_cache *pa_hment_cache; 238 239 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 240 /* 241 * private data for ism 242 */ 243 static struct kmem_cache *ism_blk_cache; 244 static struct kmem_cache *ism_ment_cache; 245 #define ISMID_STARTADDR NULL 246 247 /* 248 * Region management data structures and function declarations. 249 */ 250 251 static void sfmmu_leave_srd(sfmmu_t *); 252 static int sfmmu_srdcache_constructor(void *, void *, int); 253 static void sfmmu_srdcache_destructor(void *, void *); 254 static int sfmmu_rgncache_constructor(void *, void *, int); 255 static void sfmmu_rgncache_destructor(void *, void *); 256 static int sfrgnmap_isnull(sf_region_map_t *); 257 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 258 static int sfmmu_scdcache_constructor(void *, void *, int); 259 static void sfmmu_scdcache_destructor(void *, void *); 260 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 261 size_t, void *, u_offset_t); 262 263 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 264 static sf_srd_bucket_t *srd_buckets; 265 static struct kmem_cache *srd_cache; 266 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 267 static struct kmem_cache *region_cache; 268 static struct kmem_cache *scd_cache; 269 270 #ifdef sun4v 271 int use_bigtsb_arena = 1; 272 #else 273 int use_bigtsb_arena = 0; 274 #endif 275 276 /* External /etc/system tunable, for turning on&off the shctx support */ 277 int disable_shctx = 0; 278 /* Internal variable, set by MD if the HW supports shctx feature */ 279 int shctx_on = 0; 280 281 #ifdef DEBUG 282 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 283 #endif 284 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 285 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 286 287 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 288 static void sfmmu_find_scd(sfmmu_t *); 289 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 290 static void sfmmu_finish_join_scd(sfmmu_t *); 291 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 292 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 293 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 294 static void sfmmu_free_scd_tsbs(sfmmu_t *); 295 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 296 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 297 static void sfmmu_ism_hatflags(sfmmu_t *, int); 298 static int sfmmu_srd_lock_held(sf_srd_t *); 299 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 300 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 301 static void 
sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 302 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 303 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 304 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 305 306 /* 307 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 308 * HAT flags, synchronizing TLB/TSB coherency, and context management. 309 * The lock is hashed on the sfmmup since the case where we need to lock 310 * all processes is rare but does occur (e.g. we need to unload a shared 311 * mapping from all processes using the mapping). We have a lot of buckets, 312 * and each slab of sfmmu_t's can use about a quarter of them, giving us 313 * a fairly good distribution without wasting too much space and overhead 314 * when we have to grab them all. 315 */ 316 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 317 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 318 319 /* 320 * Hash algorithm optimized for a small number of slabs. 321 * 7 is (highbit((sizeof sfmmu_t)) - 1) 322 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 323 * kmem_cache, and thus they will be sequential within that cache. In 324 * addition, each new slab will have a different "color" up to cache_maxcolor 325 * which will skew the hashing for each successive slab which is allocated. 326 * If the size of sfmmu_t changed to a larger size, this algorithm may need 327 * to be revisited. 328 */ 329 #define TSB_HASH_SHIFT_BITS (7) 330 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 331 332 #ifdef DEBUG 333 int tsb_hash_debug = 0; 334 #define TSB_HASH(sfmmup) \ 335 (tsb_hash_debug ? &hat_lock[0] : \ 336 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 337 #else /* DEBUG */ 338 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 339 #endif /* DEBUG */ 340 341 342 /* sfmmu_replace_tsb() return codes. */ 343 typedef enum tsb_replace_rc { 344 TSB_SUCCESS, 345 TSB_ALLOCFAIL, 346 TSB_LOSTRACE, 347 TSB_ALREADY_SWAPPED, 348 TSB_CANTGROW 349 } tsb_replace_rc_t; 350 351 /* 352 * Flags for TSB allocation routines. 353 */ 354 #define TSB_ALLOC 0x01 355 #define TSB_FORCEALLOC 0x02 356 #define TSB_GROW 0x04 357 #define TSB_SHRINK 0x08 358 #define TSB_SWAPIN 0x10 359 360 /* 361 * Support for HAT callbacks. 362 */ 363 #define SFMMU_MAX_RELOC_CALLBACKS 10 364 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 365 static id_t sfmmu_cb_nextid = 0; 366 static id_t sfmmu_tsb_cb_id; 367 struct sfmmu_callback *sfmmu_cb_table; 368 369 /* 370 * Kernel page relocation is enabled by default for non-caged 371 * kernel pages. This has little effect unless segkmem_reloc is 372 * set, since by default kernel memory comes from inside the 373 * kernel cage. 374 */ 375 int hat_kpr_enabled = 1; 376 377 kmutex_t kpr_mutex; 378 kmutex_t kpr_suspendlock; 379 kthread_t *kreloc_thread; 380 381 /* 382 * Enable VA->PA translation sanity checking on DEBUG kernels. 383 * Disabled by default. This is incompatible with some 384 * drivers (error injector, RSM) so if it breaks you get 385 * to keep both pieces. 
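 *
 * Usage note (added, illustrative): hat_check_vtop is an ordinary global,
 * so on a DEBUG kernel it can be enabled with "set hat_check_vtop = 1"
 * in /etc/system, subject to the driver incompatibilities noted above.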
386 */ 387 int hat_check_vtop = 0; 388 389 /* 390 * Private sfmmu routines (prototypes) 391 */ 392 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 393 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 394 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 395 uint_t); 396 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 397 caddr_t, demap_range_t *, uint_t); 398 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 399 caddr_t, int); 400 static void sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *, 401 uint64_t, struct hme_blk **); 402 static void sfmmu_hblks_list_purge(struct hme_blk **); 403 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 404 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 405 static struct hme_blk *sfmmu_hblk_steal(int); 406 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 407 struct hme_blk *, uint64_t, uint64_t, 408 struct hme_blk *); 409 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 410 411 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 412 struct page **, uint_t, uint_t, uint_t); 413 static void hat_do_memload(struct hat *, caddr_t, struct page *, 414 uint_t, uint_t, uint_t); 415 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 416 uint_t, uint_t, pgcnt_t, uint_t); 417 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 418 uint_t); 419 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 420 uint_t, uint_t); 421 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 422 caddr_t, int, uint_t); 423 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 424 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 425 uint_t); 426 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 427 caddr_t, page_t **, uint_t, uint_t); 428 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 429 430 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 431 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 432 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 433 #ifdef VAC 434 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 435 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 436 int tst_tnc(page_t *pp, pgcnt_t); 437 void conv_tnc(page_t *pp, int); 438 #endif 439 440 static void sfmmu_get_ctx(sfmmu_t *); 441 static void sfmmu_free_sfmmu(sfmmu_t *); 442 443 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 444 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 445 446 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 447 static void hat_pagereload(struct page *, struct page *); 448 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 449 #ifdef VAC 450 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 451 static void sfmmu_page_cache(page_t *, int, int, int); 452 #endif 453 454 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 455 struct hme_blk *, int); 456 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 457 pfn_t, int, int, int, int); 458 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 459 pfn_t, int); 460 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 461 static void sfmmu_tlb_range_demap(demap_range_t *); 462 static void sfmmu_invalidate_ctx(sfmmu_t *); 463 static void sfmmu_sync_mmustate(sfmmu_t *); 464 465 
static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 466 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 467 sfmmu_t *); 468 static void sfmmu_tsb_free(struct tsb_info *); 469 static void sfmmu_tsbinfo_free(struct tsb_info *); 470 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 471 sfmmu_t *); 472 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 473 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 474 static int sfmmu_select_tsb_szc(pgcnt_t); 475 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 476 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 477 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 478 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 479 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 480 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 481 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 482 hatlock_t *, uint_t); 483 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 484 485 #ifdef VAC 486 void sfmmu_cache_flush(pfn_t, int); 487 void sfmmu_cache_flushcolor(int, pfn_t); 488 #endif 489 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 490 caddr_t, demap_range_t *, uint_t, int); 491 492 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 493 static uint_t sfmmu_ptov_attr(tte_t *); 494 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 495 caddr_t, demap_range_t *, uint_t); 496 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 497 static int sfmmu_idcache_constructor(void *, void *, int); 498 static void sfmmu_idcache_destructor(void *, void *); 499 static int sfmmu_hblkcache_constructor(void *, void *, int); 500 static void sfmmu_hblkcache_destructor(void *, void *); 501 static void sfmmu_hblkcache_reclaim(void *); 502 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 503 struct hmehash_bucket *); 504 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 505 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 506 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 507 int, caddr_t *); 508 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 509 510 static void sfmmu_rm_large_mappings(page_t *, int); 511 512 static void hat_lock_init(void); 513 static void hat_kstat_init(void); 514 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 515 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 516 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 517 static void sfmmu_check_page_sizes(sfmmu_t *, int); 518 int fnd_mapping_sz(page_t *); 519 static void iment_add(struct ism_ment *, struct hat *); 520 static void iment_sub(struct ism_ment *, struct hat *); 521 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 522 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 523 extern void sfmmu_clear_utsbinfo(void); 524 525 static void sfmmu_ctx_wrap_around(mmu_ctx_t *); 526 527 /* kpm globals */ 528 #ifdef DEBUG 529 /* 530 * Enable trap level tsbmiss handling 531 */ 532 int kpm_tsbmtl = 1; 533 534 /* 535 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 536 * required TLB shootdowns in this case, so handle w/ care. Off by default. 537 */ 538 int kpm_tlb_flush; 539 #endif /* DEBUG */ 540 541 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 542 543 #ifdef DEBUG 544 static void sfmmu_check_hblk_flist(); 545 #endif 546 547 /* 548 * Semi-private sfmmu data structures. 
Some of them are initialized in
549 * startup or in hat_init. Some of them are private but accessed by
550 * assembly code or mach_sfmmu.c
551 */
552 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
553 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
554 uint64_t uhme_hash_pa; /* PA of uhme_hash */
555 uint64_t khme_hash_pa; /* PA of khme_hash */
556 int uhmehash_num; /* # of buckets in user hash table */
557 int khmehash_num; /* # of buckets in kernel hash table */
558
559 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
560 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
561 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
562
563 #define DEFAULT_NUM_CTXS_PER_MMU 8192
564 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
565
566 int cache; /* describes system cache */
567
568 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
569 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
570 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
571 int ktsb_sz; /* kernel 8k-indexed tsb size */
572
573 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
574 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
575 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
576 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
577
578 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
579 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
580 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
581 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
582
583 #ifndef sun4v
584 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
585 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
586 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
587 caddr_t utsb_vabase; /* reserved kernel virtual memory */
588 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
589 #endif /* sun4v */
590 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
591 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
592 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
593
594 /*
595 * Size to use for TSB slabs. Future platforms that support page sizes
596 * larger than 4M may wish to change these values, and provide their own
597 * assembly macros for building and decoding the TSB base register contents.
598 * Note disable_large_pages will override the value set here.
599 */
600 static uint_t tsb_slab_ttesz = TTE4M;
601 size_t tsb_slab_size = MMU_PAGESIZE4M;
602 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
603 /* PFN mask for TTE */
604 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
605
606 /*
607 * Size to use for TSB slabs. These are used only when 256M tsb arenas
608 * exist.
609 */
610 static uint_t bigtsb_slab_ttesz = TTE256M;
611 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
612 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
613 /* 256M page alignment for 8K pfn */
614 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
615
616 /* largest TSB size to grow to, will be smaller on smaller memory systems */
617 static int tsb_max_growsize = 0;
618
619 /*
620 * Tunable parameters dealing with TSB policies.
621 */
622
623 /*
624 * This undocumented tunable forces all 8K TSBs to be allocated from
625 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
626 */ 627 #ifdef DEBUG 628 int tsb_forceheap = 0; 629 #endif /* DEBUG */ 630 631 /* 632 * Decide whether to use per-lgroup arenas, or one global set of 633 * TSB arenas. The default is not to break up per-lgroup, since 634 * most platforms don't recognize any tangible benefit from it. 635 */ 636 int tsb_lgrp_affinity = 0; 637 638 /* 639 * Used for growing the TSB based on the process RSS. 640 * tsb_rss_factor is based on the smallest TSB, and is 641 * shifted by the TSB size to determine if we need to grow. 642 * The default will grow the TSB if the number of TTEs for 643 * this page size exceeds 75% of the number of TSB entries, 644 * which should _almost_ eliminate all conflict misses 645 * (at the expense of using up lots and lots of memory). 646 */ 647 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 648 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 649 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 650 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 651 default_tsb_size) 652 #define TSB_OK_SHRINK() \ 653 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 654 #define TSB_OK_GROW() \ 655 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 656 657 int enable_tsb_rss_sizing = 1; 658 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 659 660 /* which TSB size code to use for new address spaces or if rss sizing off */ 661 int default_tsb_size = TSB_8K_SZCODE; 662 663 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 664 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 665 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 666 667 #ifdef DEBUG 668 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 669 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 670 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 671 static int tsb_alloc_fail_mtbf = 0; 672 static int tsb_alloc_count = 0; 673 #endif /* DEBUG */ 674 675 /* if set to 1, will remap valid TTEs when growing TSB. */ 676 int tsb_remap_ttes = 1; 677 678 /* 679 * If we have more than this many mappings, allocate a second TSB. 680 * This default is chosen because the I/D fully associative TLBs are 681 * assumed to have at least 8 available entries. Platforms with a 682 * larger fully-associative TLB could probably override the default. 
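 *
 * Clarification (added, hedged): the mappings counted against this
 * threshold are the large (4M and larger) translations that would
 * otherwise be serviced only by the fully-associative TLB; once the
 * count exceeds the threshold a second, large-page-indexed TSB is
 * allocated. The sun4v value of 0 below means a second TSB is used as
 * soon as any such mapping exists.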
683 */
684
685 #ifdef sun4v
686 int tsb_sectsb_threshold = 0;
687 #else
688 int tsb_sectsb_threshold = 8;
689 #endif
690
691 /*
692 * kstat data
693 */
694 struct sfmmu_global_stat sfmmu_global_stat;
695 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
696
697 /*
698 * Global data
699 */
700 sfmmu_t *ksfmmup; /* kernel's hat id */
701
702 #ifdef DEBUG
703 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
704 #endif
705
706 /* sfmmu locking operations */
707 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
708 static int sfmmu_mlspl_held(struct page *, int);
709
710 kmutex_t *sfmmu_page_enter(page_t *);
711 void sfmmu_page_exit(kmutex_t *);
712 int sfmmu_page_spl_held(struct page *);
713
714 /* sfmmu internal locking operations - accessed directly */
715 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
716 kmutex_t **, kmutex_t **);
717 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
718 static hatlock_t *
719 sfmmu_hat_enter(sfmmu_t *);
720 static hatlock_t *
721 sfmmu_hat_tryenter(sfmmu_t *);
722 static void sfmmu_hat_exit(hatlock_t *);
723 static void sfmmu_hat_lock_all(void);
724 static void sfmmu_hat_unlock_all(void);
725 static void sfmmu_ismhat_enter(sfmmu_t *, int);
726 static void sfmmu_ismhat_exit(sfmmu_t *, int);
727
728 /*
729 * Array of mutexes protecting a page's mapping list and p_nrm field.
730 *
731 * The hash function looks complicated, but is made up so that:
732 *
733 * "pp" not shifted, so adjacent pp values will hash to different cache lines
734 * (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
735 *
736 * "pp" >> mml_shift, incorporates more source bits into the hash result
737 *
738 * "& (mml_table_sz - 1), should be faster than using remainder "%"
739 *
740 * Hopefully, mml_table, mml_table_sz and mml_shift are all in the same
741 * cacheline, since they get declared next to each other below. We'll trust
742 * ld not to do something random.
743 */
744 #ifdef DEBUG
745 int mlist_hash_debug = 0;
746 #define MLIST_HASH(pp) (mlist_hash_debug ? &mml_table[0] : \
747 &mml_table[((uintptr_t)(pp) + \
748 ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
749 #else /* !DEBUG */
750 #define MLIST_HASH(pp) &mml_table[ \
751 ((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
752 #endif /* !DEBUG */
753
754 kmutex_t *mml_table;
755 uint_t mml_table_sz; /* must be a power of 2 */
756 uint_t mml_shift; /* log2(mml_table_sz) + 3 for align */
757
758 kpm_hlk_t *kpmp_table;
759 uint_t kpmp_table_sz; /* must be a power of 2 */
760 uchar_t kpmp_shift;
761
762 kpm_shlk_t *kpmp_stable;
763 uint_t kpmp_stable_sz; /* must be a power of 2 */
764
765 /*
766 * SPL_HASH was improved to avoid false cache line sharing
767 */
768 #define SPL_TABLE_SIZE 128
769 #define SPL_MASK (SPL_TABLE_SIZE - 1)
770 #define SPL_SHIFT 7 /* log2(SPL_TABLE_SIZE) */
771
772 #define SPL_INDEX(pp) \
773 ((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
774 ((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
775 (SPL_TABLE_SIZE - 1))
776
777 #define SPL_HASH(pp) \
778 (&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)
779
780 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
781
782
783 /*
784 * hat_unload_callback() will group together callbacks in order
785 * to avoid xt_sync() calls. This is the maximum size of the group.
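 *
 * (Added note, hedged:) i.e. up to MAX_CB_ADDR unloaded address ranges
 * are batched and synchronized with the other CPUs in one cross-call
 * round rather than once per address.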
786 */ 787 #define MAX_CB_ADDR 32 788 789 tte_t hw_tte; 790 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 791 792 static char *mmu_ctx_kstat_names[] = { 793 "mmu_ctx_tsb_exceptions", 794 "mmu_ctx_tsb_raise_exception", 795 "mmu_ctx_wrap_around", 796 }; 797 798 /* 799 * Wrapper for vmem_xalloc since vmem_create only allows limited 800 * parameters for vm_source_alloc functions. This function allows us 801 * to specify alignment consistent with the size of the object being 802 * allocated. 803 */ 804 static void * 805 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 806 { 807 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 808 } 809 810 /* Common code for setting tsb_alloc_hiwater. */ 811 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 812 ptob(pages) / tsb_alloc_hiwater_factor 813 814 /* 815 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 816 * a single TSB. physmem is the number of physical pages so we need physmem 8K 817 * TTEs to represent all those physical pages. We round this up by using 818 * 1<<highbit(). To figure out which size code to use, remember that the size 819 * code is just an amount to shift the smallest TSB size to get the size of 820 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 821 * highbit() - 1) to get the size code for the smallest TSB that can represent 822 * all of physical memory, while erring on the side of too much. 823 * 824 * Restrict tsb_max_growsize to make sure that: 825 * 1) TSBs can't grow larger than the TSB slab size 826 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 827 */ 828 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 829 int _i, _szc, _slabszc, _tsbszc; \ 830 \ 831 _i = highbit(pages); \ 832 if ((1 << (_i - 1)) == (pages)) \ 833 _i--; /* 2^n case, round down */ \ 834 _szc = _i - TSB_START_SIZE; \ 835 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 836 _tsbszc = MIN(_szc, _slabszc); \ 837 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 838 } 839 840 /* 841 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 842 * tsb_info which handles that TTE size. 843 */ 844 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 845 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 846 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 847 sfmmu_hat_lock_held(sfmmup)); \ 848 if ((tte_szc) >= TTE4M) { \ 849 ASSERT((tsbinfop) != NULL); \ 850 (tsbinfop) = (tsbinfop)->tsb_next; \ 851 } \ 852 } 853 854 /* 855 * Macro to use to unload entries from the TSB. 856 * It has knowledge of which page sizes get replicated in the TSB 857 * and will call the appropriate unload routine for the appropriate size. 858 */ 859 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 860 { \ 861 int ttesz = get_hblk_ttesz(hmeblkp); \ 862 if (ttesz == TTE8K || ttesz == TTE4M) { \ 863 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 864 } else { \ 865 caddr_t sva = ismhat ? addr : \ 866 (caddr_t)get_hblk_base(hmeblkp); \ 867 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 868 ASSERT(addr >= sva && addr < eva); \ 869 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 870 } \ 871 } 872 873 874 /* Update tsb_alloc_hiwater after memory is configured. */ 875 /*ARGSUSED*/ 876 static void 877 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 878 { 879 /* Assumes physmem has already been updated. 
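 * Worked example (added, illustrative): with 4 GB of memory physmem is
 * 512K 8K-pages, so the default tsb_alloc_hiwater_factor of 32 gives
 * tsb_alloc_hiwater = ptob(512K) / 32 = 128 MB of TSB slab memory
 * before TSB allocations fall back to the kernel heap.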
*/
880 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
881 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
882 }
883
884 /*
885 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
886 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
887 * deleted.
888 */
889 /*ARGSUSED*/
890 static int
891 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
892 {
893 return (0);
894 }
895
896 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
897 /*ARGSUSED*/
898 static void
899 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
900 {
901 /*
902 * Whether the delete was cancelled or not, just go ahead and update
903 * tsb_alloc_hiwater and tsb_max_growsize.
904 */
905 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
906 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
907 }
908
909 static kphysm_setup_vector_t sfmmu_update_vec = {
910 KPHYSM_SETUP_VECTOR_VERSION, /* version */
911 sfmmu_update_post_add, /* post_add */
912 sfmmu_update_pre_del, /* pre_del */
913 sfmmu_update_post_del /* post_del */
914 };
915
916
917 /*
918 * HME_BLK HASH PRIMITIVES
919 */
920
921 /*
922 * Enter a hme on the mapping list for page pp.
923 * When large pages are more prevalent in the system we might want to
924 * keep the mapping list in ascending order by the hment size. For now,
925 * small pages are more frequent, so don't slow it down.
926 */
927 #define HME_ADD(hme, pp) \
928 { \
929 ASSERT(sfmmu_mlist_held(pp)); \
930 \
931 hme->hme_prev = NULL; \
932 hme->hme_next = pp->p_mapping; \
933 hme->hme_page = pp; \
934 if (pp->p_mapping) { \
935 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
936 ASSERT(pp->p_share > 0); \
937 } else { \
938 /* EMPTY */ \
939 ASSERT(pp->p_share == 0); \
940 } \
941 pp->p_mapping = hme; \
942 pp->p_share++; \
943 }
944
945 /*
946 * Remove a hme from the mapping list for page pp.
947 * If we are unmapping a large translation, we need to make sure that the
948 * change is reflected in the corresponding bit of the p_index field.
949 */
950 #define HME_SUB(hme, pp) \
951 { \
952 ASSERT(sfmmu_mlist_held(pp)); \
953 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
954 \
955 if (pp->p_mapping == NULL) { \
956 panic("hme_remove - no mappings"); \
957 } \
958 \
959 membar_stst(); /* ensure previous stores finish */ \
960 \
961 ASSERT(pp->p_share > 0); \
962 pp->p_share--; \
963 \
964 if (hme->hme_prev) { \
965 ASSERT(pp->p_mapping != hme); \
966 ASSERT(hme->hme_prev->hme_page == pp || \
967 IS_PAHME(hme->hme_prev)); \
968 hme->hme_prev->hme_next = hme->hme_next; \
969 } else { \
970 ASSERT(pp->p_mapping == hme); \
971 pp->p_mapping = hme->hme_next; \
972 ASSERT((pp->p_mapping == NULL) ? \
973 (pp->p_share == 0) : 1); \
974 } \
975 \
976 if (hme->hme_next) { \
977 ASSERT(hme->hme_next->hme_page == pp || \
978 IS_PAHME(hme->hme_next)); \
979 hme->hme_next->hme_prev = hme->hme_prev; \
980 } \
981 \
982 /* zero out the entry */ \
983 hme->hme_next = NULL; \
984 hme->hme_prev = NULL; \
985 hme->hme_page = NULL; \
986 \
987 if (hme_size(hme) > TTE8K) { \
988 /* remove mappings for remainder of large pg */ \
989 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
990 } \
991 }
992
993 /*
994 * This function returns the hment given the hme_blk and a vaddr.
995 * It assumes addr has already been checked to belong to hme_blk's
996 * range.
997 */
998 #define HBLKTOHME(hment, hmeblkp, addr) \
999 { \
1000 int index; \
1001 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
1002 }
1003
1004 /*
1005 * Version of HBLKTOHME that also returns the index in hmeblkp
1006 * of the hment.
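 *
 * Example (added; assumes the usual NHMENTS == 8): an 8K-TTE hme_blk
 * covers eight consecutive 8K pages, so the index below is simply
 * ((addr >> MMU_PAGESHIFT) & (NHMENTS - 1)), i.e. VA bits <15:13>;
 * hme_blks for larger page sizes always use hment 0.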
1007 */
1008 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1009 { \
1010 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1011 \
1012 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1013 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1014 } else \
1015 idx = 0; \
1016 \
1017 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1018 }
1019
1020 /*
1021 * Disable any page sizes not supported by the CPU
1022 */
1023 void
1024 hat_init_pagesizes()
1025 {
1026 int i;
1027
1028 mmu_exported_page_sizes = 0;
1029 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1030
1031 szc_2_userszc[i] = (uint_t)-1;
1032 userszc_2_szc[i] = (uint_t)-1;
1033
1034 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1035 disable_large_pages |= (1 << i);
1036 } else {
1037 szc_2_userszc[i] = mmu_exported_page_sizes;
1038 userszc_2_szc[mmu_exported_page_sizes] = i;
1039 mmu_exported_page_sizes++;
1040 }
1041 }
1042
1043 disable_ism_large_pages |= disable_large_pages;
1044 disable_auto_data_large_pages = disable_large_pages;
1045 disable_auto_text_large_pages = disable_large_pages;
1046
1047 /*
1048 * Initialize mmu-specific large page sizes.
1049 */
1050 if (&mmu_large_pages_disabled) {
1051 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1052 disable_ism_large_pages |=
1053 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1054 disable_auto_data_large_pages |=
1055 mmu_large_pages_disabled(HAT_AUTO_DATA);
1056 disable_auto_text_large_pages |=
1057 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1058 }
1059 }
1060
1061 /*
1062 * Initialize the hardware address translation structures.
1063 */
1064 void
1065 hat_init(void)
1066 {
1067 int i;
1068 uint_t sz;
1069 size_t size;
1070
1071 hat_lock_init();
1072 hat_kstat_init();
1073
1074 /*
1075 * Hardware-only bits in a TTE
1076 */
1077 MAKE_TTE_MASK(&hw_tte);
1078
1079 hat_init_pagesizes();
1080
1081 /* Initialize the hash locks */
1082 for (i = 0; i < khmehash_num; i++) {
1083 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1084 MUTEX_DEFAULT, NULL);
1085 }
1086 for (i = 0; i < uhmehash_num; i++) {
1087 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1088 MUTEX_DEFAULT, NULL);
1089 }
1090 khmehash_num--; /* make sure counter starts from 0 */
1091 uhmehash_num--; /* make sure counter starts from 0 */
1092
1093 /*
1094 * Allocate context domain structures.
1095 *
1096 * A platform may choose to modify max_mmu_ctxdoms in
1097 * set_platform_defaults(). If a platform does not define
1098 * a set_platform_defaults() or does not choose to modify
1099 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1100 *
1101 * For sun4v, there will be one global context domain; this is to
1102 * avoid the ldom cpu substitution problem.
1103 *
1104 * For all platforms that have CPUs sharing MMUs, this
1105 * value must be defined.
1106 */
1107 if (max_mmu_ctxdoms == 0) {
1108 #ifndef sun4v
1109 max_mmu_ctxdoms = max_ncpus;
1110 #else /* sun4v */
1111 max_mmu_ctxdoms = 1;
1112 #endif /* sun4v */
1113 }
1114
1115 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1116 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1117
1118 /* mmu_ctx_t is 64 bytes aligned */
1119 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1120 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1121 /*
1122 * MMU context domain initialization for the Boot CPU.
1123 * This needs the context domains array allocated above.
1124 */
1125 mutex_enter(&cpu_lock);
1126 sfmmu_cpu_init(CPU);
1127 mutex_exit(&cpu_lock);
1128
1129 /*
1130 * Initialize ism mapping list lock.
1131 */ 1132 1133 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1134 1135 /* 1136 * Each sfmmu structure carries an array of MMU context info 1137 * structures, one per context domain. The size of this array depends 1138 * on the maximum number of context domains. So, the size of the 1139 * sfmmu structure varies per platform. 1140 * 1141 * sfmmu is allocated from static arena, because trap 1142 * handler at TL > 0 is not allowed to touch kernel relocatable 1143 * memory. sfmmu's alignment is changed to 64 bytes from 1144 * default 8 bytes, as the lower 6 bits will be used to pass 1145 * pgcnt to vtag_flush_pgcnt_tl1. 1146 */ 1147 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1148 1149 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1150 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1151 NULL, NULL, static_arena, 0); 1152 1153 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1154 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1155 1156 /* 1157 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1158 * from the heap when low on memory or when TSB_FORCEALLOC is 1159 * specified, don't use magazines to cache them--we want to return 1160 * them to the system as quickly as possible. 1161 */ 1162 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1163 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1164 static_arena, KMC_NOMAGAZINE); 1165 1166 /* 1167 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1168 * memory, which corresponds to the old static reserve for TSBs. 1169 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1170 * memory we'll allocate for TSB slabs; beyond this point TSB 1171 * allocations will be taken from the kernel heap (via 1172 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1173 * consumer. 1174 */ 1175 if (tsb_alloc_hiwater_factor == 0) { 1176 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1177 } 1178 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1179 1180 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1181 if (!(disable_large_pages & (1 << sz))) 1182 break; 1183 } 1184 1185 if (sz < tsb_slab_ttesz) { 1186 tsb_slab_ttesz = sz; 1187 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1188 tsb_slab_size = 1 << tsb_slab_shift; 1189 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1190 use_bigtsb_arena = 0; 1191 } else if (use_bigtsb_arena && 1192 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1193 use_bigtsb_arena = 0; 1194 } 1195 1196 if (!use_bigtsb_arena) { 1197 bigtsb_slab_shift = tsb_slab_shift; 1198 } 1199 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1200 1201 /* 1202 * On smaller memory systems, allocate TSB memory in smaller chunks 1203 * than the default 4M slab size. We also honor disable_large_pages 1204 * here. 1205 * 1206 * The trap handlers need to be patched with the final slab shift, 1207 * since they need to be able to construct the TSB pointer at runtime. 
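 *
 * Worked example (added, illustrative): when the loop above selects a
 * smaller slab page size sz, the recomputed shift is
 * MMU_PAGESHIFT + 3 * sz; for sz == TTE512K (2) that is 13 + 6 = 19
 * (MMU_PAGESHIFT512K), and for the default TTE4M (3) it is 13 + 9 = 22.
 * The block below applies the 512K case explicitly whenever the largest
 * TSB we will ever grow fits in a 512K slab (and 512K pages are not
 * disabled).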
1208 */ 1209 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1210 !(disable_large_pages & (1 << TTE512K))) { 1211 tsb_slab_ttesz = TTE512K; 1212 tsb_slab_shift = MMU_PAGESHIFT512K; 1213 tsb_slab_size = MMU_PAGESIZE512K; 1214 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1215 use_bigtsb_arena = 0; 1216 } 1217 1218 if (!use_bigtsb_arena) { 1219 bigtsb_slab_ttesz = tsb_slab_ttesz; 1220 bigtsb_slab_shift = tsb_slab_shift; 1221 bigtsb_slab_size = tsb_slab_size; 1222 bigtsb_slab_mask = tsb_slab_mask; 1223 } 1224 1225 1226 /* 1227 * Set up memory callback to update tsb_alloc_hiwater and 1228 * tsb_max_growsize. 1229 */ 1230 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1231 ASSERT(i == 0); 1232 1233 /* 1234 * kmem_tsb_arena is the source from which large TSB slabs are 1235 * drawn. The quantum of this arena corresponds to the largest 1236 * TSB size we can dynamically allocate for user processes. 1237 * Currently it must also be a supported page size since we 1238 * use exactly one translation entry to map each slab page. 1239 * 1240 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1241 * which most TSBs are allocated. Since most TSB allocations are 1242 * typically 8K we have a kmem cache we stack on top of each 1243 * kmem_tsb_default_arena to speed up those allocations. 1244 * 1245 * Note the two-level scheme of arenas is required only 1246 * because vmem_create doesn't allow us to specify alignment 1247 * requirements. If this ever changes the code could be 1248 * simplified to use only one level of arenas. 1249 * 1250 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1251 * will be provided in addition to the 4M kmem_tsb_arena. 1252 */ 1253 if (use_bigtsb_arena) { 1254 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1255 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1256 vmem_xfree, heap_arena, 0, VM_SLEEP); 1257 } 1258 1259 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1260 sfmmu_vmem_xalloc_aligned_wrapper, 1261 vmem_xfree, heap_arena, 0, VM_SLEEP); 1262 1263 if (tsb_lgrp_affinity) { 1264 char s[50]; 1265 for (i = 0; i < NLGRPS_MAX; i++) { 1266 if (use_bigtsb_arena) { 1267 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1268 kmem_bigtsb_default_arena[i] = vmem_create(s, 1269 NULL, 0, 2 * tsb_slab_size, 1270 sfmmu_tsb_segkmem_alloc, 1271 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1272 0, VM_SLEEP | VM_BESTFIT); 1273 } 1274 1275 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1276 kmem_tsb_default_arena[i] = vmem_create(s, 1277 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1278 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1279 VM_SLEEP | VM_BESTFIT); 1280 1281 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1282 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1283 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1284 kmem_tsb_default_arena[i], 0); 1285 } 1286 } else { 1287 if (use_bigtsb_arena) { 1288 kmem_bigtsb_default_arena[0] = 1289 vmem_create("kmem_bigtsb_default", NULL, 0, 1290 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1291 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1292 VM_SLEEP | VM_BESTFIT); 1293 } 1294 1295 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1296 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1297 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1298 VM_SLEEP | VM_BESTFIT); 1299 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1300 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1301 kmem_tsb_default_arena[0], 0); 1302 } 1303 1304 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1305 
HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1306 sfmmu_hblkcache_destructor, 1307 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1308 hat_memload_arena, KMC_NOHASH); 1309 1310 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1311 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP); 1312 1313 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1314 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1315 sfmmu_hblkcache_destructor, 1316 NULL, (void *)HME1BLK_SZ, 1317 hat_memload1_arena, KMC_NOHASH); 1318 1319 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1320 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1321 1322 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1323 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1324 NULL, NULL, static_arena, KMC_NOHASH); 1325 1326 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1327 sizeof (ism_ment_t), 0, NULL, NULL, 1328 NULL, NULL, NULL, 0); 1329 1330 /* 1331 * We grab the first hat for the kernel, 1332 */ 1333 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1334 kas.a_hat = hat_alloc(&kas); 1335 AS_LOCK_EXIT(&kas, &kas.a_lock); 1336 1337 /* 1338 * Initialize hblk_reserve. 1339 */ 1340 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1341 va_to_pa((caddr_t)hblk_reserve); 1342 1343 #ifndef UTSB_PHYS 1344 /* 1345 * Reserve some kernel virtual address space for the locked TTEs 1346 * that allow us to probe the TSB from TL>0. 1347 */ 1348 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1349 0, 0, NULL, NULL, VM_SLEEP); 1350 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1351 0, 0, NULL, NULL, VM_SLEEP); 1352 #endif 1353 1354 #ifdef VAC 1355 /* 1356 * The big page VAC handling code assumes VAC 1357 * will not be bigger than the smallest big 1358 * page- which is 64K. 1359 */ 1360 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1361 cmn_err(CE_PANIC, "VAC too big!"); 1362 } 1363 #endif 1364 1365 (void) xhat_init(); 1366 1367 uhme_hash_pa = va_to_pa(uhme_hash); 1368 khme_hash_pa = va_to_pa(khme_hash); 1369 1370 /* 1371 * Initialize relocation locks. kpr_suspendlock is held 1372 * at PIL_MAX to prevent interrupts from pinning the holder 1373 * of a suspended TTE which may access it leading to a 1374 * deadlock condition. 1375 */ 1376 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1377 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1378 1379 /* 1380 * If Shared context support is disabled via /etc/system 1381 * set shctx_on to 0 here if it was set to 1 earlier in boot 1382 * sequence by cpu module initialization code. 1383 */ 1384 if (shctx_on && disable_shctx) { 1385 shctx_on = 0; 1386 } 1387 1388 if (shctx_on) { 1389 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1390 sizeof (srd_buckets[0]), KM_SLEEP); 1391 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1392 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1393 MUTEX_DEFAULT, NULL); 1394 } 1395 1396 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1397 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1398 NULL, NULL, NULL, 0); 1399 region_cache = kmem_cache_create("region_cache", 1400 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1401 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1402 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1403 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1404 NULL, NULL, NULL, 0); 1405 } 1406 1407 /* 1408 * Pre-allocate hrm_hashtab before enabling the collection of 1409 * refmod statistics. 
Allocating on the fly would mean us 1410 * running the risk of suffering recursive mutex enters or 1411 * deadlocks. 1412 */ 1413 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1414 KM_SLEEP); 1415 } 1416 1417 /* 1418 * Initialize locking for the hat layer, called early during boot. 1419 */ 1420 static void 1421 hat_lock_init() 1422 { 1423 int i; 1424 1425 /* 1426 * initialize the array of mutexes protecting a page's mapping 1427 * list and p_nrm field. 1428 */ 1429 for (i = 0; i < mml_table_sz; i++) 1430 mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL); 1431 1432 if (kpm_enable) { 1433 for (i = 0; i < kpmp_table_sz; i++) { 1434 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1435 MUTEX_DEFAULT, NULL); 1436 } 1437 } 1438 1439 /* 1440 * Initialize array of mutex locks that protects sfmmu fields and 1441 * TSB lists. 1442 */ 1443 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1444 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1445 NULL); 1446 } 1447 1448 #define SFMMU_KERNEL_MAXVA \ 1449 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1450 1451 /* 1452 * Allocate a hat structure. 1453 * Called when an address space first uses a hat. 1454 */ 1455 struct hat * 1456 hat_alloc(struct as *as) 1457 { 1458 sfmmu_t *sfmmup; 1459 int i; 1460 uint64_t cnum; 1461 extern uint_t get_color_start(struct as *); 1462 1463 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1464 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1465 sfmmup->sfmmu_as = as; 1466 sfmmup->sfmmu_flags = 0; 1467 sfmmup->sfmmu_tteflags = 0; 1468 sfmmup->sfmmu_rtteflags = 0; 1469 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1470 1471 if (as == &kas) { 1472 ksfmmup = sfmmup; 1473 sfmmup->sfmmu_cext = 0; 1474 cnum = KCONTEXT; 1475 1476 sfmmup->sfmmu_clrstart = 0; 1477 sfmmup->sfmmu_tsb = NULL; 1478 /* 1479 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1480 * to setup tsb_info for ksfmmup. 1481 */ 1482 } else { 1483 1484 /* 1485 * Just set to invalid ctx. When it faults, it will 1486 * get a valid ctx. This would avoid the situation 1487 * where we get a ctx, but it gets stolen and then 1488 * we fault when we try to run and so have to get 1489 * another ctx. 
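 *
 * (Added note:) the INVALID_CONTEXT chosen here is copied into every
 * per-MMU-domain sfmmu_ctxs[] entry further down, and the new hat
 * starts out flagged HAT_SWAPPED | HAT_ALLCTX_INVALID.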
1490 */ 1491 sfmmup->sfmmu_cext = 0; 1492 cnum = INVALID_CONTEXT; 1493 1494 /* initialize original physical page coloring bin */ 1495 sfmmup->sfmmu_clrstart = get_color_start(as); 1496 #ifdef DEBUG 1497 if (tsb_random_size) { 1498 uint32_t randval = (uint32_t)gettick() >> 4; 1499 int size = randval % (tsb_max_growsize + 1); 1500 1501 /* chose a random tsb size for stress testing */ 1502 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1503 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1504 } else 1505 #endif /* DEBUG */ 1506 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1507 default_tsb_size, 1508 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1509 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1510 ASSERT(sfmmup->sfmmu_tsb != NULL); 1511 } 1512 1513 ASSERT(max_mmu_ctxdoms > 0); 1514 for (i = 0; i < max_mmu_ctxdoms; i++) { 1515 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1516 sfmmup->sfmmu_ctxs[i].gnum = 0; 1517 } 1518 1519 for (i = 0; i < max_mmu_page_sizes; i++) { 1520 sfmmup->sfmmu_ttecnt[i] = 0; 1521 sfmmup->sfmmu_scdrttecnt[i] = 0; 1522 sfmmup->sfmmu_ismttecnt[i] = 0; 1523 sfmmup->sfmmu_scdismttecnt[i] = 0; 1524 sfmmup->sfmmu_pgsz[i] = TTE8K; 1525 } 1526 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1527 sfmmup->sfmmu_iblk = NULL; 1528 sfmmup->sfmmu_ismhat = 0; 1529 sfmmup->sfmmu_scdhat = 0; 1530 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1531 if (sfmmup == ksfmmup) { 1532 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1533 } else { 1534 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1535 } 1536 sfmmup->sfmmu_free = 0; 1537 sfmmup->sfmmu_rmstat = 0; 1538 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1539 sfmmup->sfmmu_xhat_provider = NULL; 1540 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1541 sfmmup->sfmmu_srdp = NULL; 1542 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1543 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1544 sfmmup->sfmmu_scdp = NULL; 1545 sfmmup->sfmmu_scd_link.next = NULL; 1546 sfmmup->sfmmu_scd_link.prev = NULL; 1547 return (sfmmup); 1548 } 1549 1550 /* 1551 * Create per-MMU context domain kstats for a given MMU ctx. 1552 */ 1553 static void 1554 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1555 { 1556 mmu_ctx_stat_t stat; 1557 kstat_t *mmu_kstat; 1558 1559 ASSERT(MUTEX_HELD(&cpu_lock)); 1560 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1561 1562 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1563 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1564 1565 if (mmu_kstat == NULL) { 1566 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1567 mmu_ctxp->mmu_idx); 1568 } else { 1569 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1570 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1571 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1572 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1573 mmu_ctxp->mmu_kstat = mmu_kstat; 1574 kstat_install(mmu_kstat); 1575 } 1576 } 1577 1578 /* 1579 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1580 * context domain information for a given CPU. If a platform does not 1581 * specify that interface, then the function below is used instead to return 1582 * default information. The defaults are as follows: 1583 * 1584 * - For sun4u systems there's one MMU context domain per CPU. 1585 * This default is used by all sun4u systems except OPL. OPL systems 1586 * provide platform specific interface to map CPU ids to MMU ids 1587 * because on OPL more than 1 CPU shares a single MMU. 1588 * Note that on sun4v, there is one global context domain for 1589 * the entire system. 
This is to avoid running into potential problem 1590 * with ldom physical cpu substitution feature. 1591 * - The number of MMU context IDs supported on any CPU in the 1592 * system is 8K. 1593 */ 1594 /*ARGSUSED*/ 1595 static void 1596 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1597 { 1598 infop->mmu_nctxs = nctxs; 1599 #ifndef sun4v 1600 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1601 #else /* sun4v */ 1602 infop->mmu_idx = 0; 1603 #endif /* sun4v */ 1604 } 1605 1606 /* 1607 * Called during CPU initialization to set the MMU context-related information 1608 * for a CPU. 1609 * 1610 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1611 */ 1612 void 1613 sfmmu_cpu_init(cpu_t *cp) 1614 { 1615 mmu_ctx_info_t info; 1616 mmu_ctx_t *mmu_ctxp; 1617 1618 ASSERT(MUTEX_HELD(&cpu_lock)); 1619 1620 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1621 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1622 else 1623 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1624 1625 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1626 1627 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1628 /* Each mmu_ctx is cacheline aligned. */ 1629 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1630 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1631 1632 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1633 (void *)ipltospl(DISP_LEVEL)); 1634 mmu_ctxp->mmu_idx = info.mmu_idx; 1635 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1636 /* 1637 * Globally for lifetime of a system, 1638 * gnum must always increase. 1639 * mmu_saved_gnum is protected by the cpu_lock. 1640 */ 1641 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1642 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1643 1644 sfmmu_mmu_kstat_create(mmu_ctxp); 1645 1646 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1647 } else { 1648 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1649 } 1650 1651 /* 1652 * The mmu_lock is acquired here to prevent races with 1653 * the wrap-around code. 1654 */ 1655 mutex_enter(&mmu_ctxp->mmu_lock); 1656 1657 1658 mmu_ctxp->mmu_ncpus++; 1659 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1660 CPU_MMU_IDX(cp) = info.mmu_idx; 1661 CPU_MMU_CTXP(cp) = mmu_ctxp; 1662 1663 mutex_exit(&mmu_ctxp->mmu_lock); 1664 } 1665 1666 /* 1667 * Called to perform MMU context-related cleanup for a CPU. 1668 */ 1669 void 1670 sfmmu_cpu_cleanup(cpu_t *cp) 1671 { 1672 mmu_ctx_t *mmu_ctxp; 1673 1674 ASSERT(MUTEX_HELD(&cpu_lock)); 1675 1676 mmu_ctxp = CPU_MMU_CTXP(cp); 1677 ASSERT(mmu_ctxp != NULL); 1678 1679 /* 1680 * The mmu_lock is acquired here to prevent races with 1681 * the wrap-around code. 1682 */ 1683 mutex_enter(&mmu_ctxp->mmu_lock); 1684 1685 CPU_MMU_CTXP(cp) = NULL; 1686 1687 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1688 if (--mmu_ctxp->mmu_ncpus == 0) { 1689 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1690 mutex_exit(&mmu_ctxp->mmu_lock); 1691 mutex_destroy(&mmu_ctxp->mmu_lock); 1692 1693 if (mmu_ctxp->mmu_kstat) 1694 kstat_delete(mmu_ctxp->mmu_kstat); 1695 1696 /* mmu_saved_gnum is protected by the cpu_lock. */ 1697 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1698 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1699 1700 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1701 1702 return; 1703 } 1704 1705 mutex_exit(&mmu_ctxp->mmu_lock); 1706 } 1707 1708 /* 1709 * Hat_setup, makes an address space context the current active one. 1710 * In sfmmu this translates to setting the secondary context with the 1711 * corresponding context. 1712 */ 1713 void 1714 hat_setup(struct hat *sfmmup, int allocflag) 1715 { 1716 hatlock_t *hatlockp; 1717 1718 /* Init needs some special treatment. 
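 * (Added summary:) for HAT_INIT we swap in a TSB and acquire a context
 * up front, as the comments below explain; for HAT_ALLOC we only program
 * INVALID_CONTEXT into the secondary context register and let the first
 * fault establish a real context.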
*/ 1719 if (allocflag == HAT_INIT) { 1720 /* 1721 * Make sure that we have 1722 * 1. a TSB 1723 * 2. a valid ctx that doesn't get stolen after this point. 1724 */ 1725 hatlockp = sfmmu_hat_enter(sfmmup); 1726 1727 /* 1728 * Swap in the TSB. hat_init() allocates tsbinfos without 1729 * TSBs, but we need one for init, since the kernel does some 1730 * special things to set up its stack and needs the TSB to 1731 * resolve page faults. 1732 */ 1733 sfmmu_tsb_swapin(sfmmup, hatlockp); 1734 1735 sfmmu_get_ctx(sfmmup); 1736 1737 sfmmu_hat_exit(hatlockp); 1738 } else { 1739 ASSERT(allocflag == HAT_ALLOC); 1740 1741 hatlockp = sfmmu_hat_enter(sfmmup); 1742 kpreempt_disable(); 1743 1744 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1745 /* 1746 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1747 * pagesize bits don't matter in this case since we are passing 1748 * INVALID_CONTEXT to it. 1749 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1750 */ 1751 sfmmu_setctx_sec(INVALID_CONTEXT); 1752 sfmmu_clear_utsbinfo(); 1753 1754 kpreempt_enable(); 1755 sfmmu_hat_exit(hatlockp); 1756 } 1757 } 1758 1759 /* 1760 * Free all the translation resources for the specified address space. 1761 * Called from as_free when an address space is being destroyed. 1762 */ 1763 void 1764 hat_free_start(struct hat *sfmmup) 1765 { 1766 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1767 ASSERT(sfmmup != ksfmmup); 1768 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1769 1770 sfmmup->sfmmu_free = 1; 1771 if (sfmmup->sfmmu_scdp != NULL) { 1772 sfmmu_leave_scd(sfmmup, 0); 1773 } 1774 1775 ASSERT(sfmmup->sfmmu_scdp == NULL); 1776 } 1777 1778 void 1779 hat_free_end(struct hat *sfmmup) 1780 { 1781 int i; 1782 1783 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1784 ASSERT(sfmmup->sfmmu_free == 1); 1785 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1786 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1787 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1788 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1789 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1790 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1791 1792 if (sfmmup->sfmmu_rmstat) { 1793 hat_freestat(sfmmup->sfmmu_as, NULL); 1794 } 1795 1796 while (sfmmup->sfmmu_tsb != NULL) { 1797 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1798 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1799 sfmmup->sfmmu_tsb = next; 1800 } 1801 1802 if (sfmmup->sfmmu_srdp != NULL) { 1803 sfmmu_leave_srd(sfmmup); 1804 ASSERT(sfmmup->sfmmu_srdp == NULL); 1805 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1806 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1807 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1808 SFMMU_L2_HMERLINKS_SIZE); 1809 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1810 } 1811 } 1812 } 1813 sfmmu_free_sfmmu(sfmmup); 1814 1815 #ifdef DEBUG 1816 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1817 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1818 } 1819 #endif 1820 1821 kmem_cache_free(sfmmuid_cache, sfmmup); 1822 } 1823 1824 /* 1825 * Set up any translation structures, for the specified address space, 1826 * that are needed or preferred when the process is being swapped in. 1827 */ 1828 /* ARGSUSED */ 1829 void 1830 hat_swapin(struct hat *hat) 1831 { 1832 ASSERT(hat->sfmmu_xhat_provider == NULL); 1833 } 1834 1835 /* 1836 * Free all of the translation resources, for the specified address space, 1837 * that can be freed while the process is swapped out. Called from as_swapout. 1838 * Also, free up the ctx that this process was using. 
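 * On sfmmu this means walking the user hme hash to unload this hat's mappings and free any hme_blks that are no longer in use, invalidating the context, and freeing the TSB memory while keeping the (now TSB_SWAPPED) tsbinfos.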
1839 */ 1840 void 1841 hat_swapout(struct hat *sfmmup) 1842 { 1843 struct hmehash_bucket *hmebp; 1844 struct hme_blk *hmeblkp; 1845 struct hme_blk *pr_hblk = NULL; 1846 struct hme_blk *nx_hblk; 1847 int i; 1848 uint64_t hblkpa, prevpa, nx_pa; 1849 struct hme_blk *list = NULL; 1850 hatlock_t *hatlockp; 1851 struct tsb_info *tsbinfop; 1852 struct free_tsb { 1853 struct free_tsb *next; 1854 struct tsb_info *tsbinfop; 1855 }; /* free list of TSBs */ 1856 struct free_tsb *freelist, *last, *next; 1857 1858 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1859 SFMMU_STAT(sf_swapout); 1860 1861 /* 1862 * There is no way to go from an as to all its translations in sfmmu. 1863 * Here is one of the times when we take the big hit and traverse 1864 * the hash looking for hme_blks to free up. Not only do we free up 1865 * this as hme_blks but all those that are free. We are obviously 1866 * swapping because we need memory so let's free up as much 1867 * as we can. 1868 * 1869 * Note that we don't flush TLB/TSB here -- it's not necessary 1870 * because: 1871 * 1) we free the ctx we're using and throw away the TSB(s); 1872 * 2) processes aren't runnable while being swapped out. 1873 */ 1874 ASSERT(sfmmup != KHATID); 1875 for (i = 0; i <= UHMEHASH_SZ; i++) { 1876 hmebp = &uhme_hash[i]; 1877 SFMMU_HASH_LOCK(hmebp); 1878 hmeblkp = hmebp->hmeblkp; 1879 hblkpa = hmebp->hmeh_nextpa; 1880 prevpa = 0; 1881 pr_hblk = NULL; 1882 while (hmeblkp) { 1883 1884 ASSERT(!hmeblkp->hblk_xhat_bit); 1885 1886 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 1887 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 1888 ASSERT(!hmeblkp->hblk_shared); 1889 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 1890 (caddr_t)get_hblk_base(hmeblkp), 1891 get_hblk_endaddr(hmeblkp), 1892 NULL, HAT_UNLOAD); 1893 } 1894 nx_hblk = hmeblkp->hblk_next; 1895 nx_pa = hmeblkp->hblk_nextpa; 1896 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 1897 ASSERT(!hmeblkp->hblk_lckcnt); 1898 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 1899 prevpa, pr_hblk); 1900 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 1901 } else { 1902 pr_hblk = hmeblkp; 1903 prevpa = hblkpa; 1904 } 1905 hmeblkp = nx_hblk; 1906 hblkpa = nx_pa; 1907 } 1908 SFMMU_HASH_UNLOCK(hmebp); 1909 } 1910 1911 sfmmu_hblks_list_purge(&list); 1912 1913 /* 1914 * Now free up the ctx so that others can reuse it. 1915 */ 1916 hatlockp = sfmmu_hat_enter(sfmmup); 1917 1918 sfmmu_invalidate_ctx(sfmmup); 1919 1920 /* 1921 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 1922 * If TSBs were never swapped in, just return. 1923 * This implies that we don't support partial swapping 1924 * of TSBs -- either all are swapped out, or none are. 1925 * 1926 * We must hold the HAT lock here to prevent racing with another 1927 * thread trying to unmap TTEs from the TSB or running the post- 1928 * relocator after relocating the TSB's memory. Unfortunately, we 1929 * can't free memory while holding the HAT lock or we could 1930 * deadlock, so we build a list of TSBs to be freed after marking 1931 * the tsbinfos as swapped out and free them after dropping the 1932 * lock. 1933 */ 1934 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 1935 sfmmu_hat_exit(hatlockp); 1936 return; 1937 } 1938 1939 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 1940 last = freelist = NULL; 1941 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 1942 tsbinfop = tsbinfop->tsb_next) { 1943 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 1944 1945 /* 1946 * Cast the TSB into a struct free_tsb and put it on the free 1947 * list. 
1948 */ 1949 if (freelist == NULL) { 1950 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 1951 } else { 1952 last->next = (struct free_tsb *)tsbinfop->tsb_va; 1953 last = last->next; 1954 } 1955 last->next = NULL; 1956 last->tsbinfop = tsbinfop; 1957 tsbinfop->tsb_flags |= TSB_SWAPPED; 1958 /* 1959 * Zero out the TTE to clear the valid bit. 1960 * Note we can't use a value like 0xbad because we want to 1961 * ensure diagnostic bits are NEVER set on TTEs that might 1962 * be loaded. The intent is to catch any invalid access 1963 * to the swapped TSB, such as a thread running with a valid 1964 * context without first calling sfmmu_tsb_swapin() to 1965 * allocate TSB memory. 1966 */ 1967 tsbinfop->tsb_tte.ll = 0; 1968 } 1969 1970 /* Now we can drop the lock and free the TSB memory. */ 1971 sfmmu_hat_exit(hatlockp); 1972 for (; freelist != NULL; freelist = next) { 1973 next = freelist->next; 1974 sfmmu_tsb_free(freelist->tsbinfop); 1975 } 1976 } 1977 1978 /* 1979 * Duplicate the translations of an as into another newas 1980 */ 1981 /* ARGSUSED */ 1982 int 1983 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1984 uint_t flag) 1985 { 1986 sf_srd_t *srdp; 1987 sf_scd_t *scdp; 1988 int i; 1989 extern uint_t get_color_start(struct as *); 1990 1991 ASSERT(hat->sfmmu_xhat_provider == NULL); 1992 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 1993 (flag == HAT_DUP_SRD)); 1994 ASSERT(hat != ksfmmup); 1995 ASSERT(newhat != ksfmmup); 1996 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 1997 1998 if (flag == HAT_DUP_COW) { 1999 panic("hat_dup: HAT_DUP_COW not supported"); 2000 } 2001 2002 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2003 ASSERT(srdp->srd_evp != NULL); 2004 VN_HOLD(srdp->srd_evp); 2005 ASSERT(srdp->srd_refcnt > 0); 2006 newhat->sfmmu_srdp = srdp; 2007 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 2008 } 2009 2010 /* 2011 * HAT_DUP_ALL flag is used after as duplication is done. 
2012 */ 2013 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2014 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2015 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2016 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2017 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2018 } 2019 2020 /* check if need to join scd */ 2021 if ((scdp = hat->sfmmu_scdp) != NULL && 2022 newhat->sfmmu_scdp != scdp) { 2023 int ret; 2024 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2025 &scdp->scd_region_map, ret); 2026 ASSERT(ret); 2027 sfmmu_join_scd(scdp, newhat); 2028 ASSERT(newhat->sfmmu_scdp == scdp && 2029 scdp->scd_refcnt >= 2); 2030 for (i = 0; i < max_mmu_page_sizes; i++) { 2031 newhat->sfmmu_ismttecnt[i] = 2032 hat->sfmmu_ismttecnt[i]; 2033 newhat->sfmmu_scdismttecnt[i] = 2034 hat->sfmmu_scdismttecnt[i]; 2035 } 2036 } 2037 2038 sfmmu_check_page_sizes(newhat, 1); 2039 } 2040 2041 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2042 update_proc_pgcolorbase_after_fork != 0) { 2043 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2044 } 2045 return (0); 2046 } 2047 2048 void 2049 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2050 uint_t attr, uint_t flags) 2051 { 2052 hat_do_memload(hat, addr, pp, attr, flags, 2053 SFMMU_INVALID_SHMERID); 2054 } 2055 2056 void 2057 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2058 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2059 { 2060 uint_t rid; 2061 if (rcookie == HAT_INVALID_REGION_COOKIE || 2062 hat->sfmmu_xhat_provider != NULL) { 2063 hat_do_memload(hat, addr, pp, attr, flags, 2064 SFMMU_INVALID_SHMERID); 2065 return; 2066 } 2067 rid = (uint_t)((uint64_t)rcookie); 2068 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2069 hat_do_memload(hat, addr, pp, attr, flags, rid); 2070 } 2071 2072 /* 2073 * Set up addr to map to page pp with protection prot. 2074 * As an optimization we also load the TSB with the 2075 * corresponding tte but it is no big deal if the tte gets kicked out. 2076 */ 2077 static void 2078 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2079 uint_t attr, uint_t flags, uint_t rid) 2080 { 2081 tte_t tte; 2082 2083 2084 ASSERT(hat != NULL); 2085 ASSERT(PAGE_LOCKED(pp)); 2086 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2087 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2088 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2089 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2090 2091 if (PP_ISFREE(pp)) { 2092 panic("hat_memload: loading a mapping to free page %p", 2093 (void *)pp); 2094 } 2095 2096 if (hat->sfmmu_xhat_provider) { 2097 /* no regions for xhats */ 2098 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2099 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 2100 return; 2101 } 2102 2103 ASSERT((hat == ksfmmup) || 2104 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2105 2106 if (flags & ~SFMMU_LOAD_ALLFLAG) 2107 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2108 flags & ~SFMMU_LOAD_ALLFLAG); 2109 2110 if (hat->sfmmu_rmstat) 2111 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2112 2113 #if defined(SF_ERRATA_57) 2114 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2115 (addr < errata57_limit) && (attr & PROT_EXEC) && 2116 !(flags & HAT_LOAD_SHARE)) { 2117 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2118 " page executable"); 2119 attr &= ~PROT_EXEC; 2120 } 2121 #endif 2122 2123 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2124 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2125 2126 /* 2127 * Check TSB and TLB page sizes. 
2128 */ 2129 if ((flags & HAT_LOAD_SHARE) == 0) { 2130 sfmmu_check_page_sizes(hat, 1); 2131 } 2132 } 2133 2134 /* 2135 * hat_devload can be called to map real memory (e.g. 2136 * /dev/kmem) and even though hat_devload will determine pf is 2137 * for memory, it will be unable to get a shared lock on the 2138 * page (because someone else has it exclusively) and will 2139 * pass dp = NULL. If tteload doesn't get a non-NULL 2140 * page pointer it can't cache memory. 2141 */ 2142 void 2143 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2144 uint_t attr, int flags) 2145 { 2146 tte_t tte; 2147 struct page *pp = NULL; 2148 int use_lgpg = 0; 2149 2150 ASSERT(hat != NULL); 2151 2152 if (hat->sfmmu_xhat_provider) { 2153 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 2154 return; 2155 } 2156 2157 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2158 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2159 ASSERT((hat == ksfmmup) || 2160 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2161 if (len == 0) 2162 panic("hat_devload: zero len"); 2163 if (flags & ~SFMMU_LOAD_ALLFLAG) 2164 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2165 flags & ~SFMMU_LOAD_ALLFLAG); 2166 2167 #if defined(SF_ERRATA_57) 2168 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2169 (addr < errata57_limit) && (attr & PROT_EXEC) && 2170 !(flags & HAT_LOAD_SHARE)) { 2171 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2172 " page executable"); 2173 attr &= ~PROT_EXEC; 2174 } 2175 #endif 2176 2177 /* 2178 * If it's a memory page find its pp 2179 */ 2180 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2181 pp = page_numtopp_nolock(pfn); 2182 if (pp == NULL) { 2183 flags |= HAT_LOAD_NOCONSIST; 2184 } else { 2185 if (PP_ISFREE(pp)) { 2186 panic("hat_memload: loading " 2187 "a mapping to free page %p", 2188 (void *)pp); 2189 } 2190 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2191 panic("hat_memload: loading a mapping " 2192 "to unlocked relocatable page %p", 2193 (void *)pp); 2194 } 2195 ASSERT(len == MMU_PAGESIZE); 2196 } 2197 } 2198 2199 if (hat->sfmmu_rmstat) 2200 hat_resvstat(len, hat->sfmmu_as, addr); 2201 2202 if (flags & HAT_LOAD_NOCONSIST) { 2203 attr |= SFMMU_UNCACHEVTTE; 2204 use_lgpg = 1; 2205 } 2206 if (!pf_is_memory(pfn)) { 2207 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2208 use_lgpg = 1; 2209 switch (attr & HAT_ORDER_MASK) { 2210 case HAT_STRICTORDER: 2211 case HAT_UNORDERED_OK: 2212 /* 2213 * we set the side effect bit for all non 2214 * memory mappings unless merging is ok 2215 */ 2216 attr |= SFMMU_SIDEFFECT; 2217 break; 2218 case HAT_MERGING_OK: 2219 case HAT_LOADCACHING_OK: 2220 case HAT_STORECACHING_OK: 2221 break; 2222 default: 2223 panic("hat_devload: bad attr"); 2224 break; 2225 } 2226 } 2227 while (len) { 2228 if (!use_lgpg) { 2229 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2230 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2231 flags, SFMMU_INVALID_SHMERID); 2232 len -= MMU_PAGESIZE; 2233 addr += MMU_PAGESIZE; 2234 pfn++; 2235 continue; 2236 } 2237 /* 2238 * try to use large pages, check va/pa alignments 2239 * Note that 32M/256M page sizes are not (yet) supported. 
2240 */ 2241 if ((len >= MMU_PAGESIZE4M) && 2242 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 2243 !(disable_large_pages & (1 << TTE4M)) && 2244 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 2245 sfmmu_memtte(&tte, pfn, attr, TTE4M); 2246 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2247 flags, SFMMU_INVALID_SHMERID); 2248 len -= MMU_PAGESIZE4M; 2249 addr += MMU_PAGESIZE4M; 2250 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 2251 } else if ((len >= MMU_PAGESIZE512K) && 2252 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 2253 !(disable_large_pages & (1 << TTE512K)) && 2254 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 2255 sfmmu_memtte(&tte, pfn, attr, TTE512K); 2256 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2257 flags, SFMMU_INVALID_SHMERID); 2258 len -= MMU_PAGESIZE512K; 2259 addr += MMU_PAGESIZE512K; 2260 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 2261 } else if ((len >= MMU_PAGESIZE64K) && 2262 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 2263 !(disable_large_pages & (1 << TTE64K)) && 2264 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 2265 sfmmu_memtte(&tte, pfn, attr, TTE64K); 2266 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2267 flags, SFMMU_INVALID_SHMERID); 2268 len -= MMU_PAGESIZE64K; 2269 addr += MMU_PAGESIZE64K; 2270 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 2271 } else { 2272 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2273 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2274 flags, SFMMU_INVALID_SHMERID); 2275 len -= MMU_PAGESIZE; 2276 addr += MMU_PAGESIZE; 2277 pfn++; 2278 } 2279 } 2280 2281 /* 2282 * Check TSB and TLB page sizes. 2283 */ 2284 if ((flags & HAT_LOAD_SHARE) == 0) { 2285 sfmmu_check_page_sizes(hat, 1); 2286 } 2287 } 2288 2289 void 2290 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2291 struct page **pps, uint_t attr, uint_t flags) 2292 { 2293 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2294 SFMMU_INVALID_SHMERID); 2295 } 2296 2297 void 2298 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 2299 struct page **pps, uint_t attr, uint_t flags, 2300 hat_region_cookie_t rcookie) 2301 { 2302 uint_t rid; 2303 if (rcookie == HAT_INVALID_REGION_COOKIE || 2304 hat->sfmmu_xhat_provider != NULL) { 2305 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2306 SFMMU_INVALID_SHMERID); 2307 return; 2308 } 2309 rid = (uint_t)((uint64_t)rcookie); 2310 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2311 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid); 2312 } 2313 2314 /* 2315 * Map the largest extend possible out of the page array. The array may NOT 2316 * be in order. The largest possible mapping a page can have 2317 * is specified in the p_szc field. The p_szc field 2318 * cannot change as long as there any mappings (large or small) 2319 * to any of the pages that make up the large page. (ie. any 2320 * promotion/demotion of page size is not up to the hat but up to 2321 * the page free list manager). The array 2322 * should consist of properly aligned contigous pages that are 2323 * part of a big page for a large mapping to be created. 
2324 */ 2325 static void 2326 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len, 2327 struct page **pps, uint_t attr, uint_t flags, uint_t rid) 2328 { 2329 int ttesz; 2330 size_t mapsz; 2331 pgcnt_t numpg, npgs; 2332 tte_t tte; 2333 page_t *pp; 2334 uint_t large_pages_disable; 2335 2336 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2337 SFMMU_VALIDATE_HMERID(hat, rid, addr, len); 2338 2339 if (hat->sfmmu_xhat_provider) { 2340 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2341 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 2342 return; 2343 } 2344 2345 if (hat->sfmmu_rmstat) 2346 hat_resvstat(len, hat->sfmmu_as, addr); 2347 2348 #if defined(SF_ERRATA_57) 2349 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2350 (addr < errata57_limit) && (attr & PROT_EXEC) && 2351 !(flags & HAT_LOAD_SHARE)) { 2352 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2353 "user page executable"); 2354 attr &= ~PROT_EXEC; 2355 } 2356 #endif 2357 2358 /* Get number of pages */ 2359 npgs = len >> MMU_PAGESHIFT; 2360 2361 if (flags & HAT_LOAD_SHARE) { 2362 large_pages_disable = disable_ism_large_pages; 2363 } else { 2364 large_pages_disable = disable_large_pages; 2365 } 2366 2367 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2368 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2369 rid); 2370 return; 2371 } 2372 2373 while (npgs >= NHMENTS) { 2374 pp = *pps; 2375 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2376 /* 2377 * Check if this page size is disabled. 2378 */ 2379 if (large_pages_disable & (1 << ttesz)) 2380 continue; 2381 2382 numpg = TTEPAGES(ttesz); 2383 mapsz = numpg << MMU_PAGESHIFT; 2384 if ((npgs >= numpg) && 2385 IS_P2ALIGNED(addr, mapsz) && 2386 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2387 /* 2388 * At this point we have enough pages and 2389 * we know the virtual address and the pfn 2390 * are properly aligned. We still need 2391 * to check for physical contiguity but since 2392 * it is very likely that this is the case 2393 * we will assume they are so and undo 2394 * the request if necessary. It would 2395 * be great if we could get a hint flag 2396 * like HAT_CONTIG which would tell us 2397 * the pages are contigous for sure. 2398 */ 2399 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2400 attr, ttesz); 2401 if (!sfmmu_tteload_array(hat, &tte, addr, 2402 pps, flags, rid)) { 2403 break; 2404 } 2405 } 2406 } 2407 if (ttesz == TTE8K) { 2408 /* 2409 * We were not able to map array using a large page 2410 * batch a hmeblk or fraction at a time. 2411 */ 2412 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2413 & (NHMENTS-1); 2414 numpg = NHMENTS - numpg; 2415 ASSERT(numpg <= npgs); 2416 mapsz = numpg * MMU_PAGESIZE; 2417 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2418 numpg, rid); 2419 } 2420 addr += mapsz; 2421 npgs -= numpg; 2422 pps += numpg; 2423 } 2424 2425 if (npgs) { 2426 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2427 rid); 2428 } 2429 2430 /* 2431 * Check TSB and TLB page sizes. 2432 */ 2433 if ((flags & HAT_LOAD_SHARE) == 0) { 2434 sfmmu_check_page_sizes(hat, 1); 2435 } 2436 } 2437 2438 /* 2439 * Function tries to batch 8K pages into the same hme blk. 2440 */ 2441 static void 2442 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2443 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid) 2444 { 2445 tte_t tte; 2446 page_t *pp; 2447 struct hmehash_bucket *hmebp; 2448 struct hme_blk *hmeblkp; 2449 int index; 2450 2451 while (npgs) { 2452 /* 2453 * Acquire the hash bucket. 
2454 */ 2455 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2456 rid); 2457 ASSERT(hmebp); 2458 2459 /* 2460 * Find the hment block. 2461 */ 2462 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2463 TTE8K, flags, rid); 2464 ASSERT(hmeblkp); 2465 2466 do { 2467 /* 2468 * Make the tte. 2469 */ 2470 pp = *pps; 2471 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2472 2473 /* 2474 * Add the translation. 2475 */ 2476 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2477 vaddr, pps, flags, rid); 2478 2479 /* 2480 * Goto next page. 2481 */ 2482 pps++; 2483 npgs--; 2484 2485 /* 2486 * Goto next address. 2487 */ 2488 vaddr += MMU_PAGESIZE; 2489 2490 /* 2491 * Don't crossover into a different hmentblk. 2492 */ 2493 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2494 (NHMENTS-1)); 2495 2496 } while (index != 0 && npgs != 0); 2497 2498 /* 2499 * Release the hash bucket. 2500 */ 2501 2502 sfmmu_tteload_release_hashbucket(hmebp); 2503 } 2504 } 2505 2506 /* 2507 * Construct a tte for a page: 2508 * 2509 * tte_valid = 1 2510 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2511 * tte_size = size 2512 * tte_nfo = attr & HAT_NOFAULT 2513 * tte_ie = attr & HAT_STRUCTURE_LE 2514 * tte_hmenum = hmenum 2515 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2516 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2517 * tte_ref = 1 (optimization) 2518 * tte_wr_perm = attr & PROT_WRITE; 2519 * tte_no_sync = attr & HAT_NOSYNC 2520 * tte_lock = attr & SFMMU_LOCKTTE 2521 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2522 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2523 * tte_e = attr & SFMMU_SIDEFFECT 2524 * tte_priv = !(attr & PROT_USER) 2525 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2526 * tte_glb = 0 2527 */ 2528 void 2529 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2530 { 2531 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2532 2533 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2534 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2535 2536 if (TTE_IS_NOSYNC(ttep)) { 2537 TTE_SET_REF(ttep); 2538 if (TTE_IS_WRITABLE(ttep)) { 2539 TTE_SET_MOD(ttep); 2540 } 2541 } 2542 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2543 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2544 } 2545 } 2546 2547 /* 2548 * This function will add a translation to the hme_blk and allocate the 2549 * hme_blk if one does not exist. 2550 * If a page structure is specified then it will add the 2551 * corresponding hment to the mapping list. 2552 * It will also update the hmenum field for the tte. 2553 * 2554 * Currently this function is only used for kernel mappings. 2555 * So pass invalid region to sfmmu_tteload_array(). 2556 */ 2557 void 2558 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2559 uint_t flags) 2560 { 2561 ASSERT(sfmmup == ksfmmup); 2562 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2563 SFMMU_INVALID_SHMERID); 2564 } 2565 2566 /* 2567 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2568 * Assumes that a particular page size may only be resident in one TSB. 
2569 */ 2570 static void 2571 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2572 { 2573 struct tsb_info *tsbinfop = NULL; 2574 uint64_t tag; 2575 struct tsbe *tsbe_addr; 2576 uint64_t tsb_base; 2577 uint_t tsb_size; 2578 int vpshift = MMU_PAGESHIFT; 2579 int phys = 0; 2580 2581 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2582 phys = ktsb_phys; 2583 if (ttesz >= TTE4M) { 2584 #ifndef sun4v 2585 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2586 #endif 2587 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2588 tsb_size = ktsb4m_szcode; 2589 } else { 2590 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2591 tsb_size = ktsb_szcode; 2592 } 2593 } else { 2594 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2595 2596 /* 2597 * If there isn't a TSB for this page size, or the TSB is 2598 * swapped out, there is nothing to do. Note that the latter 2599 * case seems impossible but can occur if hat_pageunload() 2600 * is called on an ISM mapping while the process is swapped 2601 * out. 2602 */ 2603 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2604 return; 2605 2606 /* 2607 * If another thread is in the middle of relocating a TSB 2608 * we can't unload the entry so set a flag so that the 2609 * TSB will be flushed before it can be accessed by the 2610 * process. 2611 */ 2612 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2613 if (ttep == NULL) 2614 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2615 return; 2616 } 2617 #if defined(UTSB_PHYS) 2618 phys = 1; 2619 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2620 #else 2621 tsb_base = (uint64_t)tsbinfop->tsb_va; 2622 #endif 2623 tsb_size = tsbinfop->tsb_szc; 2624 } 2625 if (ttesz >= TTE4M) 2626 vpshift = MMU_PAGESHIFT4M; 2627 2628 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2629 tag = sfmmu_make_tsbtag(vaddr); 2630 2631 if (ttep == NULL) { 2632 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2633 } else { 2634 if (ttesz >= TTE4M) { 2635 SFMMU_STAT(sf_tsb_load4m); 2636 } else { 2637 SFMMU_STAT(sf_tsb_load8k); 2638 } 2639 2640 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2641 } 2642 } 2643 2644 /* 2645 * Unmap all entries from [start, end) matching the given page size. 2646 * 2647 * This function is used primarily to unmap replicated 64K or 512K entries 2648 * from the TSB that are inserted using the base page size TSB pointer, but 2649 * it may also be called to unmap a range of addresses from the TSB. 2650 */ 2651 void 2652 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2653 { 2654 struct tsb_info *tsbinfop; 2655 uint64_t tag; 2656 struct tsbe *tsbe_addr; 2657 caddr_t vaddr; 2658 uint64_t tsb_base; 2659 int vpshift, vpgsz; 2660 uint_t tsb_size; 2661 int phys = 0; 2662 2663 /* 2664 * Assumptions: 2665 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2666 * at a time shooting down any valid entries we encounter. 2667 * 2668 * If ttesz >= 4M we walk the range 4M at a time shooting 2669 * down any valid mappings we find. 2670 */ 2671 if (sfmmup == ksfmmup) { 2672 phys = ktsb_phys; 2673 if (ttesz >= TTE4M) { 2674 #ifndef sun4v 2675 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2676 #endif 2677 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2678 tsb_size = ktsb4m_szcode; 2679 } else { 2680 tsb_base = (phys)? 
ktsb_pbase : (uint64_t)ktsb_base; 2681 tsb_size = ktsb_szcode; 2682 } 2683 } else { 2684 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2685 2686 /* 2687 * If there isn't a TSB for this page size, or the TSB is 2688 * swapped out, there is nothing to do. Note that the latter 2689 * case seems impossible but can occur if hat_pageunload() 2690 * is called on an ISM mapping while the process is swapped 2691 * out. 2692 */ 2693 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2694 return; 2695 2696 /* 2697 * If another thread is in the middle of relocating a TSB 2698 * we can't unload the entry so set a flag so that the 2699 * TSB will be flushed before it can be accessed by the 2700 * process. 2701 */ 2702 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2703 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2704 return; 2705 } 2706 #if defined(UTSB_PHYS) 2707 phys = 1; 2708 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2709 #else 2710 tsb_base = (uint64_t)tsbinfop->tsb_va; 2711 #endif 2712 tsb_size = tsbinfop->tsb_szc; 2713 } 2714 if (ttesz >= TTE4M) { 2715 vpshift = MMU_PAGESHIFT4M; 2716 vpgsz = MMU_PAGESIZE4M; 2717 } else { 2718 vpshift = MMU_PAGESHIFT; 2719 vpgsz = MMU_PAGESIZE; 2720 } 2721 2722 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2723 tag = sfmmu_make_tsbtag(vaddr); 2724 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2725 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2726 } 2727 } 2728 2729 /* 2730 * Select the optimum TSB size given the number of mappings 2731 * that need to be cached. 2732 */ 2733 static int 2734 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2735 { 2736 int szc = 0; 2737 2738 #ifdef DEBUG 2739 if (tsb_grow_stress) { 2740 uint32_t randval = (uint32_t)gettick() >> 4; 2741 return (randval % (tsb_max_growsize + 1)); 2742 } 2743 #endif /* DEBUG */ 2744 2745 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2746 szc++; 2747 return (szc); 2748 } 2749 2750 /* 2751 * This function will add a translation to the hme_blk and allocate the 2752 * hme_blk if one does not exist. 2753 * If a page structure is specified then it will add the 2754 * corresponding hment to the mapping list. 2755 * It will also update the hmenum field for the tte. 2756 * Furthermore, it attempts to create a large page translation 2757 * for <addr,hat> at page array pps. It assumes addr and first 2758 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2759 */ 2760 static int 2761 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2762 page_t **pps, uint_t flags, uint_t rid) 2763 { 2764 struct hmehash_bucket *hmebp; 2765 struct hme_blk *hmeblkp; 2766 int ret; 2767 uint_t size; 2768 2769 /* 2770 * Get mapping size. 2771 */ 2772 size = TTE_CSZ(ttep); 2773 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2774 2775 /* 2776 * Acquire the hash bucket. 2777 */ 2778 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2779 ASSERT(hmebp); 2780 2781 /* 2782 * Find the hment block. 2783 */ 2784 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2785 rid); 2786 ASSERT(hmeblkp); 2787 2788 /* 2789 * Add the translation. 2790 */ 2791 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2792 rid); 2793 2794 /* 2795 * Release the hash bucket. 2796 */ 2797 sfmmu_tteload_release_hashbucket(hmebp); 2798 2799 return (ret); 2800 } 2801 2802 /* 2803 * Function locks and returns a pointer to the hash bucket for vaddr and size. 
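 * The bucket is chosen by hashing the hat (or the SRD, via sfmmutohtagid(), for shared regions) together with the base page of vaddr at this mapping size, and it is returned with its hash lock held.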
2804 */ 2805 static struct hmehash_bucket * 2806 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size, 2807 uint_t rid) 2808 { 2809 struct hmehash_bucket *hmebp; 2810 int hmeshift; 2811 void *htagid = sfmmutohtagid(sfmmup, rid); 2812 2813 ASSERT(htagid != NULL); 2814 2815 hmeshift = HME_HASH_SHIFT(size); 2816 2817 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift); 2818 2819 SFMMU_HASH_LOCK(hmebp); 2820 2821 return (hmebp); 2822 } 2823 2824 /* 2825 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2826 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2827 * allocated. 2828 */ 2829 static struct hme_blk * 2830 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2831 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid) 2832 { 2833 hmeblk_tag hblktag; 2834 int hmeshift; 2835 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2836 uint64_t hblkpa, prevpa; 2837 struct kmem_cache *sfmmu_cache; 2838 uint_t forcefree; 2839 2840 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2841 2842 hblktag.htag_id = sfmmutohtagid(sfmmup, rid); 2843 ASSERT(hblktag.htag_id != NULL); 2844 hmeshift = HME_HASH_SHIFT(size); 2845 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2846 hblktag.htag_rehash = HME_HASH_REHASH(size); 2847 hblktag.htag_rid = rid; 2848 2849 ttearray_realloc: 2850 2851 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2852 pr_hblk, prevpa, &list); 2853 2854 /* 2855 * We block until hblk_reserve_lock is released; it's held by 2856 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2857 * replaced by a hblk from sfmmu8_cache. 2858 */ 2859 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2860 hblk_reserve_thread != curthread) { 2861 SFMMU_HASH_UNLOCK(hmebp); 2862 mutex_enter(&hblk_reserve_lock); 2863 mutex_exit(&hblk_reserve_lock); 2864 SFMMU_STAT(sf_hblk_reserve_hit); 2865 SFMMU_HASH_LOCK(hmebp); 2866 goto ttearray_realloc; 2867 } 2868 2869 if (hmeblkp == NULL) { 2870 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2871 hblktag, flags, rid); 2872 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2873 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2874 } else { 2875 /* 2876 * It is possible for 8k and 64k hblks to collide since they 2877 * have the same rehash value. This is because we 2878 * lazily free hblks and 8K/64K blks could be lingering. 2879 * If we find size mismatch we free the block and & try again. 2880 */ 2881 if (get_hblk_ttesz(hmeblkp) != size) { 2882 ASSERT(!hmeblkp->hblk_vcnt); 2883 ASSERT(!hmeblkp->hblk_hmecnt); 2884 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2885 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2886 goto ttearray_realloc; 2887 } 2888 if (hmeblkp->hblk_shw_bit) { 2889 /* 2890 * if the hblk was previously used as a shadow hblk then 2891 * we will change it to a normal hblk 2892 */ 2893 ASSERT(!hmeblkp->hblk_shared); 2894 if (hmeblkp->hblk_shw_mask) { 2895 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2896 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2897 goto ttearray_realloc; 2898 } else { 2899 hmeblkp->hblk_shw_bit = 0; 2900 } 2901 } 2902 SFMMU_STAT(sf_hblk_hit); 2903 } 2904 2905 /* 2906 * hat_memload() should never call kmem_cache_free(); see block 2907 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2908 * enqueue each hblk in the list to reserve list if it's created 2909 * from sfmmu8_cache *and* sfmmup == KHATID. 2910 */ 2911 forcefree = (sfmmup == KHATID) ? 
1 : 0; 2912 while ((pr_hblk = list) != NULL) { 2913 list = pr_hblk->hblk_next; 2914 sfmmu_cache = get_hblk_cache(pr_hblk); 2915 if ((sfmmu_cache == sfmmu8_cache) && 2916 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2917 continue; 2918 2919 ASSERT(sfmmup != KHATID); 2920 kmem_cache_free(sfmmu_cache, pr_hblk); 2921 } 2922 2923 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2924 ASSERT(!hmeblkp->hblk_shw_bit); 2925 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2926 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2927 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 2928 2929 return (hmeblkp); 2930 } 2931 2932 /* 2933 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2934 * otherwise. 2935 */ 2936 static int 2937 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2938 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 2939 { 2940 page_t *pp = *pps; 2941 int hmenum, size, remap; 2942 tte_t tteold, flush_tte; 2943 #ifdef DEBUG 2944 tte_t orig_old; 2945 #endif /* DEBUG */ 2946 struct sf_hment *sfhme; 2947 kmutex_t *pml, *pmtx; 2948 hatlock_t *hatlockp; 2949 int myflt; 2950 2951 /* 2952 * remove this panic when we decide to let user virtual address 2953 * space be >= USERLIMIT. 2954 */ 2955 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2956 panic("user addr %p in kernel space", (void *)vaddr); 2957 #if defined(TTE_IS_GLOBAL) 2958 if (TTE_IS_GLOBAL(ttep)) 2959 panic("sfmmu_tteload: creating global tte"); 2960 #endif 2961 2962 #ifdef DEBUG 2963 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2964 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2965 panic("sfmmu_tteload: non cacheable memory tte"); 2966 #endif /* DEBUG */ 2967 2968 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 2969 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 2970 TTE_SET_REF(ttep); 2971 TTE_SET_MOD(ttep); 2972 } 2973 2974 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2975 !TTE_IS_MOD(ttep)) { 2976 /* 2977 * Don't load TSB for dummy as in ISM. Also don't preload 2978 * the TSB if the TTE isn't writable since we're likely to 2979 * fault on it again -- preloading can be fairly expensive. 2980 */ 2981 flags |= SFMMU_NO_TSBLOAD; 2982 } 2983 2984 size = TTE_CSZ(ttep); 2985 switch (size) { 2986 case TTE8K: 2987 SFMMU_STAT(sf_tteload8k); 2988 break; 2989 case TTE64K: 2990 SFMMU_STAT(sf_tteload64k); 2991 break; 2992 case TTE512K: 2993 SFMMU_STAT(sf_tteload512k); 2994 break; 2995 case TTE4M: 2996 SFMMU_STAT(sf_tteload4m); 2997 break; 2998 case (TTE32M): 2999 SFMMU_STAT(sf_tteload32m); 3000 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3001 break; 3002 case (TTE256M): 3003 SFMMU_STAT(sf_tteload256m); 3004 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3005 break; 3006 } 3007 3008 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 3009 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 3010 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3011 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3012 3013 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3014 3015 /* 3016 * Need to grab mlist lock here so that pageunload 3017 * will not change tte behind us. 3018 */ 3019 if (pp) { 3020 pml = sfmmu_mlist_enter(pp); 3021 } 3022 3023 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3024 /* 3025 * Look for corresponding hment and if valid verify 3026 * pfns are equal. 
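 * A valid old tte means this is a remap: the pfn may change only under HAT_LOAD_REMAP and only when no page_t is involved on either side; any other pfn mismatch is a panic and the tte size must not change.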
3027 */ 3028 remap = TTE_IS_VALID(&tteold); 3029 if (remap) { 3030 pfn_t new_pfn, old_pfn; 3031 3032 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3033 new_pfn = TTE_TO_PFN(vaddr, ttep); 3034 3035 if (flags & HAT_LOAD_REMAP) { 3036 /* make sure we are remapping same type of pages */ 3037 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3038 panic("sfmmu_tteload - tte remap io<->memory"); 3039 } 3040 if (old_pfn != new_pfn && 3041 (pp != NULL || sfhme->hme_page != NULL)) { 3042 panic("sfmmu_tteload - tte remap pp != NULL"); 3043 } 3044 } else if (old_pfn != new_pfn) { 3045 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3046 (void *)hmeblkp); 3047 } 3048 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3049 } 3050 3051 if (pp) { 3052 if (size == TTE8K) { 3053 #ifdef VAC 3054 /* 3055 * Handle VAC consistency 3056 */ 3057 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3058 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3059 } 3060 #endif 3061 3062 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3063 pmtx = sfmmu_page_enter(pp); 3064 PP_CLRRO(pp); 3065 sfmmu_page_exit(pmtx); 3066 } else if (!PP_ISMAPPED(pp) && 3067 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3068 pmtx = sfmmu_page_enter(pp); 3069 if (!(PP_ISMOD(pp))) { 3070 PP_SETRO(pp); 3071 } 3072 sfmmu_page_exit(pmtx); 3073 } 3074 3075 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3076 /* 3077 * sfmmu_pagearray_setup failed so return 3078 */ 3079 sfmmu_mlist_exit(pml); 3080 return (1); 3081 } 3082 } 3083 3084 /* 3085 * Make sure hment is not on a mapping list. 3086 */ 3087 ASSERT(remap || (sfhme->hme_page == NULL)); 3088 3089 /* if it is not a remap then hme->next better be NULL */ 3090 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3091 3092 if (flags & HAT_LOAD_LOCK) { 3093 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3094 panic("too high lckcnt-hmeblk %p", 3095 (void *)hmeblkp); 3096 } 3097 atomic_add_32(&hmeblkp->hblk_lckcnt, 1); 3098 3099 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3100 } 3101 3102 #ifdef VAC 3103 if (pp && PP_ISNC(pp)) { 3104 /* 3105 * If the physical page is marked to be uncacheable, like 3106 * by a vac conflict, make sure the new mapping is also 3107 * uncacheable. 3108 */ 3109 TTE_CLR_VCACHEABLE(ttep); 3110 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3111 } 3112 #endif 3113 ttep->tte_hmenum = hmenum; 3114 3115 #ifdef DEBUG 3116 orig_old = tteold; 3117 #endif /* DEBUG */ 3118 3119 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3120 if ((sfmmup == KHATID) && 3121 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3122 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3123 } 3124 #ifdef DEBUG 3125 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3126 #endif /* DEBUG */ 3127 } 3128 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3129 3130 if (!TTE_IS_VALID(&tteold)) { 3131 3132 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 3133 if (rid == SFMMU_INVALID_SHMERID) { 3134 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 3135 } else { 3136 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3137 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3138 /* 3139 * We already accounted for region ttecnt's in sfmmu 3140 * during hat_join_region() processing. Here we 3141 * only update ttecnt's in region struture. 
3142 */ 3143 atomic_add_long(&rgnp->rgn_ttecnt[size], 1); 3144 } 3145 } 3146 3147 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3148 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3149 sfmmup != ksfmmup) { 3150 uchar_t tteflag = 1 << size; 3151 if (rid == SFMMU_INVALID_SHMERID) { 3152 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3153 hatlockp = sfmmu_hat_enter(sfmmup); 3154 sfmmup->sfmmu_tteflags |= tteflag; 3155 sfmmu_hat_exit(hatlockp); 3156 } 3157 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3158 hatlockp = sfmmu_hat_enter(sfmmup); 3159 sfmmup->sfmmu_rtteflags |= tteflag; 3160 sfmmu_hat_exit(hatlockp); 3161 } 3162 /* 3163 * Update the current CPU tsbmiss area, so the current thread 3164 * won't need to take the tsbmiss for the new pagesize. 3165 * The other threads in the process will update their tsb 3166 * miss area lazily in sfmmu_tsbmiss_exception() when they 3167 * fail to find the translation for a newly added pagesize. 3168 */ 3169 if (size > TTE64K && myflt) { 3170 struct tsbmiss *tsbmp; 3171 kpreempt_disable(); 3172 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3173 if (rid == SFMMU_INVALID_SHMERID) { 3174 if (!(tsbmp->uhat_tteflags & tteflag)) { 3175 tsbmp->uhat_tteflags |= tteflag; 3176 } 3177 } else { 3178 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3179 tsbmp->uhat_rtteflags |= tteflag; 3180 } 3181 } 3182 kpreempt_enable(); 3183 } 3184 } 3185 3186 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3187 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3188 hatlockp = sfmmu_hat_enter(sfmmup); 3189 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3190 sfmmu_hat_exit(hatlockp); 3191 } 3192 3193 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3194 hw_tte.tte_intlo; 3195 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3196 hw_tte.tte_inthi; 3197 3198 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3199 /* 3200 * If remap and new tte differs from old tte we need 3201 * to sync the mod bit and flush TLB/TSB. We don't 3202 * need to sync ref bit because we currently always set 3203 * ref bit in tteload. 3204 */ 3205 ASSERT(TTE_IS_REF(ttep)); 3206 if (TTE_IS_MOD(&tteold)) { 3207 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3208 } 3209 /* 3210 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3211 * hmes are only used for read only text. Adding this code for 3212 * completeness and future use of shared hmeblks with writable 3213 * mappings of VMODSORT vnodes. 3214 */ 3215 if (hmeblkp->hblk_shared) { 3216 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3217 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3218 xt_sync(cpuset); 3219 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3220 } else { 3221 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3222 xt_sync(sfmmup->sfmmu_cpusran); 3223 } 3224 } 3225 3226 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3227 /* 3228 * We only preload 8K and 4M mappings into the TSB, since 3229 * 64K and 512K mappings are replicated and hence don't 3230 * have a single, unique TSB entry. Ditto for 32M/256M. 3231 */ 3232 if (size == TTE8K || size == TTE4M) { 3233 sf_scd_t *scdp; 3234 hatlockp = sfmmu_hat_enter(sfmmup); 3235 /* 3236 * Don't preload private TSB if the mapping is used 3237 * by the shctx in the SCD. 
3238 */ 3239 scdp = sfmmup->sfmmu_scdp; 3240 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3241 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3242 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3243 size); 3244 } 3245 sfmmu_hat_exit(hatlockp); 3246 } 3247 } 3248 if (pp) { 3249 if (!remap) { 3250 HME_ADD(sfhme, pp); 3251 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 3252 ASSERT(hmeblkp->hblk_hmecnt > 0); 3253 3254 /* 3255 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3256 * see pageunload() for comment. 3257 */ 3258 } 3259 sfmmu_mlist_exit(pml); 3260 } 3261 3262 return (0); 3263 } 3264 /* 3265 * Function unlocks hash bucket. 3266 */ 3267 static void 3268 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3269 { 3270 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3271 SFMMU_HASH_UNLOCK(hmebp); 3272 } 3273 3274 /* 3275 * function which checks and sets up page array for a large 3276 * translation. Will set p_vcolor, p_index, p_ro fields. 3277 * Assumes addr and pfnum of first page are properly aligned. 3278 * Will check for physical contiguity. If check fails it return 3279 * non null. 3280 */ 3281 static int 3282 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3283 { 3284 int i, index, ttesz; 3285 pfn_t pfnum; 3286 pgcnt_t npgs; 3287 page_t *pp, *pp1; 3288 kmutex_t *pmtx; 3289 #ifdef VAC 3290 int osz; 3291 int cflags = 0; 3292 int vac_err = 0; 3293 #endif 3294 int newidx = 0; 3295 3296 ttesz = TTE_CSZ(ttep); 3297 3298 ASSERT(ttesz > TTE8K); 3299 3300 npgs = TTEPAGES(ttesz); 3301 index = PAGESZ_TO_INDEX(ttesz); 3302 3303 pfnum = (*pps)->p_pagenum; 3304 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3305 3306 /* 3307 * Save the first pp so we can do HAT_TMPNC at the end. 3308 */ 3309 pp1 = *pps; 3310 #ifdef VAC 3311 osz = fnd_mapping_sz(pp1); 3312 #endif 3313 3314 for (i = 0; i < npgs; i++, pps++) { 3315 pp = *pps; 3316 ASSERT(PAGE_LOCKED(pp)); 3317 ASSERT(pp->p_szc >= ttesz); 3318 ASSERT(pp->p_szc == pp1->p_szc); 3319 ASSERT(sfmmu_mlist_held(pp)); 3320 3321 /* 3322 * XXX is it possible to maintain P_RO on the root only? 3323 */ 3324 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3325 pmtx = sfmmu_page_enter(pp); 3326 PP_CLRRO(pp); 3327 sfmmu_page_exit(pmtx); 3328 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 3329 !PP_ISMOD(pp)) { 3330 pmtx = sfmmu_page_enter(pp); 3331 if (!(PP_ISMOD(pp))) { 3332 PP_SETRO(pp); 3333 } 3334 sfmmu_page_exit(pmtx); 3335 } 3336 3337 /* 3338 * If this is a remap we skip vac & contiguity checks. 3339 */ 3340 if (remap) 3341 continue; 3342 3343 /* 3344 * set p_vcolor and detect any vac conflicts. 3345 */ 3346 #ifdef VAC 3347 if (vac_err == 0) { 3348 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 3349 3350 } 3351 #endif 3352 3353 /* 3354 * Save current index in case we need to undo it. 3355 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 3356 * "SFMMU_INDEX_SHIFT 6" 3357 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 3358 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 3359 * 3360 * So: index = PAGESZ_TO_INDEX(ttesz); 3361 * if ttesz == 1 then index = 0x2 3362 * 2 then index = 0x4 3363 * 3 then index = 0x8 3364 * 4 then index = 0x10 3365 * 5 then index = 0x20 3366 * The code below checks if it's a new pagesize (ie, newidx) 3367 * in case we need to take it back out of p_index, 3368 * and then or's the new index into the existing index. 
3369 */ 3370 if ((PP_MAPINDEX(pp) & index) == 0) 3371 newidx = 1; 3372 pp->p_index = (PP_MAPINDEX(pp) | index); 3373 3374 /* 3375 * contiguity check 3376 */ 3377 if (pp->p_pagenum != pfnum) { 3378 /* 3379 * If we fail the contiguity test then 3380 * the only thing we need to fix is the p_index field. 3381 * We might get a few extra flushes but since this 3382 * path is rare that is ok. The p_ro field will 3383 * get automatically fixed on the next tteload to 3384 * the page. NO TNC bit is set yet. 3385 */ 3386 while (i >= 0) { 3387 pp = *pps; 3388 if (newidx) 3389 pp->p_index = (PP_MAPINDEX(pp) & 3390 ~index); 3391 pps--; 3392 i--; 3393 } 3394 return (1); 3395 } 3396 pfnum++; 3397 addr += MMU_PAGESIZE; 3398 } 3399 3400 #ifdef VAC 3401 if (vac_err) { 3402 if (ttesz > osz) { 3403 /* 3404 * There are some smaller mappings that causes vac 3405 * conflicts. Convert all existing small mappings to 3406 * TNC. 3407 */ 3408 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3409 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3410 npgs); 3411 } else { 3412 /* EMPTY */ 3413 /* 3414 * If there exists an big page mapping, 3415 * that means the whole existing big page 3416 * has TNC setting already. No need to covert to 3417 * TNC again. 3418 */ 3419 ASSERT(PP_ISTNC(pp1)); 3420 } 3421 } 3422 #endif /* VAC */ 3423 3424 return (0); 3425 } 3426 3427 #ifdef VAC 3428 /* 3429 * Routine that detects vac consistency for a large page. It also 3430 * sets virtual color for all pp's for this big mapping. 3431 */ 3432 static int 3433 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3434 { 3435 int vcolor, ocolor; 3436 3437 ASSERT(sfmmu_mlist_held(pp)); 3438 3439 if (PP_ISNC(pp)) { 3440 return (HAT_TMPNC); 3441 } 3442 3443 vcolor = addr_to_vcolor(addr); 3444 if (PP_NEWPAGE(pp)) { 3445 PP_SET_VCOLOR(pp, vcolor); 3446 return (0); 3447 } 3448 3449 ocolor = PP_GET_VCOLOR(pp); 3450 if (ocolor == vcolor) { 3451 return (0); 3452 } 3453 3454 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 3455 /* 3456 * Previous user of page had a differnet color 3457 * but since there are no current users 3458 * we just flush the cache and change the color. 3459 * As an optimization for large pages we flush the 3460 * entire cache of that color and set a flag. 3461 */ 3462 SFMMU_STAT(sf_pgcolor_conflict); 3463 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3464 CacheColor_SetFlushed(*cflags, ocolor); 3465 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3466 } 3467 PP_SET_VCOLOR(pp, vcolor); 3468 return (0); 3469 } 3470 3471 /* 3472 * We got a real conflict with a current mapping. 3473 * set flags to start unencaching all mappings 3474 * and return failure so we restart looping 3475 * the pp array from the beginning. 3476 */ 3477 return (HAT_TMPNC); 3478 } 3479 #endif /* VAC */ 3480 3481 /* 3482 * creates a large page shadow hmeblk for a tte. 3483 * The purpose of this routine is to allow us to do quick unloads because 3484 * the vm layer can easily pass a very large but sparsely populated range. 
3485 */ 3486 static struct hme_blk * 3487 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3488 { 3489 struct hmehash_bucket *hmebp; 3490 hmeblk_tag hblktag; 3491 int hmeshift, size, vshift; 3492 uint_t shw_mask, newshw_mask; 3493 struct hme_blk *hmeblkp; 3494 3495 ASSERT(sfmmup != KHATID); 3496 if (mmu_page_sizes == max_mmu_page_sizes) { 3497 ASSERT(ttesz < TTE256M); 3498 } else { 3499 ASSERT(ttesz < TTE4M); 3500 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3501 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3502 } 3503 3504 if (ttesz == TTE8K) { 3505 size = TTE512K; 3506 } else { 3507 size = ++ttesz; 3508 } 3509 3510 hblktag.htag_id = sfmmup; 3511 hmeshift = HME_HASH_SHIFT(size); 3512 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3513 hblktag.htag_rehash = HME_HASH_REHASH(size); 3514 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3515 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3516 3517 SFMMU_HASH_LOCK(hmebp); 3518 3519 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3520 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3521 if (hmeblkp == NULL) { 3522 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3523 hblktag, flags, SFMMU_INVALID_SHMERID); 3524 } 3525 ASSERT(hmeblkp); 3526 if (!hmeblkp->hblk_shw_mask) { 3527 /* 3528 * if this is a unused hblk it was just allocated or could 3529 * potentially be a previous large page hblk so we need to 3530 * set the shadow bit. 3531 */ 3532 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3533 hmeblkp->hblk_shw_bit = 1; 3534 } else if (hmeblkp->hblk_shw_bit == 0) { 3535 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3536 (void *)hmeblkp); 3537 } 3538 ASSERT(hmeblkp->hblk_shw_bit == 1); 3539 ASSERT(!hmeblkp->hblk_shared); 3540 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3541 ASSERT(vshift < 8); 3542 /* 3543 * Atomically set shw mask bit 3544 */ 3545 do { 3546 shw_mask = hmeblkp->hblk_shw_mask; 3547 newshw_mask = shw_mask | (1 << vshift); 3548 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3549 newshw_mask); 3550 } while (newshw_mask != shw_mask); 3551 3552 SFMMU_HASH_UNLOCK(hmebp); 3553 3554 return (hmeblkp); 3555 } 3556 3557 /* 3558 * This routine cleanup a previous shadow hmeblk and changes it to 3559 * a regular hblk. This happens rarely but it is possible 3560 * when a process wants to use large pages and there are hblks still 3561 * lying around from the previous as that used these hmeblks. 3562 * The alternative was to cleanup the shadow hblks at unload time 3563 * but since so few user processes actually use large pages, it is 3564 * better to be lazy and cleanup at this time. 
3565 */ 3566 static void 3567 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3568 struct hmehash_bucket *hmebp) 3569 { 3570 caddr_t addr, endaddr; 3571 int hashno, size; 3572 3573 ASSERT(hmeblkp->hblk_shw_bit); 3574 ASSERT(!hmeblkp->hblk_shared); 3575 3576 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3577 3578 if (!hmeblkp->hblk_shw_mask) { 3579 hmeblkp->hblk_shw_bit = 0; 3580 return; 3581 } 3582 addr = (caddr_t)get_hblk_base(hmeblkp); 3583 endaddr = get_hblk_endaddr(hmeblkp); 3584 size = get_hblk_ttesz(hmeblkp); 3585 hashno = size - 1; 3586 ASSERT(hashno > 0); 3587 SFMMU_HASH_UNLOCK(hmebp); 3588 3589 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3590 3591 SFMMU_HASH_LOCK(hmebp); 3592 } 3593 3594 static void 3595 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3596 int hashno) 3597 { 3598 int hmeshift, shadow = 0; 3599 hmeblk_tag hblktag; 3600 struct hmehash_bucket *hmebp; 3601 struct hme_blk *hmeblkp; 3602 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3603 uint64_t hblkpa, prevpa, nx_pa; 3604 3605 ASSERT(hashno > 0); 3606 hblktag.htag_id = sfmmup; 3607 hblktag.htag_rehash = hashno; 3608 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3609 3610 hmeshift = HME_HASH_SHIFT(hashno); 3611 3612 while (addr < endaddr) { 3613 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3614 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3615 SFMMU_HASH_LOCK(hmebp); 3616 /* inline HME_HASH_SEARCH */ 3617 hmeblkp = hmebp->hmeblkp; 3618 hblkpa = hmebp->hmeh_nextpa; 3619 prevpa = 0; 3620 pr_hblk = NULL; 3621 while (hmeblkp) { 3622 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3623 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3624 /* found hme_blk */ 3625 ASSERT(!hmeblkp->hblk_shared); 3626 if (hmeblkp->hblk_shw_bit) { 3627 if (hmeblkp->hblk_shw_mask) { 3628 shadow = 1; 3629 sfmmu_shadow_hcleanup(sfmmup, 3630 hmeblkp, hmebp); 3631 break; 3632 } else { 3633 hmeblkp->hblk_shw_bit = 0; 3634 } 3635 } 3636 3637 /* 3638 * Hblk_hmecnt and hblk_vcnt could be non zero 3639 * since hblk_unload() does not gurantee that. 3640 * 3641 * XXX - this could cause tteload() to spin 3642 * where sfmmu_shadow_hcleanup() is called. 3643 */ 3644 } 3645 3646 nx_hblk = hmeblkp->hblk_next; 3647 nx_pa = hmeblkp->hblk_nextpa; 3648 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3649 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3650 pr_hblk); 3651 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3652 } else { 3653 pr_hblk = hmeblkp; 3654 prevpa = hblkpa; 3655 } 3656 hmeblkp = nx_hblk; 3657 hblkpa = nx_pa; 3658 } 3659 3660 SFMMU_HASH_UNLOCK(hmebp); 3661 3662 if (shadow) { 3663 /* 3664 * We found another shadow hblk so cleaned its 3665 * children. We need to go back and cleanup 3666 * the original hblk so we don't change the 3667 * addr. 3668 */ 3669 shadow = 0; 3670 } else { 3671 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3672 (1 << hmeshift)); 3673 } 3674 } 3675 sfmmu_hblks_list_purge(&list); 3676 } 3677 3678 /* 3679 * This routine's job is to delete stale invalid shared hmeregions hmeblks that 3680 * may still linger on after pageunload. 
3681 */ 3682 static void 3683 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3684 { 3685 int hmeshift; 3686 hmeblk_tag hblktag; 3687 struct hmehash_bucket *hmebp; 3688 struct hme_blk *hmeblkp; 3689 struct hme_blk *pr_hblk; 3690 struct hme_blk *list = NULL; 3691 uint64_t hblkpa, prevpa; 3692 3693 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3694 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3695 3696 hmeshift = HME_HASH_SHIFT(ttesz); 3697 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3698 hblktag.htag_rehash = ttesz; 3699 hblktag.htag_rid = rid; 3700 hblktag.htag_id = srdp; 3701 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3702 3703 SFMMU_HASH_LOCK(hmebp); 3704 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 3705 prevpa, &list); 3706 if (hmeblkp != NULL) { 3707 ASSERT(hmeblkp->hblk_shared); 3708 ASSERT(!hmeblkp->hblk_shw_bit); 3709 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3710 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3711 } 3712 ASSERT(!hmeblkp->hblk_lckcnt); 3713 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 3714 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3715 } 3716 SFMMU_HASH_UNLOCK(hmebp); 3717 sfmmu_hblks_list_purge(&list); 3718 } 3719 3720 /* ARGSUSED */ 3721 static void 3722 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3723 size_t r_size, void *r_obj, u_offset_t r_objoff) 3724 { 3725 } 3726 3727 /* 3728 * Searches for an hmeblk which maps addr, then unloads this mapping 3729 * and updates *eaddrp, if the hmeblk is found. 3730 */ 3731 static void 3732 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3733 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3734 { 3735 int hmeshift; 3736 hmeblk_tag hblktag; 3737 struct hmehash_bucket *hmebp; 3738 struct hme_blk *hmeblkp; 3739 struct hme_blk *pr_hblk; 3740 struct hme_blk *list = NULL; 3741 uint64_t hblkpa, prevpa; 3742 3743 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3744 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3745 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3746 3747 hmeshift = HME_HASH_SHIFT(ttesz); 3748 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3749 hblktag.htag_rehash = ttesz; 3750 hblktag.htag_rid = rid; 3751 hblktag.htag_id = srdp; 3752 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3753 3754 SFMMU_HASH_LOCK(hmebp); 3755 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 3756 prevpa, &list); 3757 if (hmeblkp != NULL) { 3758 ASSERT(hmeblkp->hblk_shared); 3759 ASSERT(!hmeblkp->hblk_lckcnt); 3760 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3761 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3762 eaddr, NULL, HAT_UNLOAD); 3763 ASSERT(*eaddrp > addr); 3764 } 3765 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3766 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 3767 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3768 } 3769 SFMMU_HASH_UNLOCK(hmebp); 3770 sfmmu_hblks_list_purge(&list); 3771 } 3772 3773 static void 3774 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3775 { 3776 int ttesz = rgnp->rgn_pgszc; 3777 size_t rsz = rgnp->rgn_size; 3778 caddr_t rsaddr = rgnp->rgn_saddr; 3779 caddr_t readdr = rsaddr + rsz; 3780 caddr_t rhsaddr; 3781 caddr_t va; 3782 uint_t rid = rgnp->rgn_id; 3783 caddr_t cbsaddr; 3784 caddr_t cbeaddr; 3785 hat_rgn_cb_func_t rcbfunc; 3786 ulong_t cnt; 3787 3788 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3789 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3790 3791 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3792 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3793 if (ttesz < HBLK_MIN_TTESZ) { 3794 ttesz 
= HBLK_MIN_TTESZ; 3795 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3796 } else { 3797 rhsaddr = rsaddr; 3798 } 3799 3800 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) { 3801 rcbfunc = sfmmu_rgn_cb_noop; 3802 } 3803 3804 while (ttesz >= HBLK_MIN_TTESZ) { 3805 cbsaddr = rsaddr; 3806 cbeaddr = rsaddr; 3807 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3808 ttesz--; 3809 continue; 3810 } 3811 cnt = 0; 3812 va = rsaddr; 3813 while (va < readdr) { 3814 ASSERT(va >= rhsaddr); 3815 if (va != cbeaddr) { 3816 if (cbeaddr != cbsaddr) { 3817 ASSERT(cbeaddr > cbsaddr); 3818 (*rcbfunc)(cbsaddr, cbeaddr, 3819 rsaddr, rsz, rgnp->rgn_obj, 3820 rgnp->rgn_objoff); 3821 } 3822 cbsaddr = va; 3823 cbeaddr = va; 3824 } 3825 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3826 ttesz, &cbeaddr); 3827 cnt++; 3828 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3829 } 3830 if (cbeaddr != cbsaddr) { 3831 ASSERT(cbeaddr > cbsaddr); 3832 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3833 rsz, rgnp->rgn_obj, 3834 rgnp->rgn_objoff); 3835 } 3836 ttesz--; 3837 } 3838 } 3839 3840 /* 3841 * Release one hardware address translation lock on the given address range. 3842 */ 3843 void 3844 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3845 { 3846 struct hmehash_bucket *hmebp; 3847 hmeblk_tag hblktag; 3848 int hmeshift, hashno = 1; 3849 struct hme_blk *hmeblkp, *list = NULL; 3850 caddr_t endaddr; 3851 3852 ASSERT(sfmmup != NULL); 3853 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3854 3855 ASSERT((sfmmup == ksfmmup) || 3856 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3857 ASSERT((len & MMU_PAGEOFFSET) == 0); 3858 endaddr = addr + len; 3859 hblktag.htag_id = sfmmup; 3860 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3861 3862 /* 3863 * Spitfire supports 4 page sizes. 3864 * Most pages are expected to be of the smallest page size (8K) and 3865 * these will not need to be rehashed. 64K pages also don't need to be 3866 * rehashed because an hmeblk spans 64K of address space. 512K pages 3867 * might need 1 rehash and and 4M pages might need 2 rehashes. 3868 */ 3869 while (addr < endaddr) { 3870 hmeshift = HME_HASH_SHIFT(hashno); 3871 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3872 hblktag.htag_rehash = hashno; 3873 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3874 3875 SFMMU_HASH_LOCK(hmebp); 3876 3877 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3878 if (hmeblkp != NULL) { 3879 ASSERT(!hmeblkp->hblk_shared); 3880 /* 3881 * If we encounter a shadow hmeblk then 3882 * we know there are no valid hmeblks mapping 3883 * this address at this size or larger. 3884 * Just increment address by the smallest 3885 * page size. 3886 */ 3887 if (hmeblkp->hblk_shw_bit) { 3888 addr += MMU_PAGESIZE; 3889 } else { 3890 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3891 endaddr); 3892 } 3893 SFMMU_HASH_UNLOCK(hmebp); 3894 hashno = 1; 3895 continue; 3896 } 3897 SFMMU_HASH_UNLOCK(hmebp); 3898 3899 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3900 /* 3901 * We have traversed the whole list and rehashed 3902 * if necessary without finding the address to unlock 3903 * which should never happen. 3904 */ 3905 panic("sfmmu_unlock: addr not found. 
" 3906 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3907 } else { 3908 hashno++; 3909 } 3910 } 3911 3912 sfmmu_hblks_list_purge(&list); 3913 } 3914 3915 void 3916 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 3917 hat_region_cookie_t rcookie) 3918 { 3919 sf_srd_t *srdp; 3920 sf_region_t *rgnp; 3921 int ttesz; 3922 uint_t rid; 3923 caddr_t eaddr; 3924 caddr_t va; 3925 int hmeshift; 3926 hmeblk_tag hblktag; 3927 struct hmehash_bucket *hmebp; 3928 struct hme_blk *hmeblkp; 3929 struct hme_blk *pr_hblk; 3930 struct hme_blk *list; 3931 uint64_t hblkpa, prevpa; 3932 3933 if (rcookie == HAT_INVALID_REGION_COOKIE) { 3934 hat_unlock(sfmmup, addr, len); 3935 return; 3936 } 3937 3938 ASSERT(sfmmup != NULL); 3939 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3940 ASSERT(sfmmup != ksfmmup); 3941 3942 srdp = sfmmup->sfmmu_srdp; 3943 rid = (uint_t)((uint64_t)rcookie); 3944 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3945 eaddr = addr + len; 3946 va = addr; 3947 list = NULL; 3948 rgnp = srdp->srd_hmergnp[rid]; 3949 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 3950 3951 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 3952 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 3953 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 3954 ttesz = HBLK_MIN_TTESZ; 3955 } else { 3956 ttesz = rgnp->rgn_pgszc; 3957 } 3958 while (va < eaddr) { 3959 while (ttesz < rgnp->rgn_pgszc && 3960 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 3961 ttesz++; 3962 } 3963 while (ttesz >= HBLK_MIN_TTESZ) { 3964 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3965 ttesz--; 3966 continue; 3967 } 3968 hmeshift = HME_HASH_SHIFT(ttesz); 3969 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 3970 hblktag.htag_rehash = ttesz; 3971 hblktag.htag_rid = rid; 3972 hblktag.htag_id = srdp; 3973 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 3974 SFMMU_HASH_LOCK(hmebp); 3975 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 3976 pr_hblk, prevpa, &list); 3977 if (hmeblkp == NULL) { 3978 SFMMU_HASH_UNLOCK(hmebp); 3979 ttesz--; 3980 continue; 3981 } 3982 ASSERT(hmeblkp->hblk_shared); 3983 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 3984 ASSERT(va >= eaddr || 3985 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 3986 SFMMU_HASH_UNLOCK(hmebp); 3987 break; 3988 } 3989 if (ttesz < HBLK_MIN_TTESZ) { 3990 panic("hat_unlock_region: addr not found " 3991 "addr %p hat %p", (void *)va, (void *)sfmmup); 3992 } 3993 } 3994 sfmmu_hblks_list_purge(&list); 3995 } 3996 3997 /* 3998 * Function to unlock a range of addresses in an hmeblk. It returns the 3999 * next address that needs to be unlocked. 4000 * Should be called with the hash lock held. 
4001 */ 4002 static caddr_t 4003 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 4004 { 4005 struct sf_hment *sfhme; 4006 tte_t tteold, ttemod; 4007 int ttesz, ret; 4008 4009 ASSERT(in_hblk_range(hmeblkp, addr)); 4010 ASSERT(hmeblkp->hblk_shw_bit == 0); 4011 4012 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4013 ttesz = get_hblk_ttesz(hmeblkp); 4014 4015 HBLKTOHME(sfhme, hmeblkp, addr); 4016 while (addr < endaddr) { 4017 readtte: 4018 sfmmu_copytte(&sfhme->hme_tte, &tteold); 4019 if (TTE_IS_VALID(&tteold)) { 4020 4021 ttemod = tteold; 4022 4023 ret = sfmmu_modifytte_try(&tteold, &ttemod, 4024 &sfhme->hme_tte); 4025 4026 if (ret < 0) 4027 goto readtte; 4028 4029 if (hmeblkp->hblk_lckcnt == 0) 4030 panic("zero hblk lckcnt"); 4031 4032 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4033 (uintptr_t)endaddr) 4034 panic("can't unlock large tte"); 4035 4036 ASSERT(hmeblkp->hblk_lckcnt > 0); 4037 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 4038 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4039 } else { 4040 panic("sfmmu_hblk_unlock: invalid tte"); 4041 } 4042 addr += TTEBYTES(ttesz); 4043 sfhme++; 4044 } 4045 return (addr); 4046 } 4047 4048 /* 4049 * Physical Address Mapping Framework 4050 * 4051 * General rules: 4052 * 4053 * (1) Applies only to seg_kmem memory pages. To make things easier, 4054 * seg_kpm addresses are also accepted by the routines, but nothing 4055 * is done with them since by definition their PA mappings are static. 4056 * (2) hat_add_callback() may only be called while holding the page lock 4057 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4058 * or passing HAC_PAGELOCK flag. 4059 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4060 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4061 * callbacks may not sleep or acquire adaptive mutex locks. 4062 * (4) Either prehandler() or posthandler() (but not both) may be specified 4063 * as being NULL. Specifying an errhandler() is optional. 4064 * 4065 * Details of using the framework: 4066 * 4067 * registering a callback (hat_register_callback()) 4068 * 4069 * Pass prehandler, posthandler, errhandler addresses 4070 * as described below. If capture_cpus argument is nonzero, 4071 * suspend callback to the prehandler will occur with CPUs 4072 * captured and executing xc_loop() and CPUs will remain 4073 * captured until after the posthandler suspend callback 4074 * occurs. 4075 * 4076 * adding a callback (hat_add_callback()) 4077 * 4078 * as_pagelock(); 4079 * hat_add_callback(); 4080 * save returned pfn in private data structures or program registers; 4081 * as_pageunlock(); 4082 * 4083 * prehandler() 4084 * 4085 * Stop all accesses by physical address to this memory page. 4086 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4087 * adaptive locks. The second, SUSPEND, is called at high PIL with 4088 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4089 * locks must be XCALL_PIL or higher locks). 4090 * 4091 * May return the following errors: 4092 * EIO: A fatal error has occurred. This will result in panic. 4093 * EAGAIN: The page cannot be suspended. This will fail the 4094 * relocation. 4095 * 0: Success. 4096 * 4097 * posthandler() 4098 * 4099 * Save new pfn in private data structures or program registers; 4100 * not allowed to fail (non-zero return values will result in panic). 4101 * 4102 * errhandler() 4103 * 4104 * called when an error occurs related to the callback. 
Currently 4105 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4106 * a page is being freed, but there are still outstanding callback(s) 4107 * registered on the page. 4108 * 4109 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4110 * 4111 * stop using physical address 4112 * hat_delete_callback(); 4113 * 4114 */ 4115 4116 /* 4117 * Register a callback class. Each subsystem should do this once and 4118 * cache the id_t returned for use in setting up and tearing down callbacks. 4119 * 4120 * There is no facility for removing callback IDs once they are created; 4121 * the "key" should be unique for each module, so in case a module is unloaded 4122 * and subsequently re-loaded, we can recycle the module's previous entry. 4123 */ 4124 id_t 4125 hat_register_callback(int key, 4126 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4127 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4128 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4129 int capture_cpus) 4130 { 4131 id_t id; 4132 4133 /* 4134 * Search the table for a pre-existing callback associated with 4135 * the identifier "key". If one exists, we re-use that entry in 4136 * the table for this instance, otherwise we assign the next 4137 * available table slot. 4138 */ 4139 for (id = 0; id < sfmmu_max_cb_id; id++) { 4140 if (sfmmu_cb_table[id].key == key) 4141 break; 4142 } 4143 4144 if (id == sfmmu_max_cb_id) { 4145 id = sfmmu_cb_nextid++; 4146 if (id >= sfmmu_max_cb_id) 4147 panic("hat_register_callback: out of callback IDs"); 4148 } 4149 4150 ASSERT(prehandler != NULL || posthandler != NULL); 4151 4152 sfmmu_cb_table[id].key = key; 4153 sfmmu_cb_table[id].prehandler = prehandler; 4154 sfmmu_cb_table[id].posthandler = posthandler; 4155 sfmmu_cb_table[id].errhandler = errhandler; 4156 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4157 4158 return (id); 4159 } 4160 4161 #define HAC_COOKIE_NONE (void *)-1 4162 4163 /* 4164 * Add relocation callbacks to the specified addr/len which will be called 4165 * when relocating the associated page. See the description of pre and 4166 * posthandler above for more details. 4167 * 4168 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4169 * locked internally so the caller must be able to deal with the callback 4170 * running even before this function has returned. If HAC_PAGELOCK is not 4171 * set, it is assumed that the underlying memory pages are locked. 4172 * 4173 * Since the caller must track the individual page boundaries anyway, 4174 * we only allow a callback to be added to a single page (large 4175 * or small). Thus [addr, addr + len) MUST be contained within a single 4176 * page. 4177 * 4178 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4179 * _provided_that_ a unique parameter is specified for each callback. 4180 * If multiple callbacks are registered on the same range the callback will 4181 * be invoked with each unique parameter. Registering the same callback with 4182 * the same argument more than once will result in corrupted kernel state. 4183 * 4184 * Returns the pfn of the underlying kernel page in *rpfn 4185 * on success, or PFN_INVALID on failure. 4186 * 4187 * cookiep (if passed) provides storage space for an opaque cookie 4188 * to return later to hat_delete_callback(). This cookie makes the callback 4189 * deletion significantly quicker by avoiding a potentially lengthy hash 4190 * search. 
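 *
 * A minimal usage sketch tying hat_register_callback() and
 * hat_add_callback() together. The MYDRV_KEY value, the mydrv_* names and
 * the handler bodies are hypothetical, shown only to illustrate the
 * documented signatures:
 *
 *	static id_t mydrv_cbid;
 *
 *	static int
 *	mydrv_prehandler(caddr_t va, uint_t len, uint_t flags, void *pvt)
 *	{
 *		... PRESUSPEND, then SUSPEND: quiesce accesses to the
 *		    physical page backing [va, va + len) ...
 *		return (0);
 *	}
 *
 *	static int
 *	mydrv_posthandler(caddr_t va, uint_t len, uint_t flags, void *pvt,
 *	    pfn_t newpfn)
 *	{
 *		... reprogram the hardware with newpfn ...
 *		return (0);
 *	}
 *
 *	mydrv_cbid = hat_register_callback(MYDRV_KEY, mydrv_prehandler,
 *	    mydrv_posthandler, NULL, 1);
 *	...
 *	(void) hat_add_callback(mydrv_cbid, va, len, HAC_SLEEP | HAC_PAGELOCK,
 *	    pvt, &pfn, &cookie);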
4191 * 4192 * Returns values: 4193 * 0: success 4194 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4195 * EINVAL: callback ID is not valid 4196 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4197 * space 4198 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4199 */ 4200 int 4201 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4202 void *pvt, pfn_t *rpfn, void **cookiep) 4203 { 4204 struct hmehash_bucket *hmebp; 4205 hmeblk_tag hblktag; 4206 struct hme_blk *hmeblkp; 4207 int hmeshift, hashno; 4208 caddr_t saddr, eaddr, baseaddr; 4209 struct pa_hment *pahmep; 4210 struct sf_hment *sfhmep, *osfhmep; 4211 kmutex_t *pml; 4212 tte_t tte; 4213 page_t *pp; 4214 vnode_t *vp; 4215 u_offset_t off; 4216 pfn_t pfn; 4217 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4218 int locked = 0; 4219 4220 /* 4221 * For KPM mappings, just return the physical address since we 4222 * don't need to register any callbacks. 4223 */ 4224 if (IS_KPM_ADDR(vaddr)) { 4225 uint64_t paddr; 4226 SFMMU_KPM_VTOP(vaddr, paddr); 4227 *rpfn = btop(paddr); 4228 if (cookiep != NULL) 4229 *cookiep = HAC_COOKIE_NONE; 4230 return (0); 4231 } 4232 4233 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4234 *rpfn = PFN_INVALID; 4235 return (EINVAL); 4236 } 4237 4238 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4239 *rpfn = PFN_INVALID; 4240 return (ENOMEM); 4241 } 4242 4243 sfhmep = &pahmep->sfment; 4244 4245 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4246 eaddr = saddr + len; 4247 4248 rehash: 4249 /* Find the mapping(s) for this page */ 4250 for (hashno = TTE64K, hmeblkp = NULL; 4251 hmeblkp == NULL && hashno <= mmu_hashcnt; 4252 hashno++) { 4253 hmeshift = HME_HASH_SHIFT(hashno); 4254 hblktag.htag_id = ksfmmup; 4255 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4256 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4257 hblktag.htag_rehash = hashno; 4258 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4259 4260 SFMMU_HASH_LOCK(hmebp); 4261 4262 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4263 4264 if (hmeblkp == NULL) 4265 SFMMU_HASH_UNLOCK(hmebp); 4266 } 4267 4268 if (hmeblkp == NULL) { 4269 kmem_cache_free(pa_hment_cache, pahmep); 4270 *rpfn = PFN_INVALID; 4271 return (ENXIO); 4272 } 4273 4274 ASSERT(!hmeblkp->hblk_shared); 4275 4276 HBLKTOHME(osfhmep, hmeblkp, saddr); 4277 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4278 4279 if (!TTE_IS_VALID(&tte)) { 4280 SFMMU_HASH_UNLOCK(hmebp); 4281 kmem_cache_free(pa_hment_cache, pahmep); 4282 *rpfn = PFN_INVALID; 4283 return (ENXIO); 4284 } 4285 4286 /* 4287 * Make sure the boundaries for the callback fall within this 4288 * single mapping. 4289 */ 4290 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4291 ASSERT(saddr >= baseaddr); 4292 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4293 SFMMU_HASH_UNLOCK(hmebp); 4294 kmem_cache_free(pa_hment_cache, pahmep); 4295 *rpfn = PFN_INVALID; 4296 return (ERANGE); 4297 } 4298 4299 pfn = sfmmu_ttetopfn(&tte, vaddr); 4300 4301 /* 4302 * The pfn may not have a page_t underneath in which case we 4303 * just return it. This can happen if we are doing I/O to a 4304 * static portion of the kernel's address space, for instance. 
4305 */ 4306 pp = osfhmep->hme_page; 4307 if (pp == NULL) { 4308 SFMMU_HASH_UNLOCK(hmebp); 4309 kmem_cache_free(pa_hment_cache, pahmep); 4310 *rpfn = pfn; 4311 if (cookiep) 4312 *cookiep = HAC_COOKIE_NONE; 4313 return (0); 4314 } 4315 ASSERT(pp == PP_PAGEROOT(pp)); 4316 4317 vp = pp->p_vnode; 4318 off = pp->p_offset; 4319 4320 pml = sfmmu_mlist_enter(pp); 4321 4322 if (flags & HAC_PAGELOCK) { 4323 if (!page_trylock(pp, SE_SHARED)) { 4324 /* 4325 * Somebody is holding SE_EXCL lock. Might 4326 * even be hat_page_relocate(). Drop all 4327 * our locks, lookup the page in &kvp, and 4328 * retry. If it doesn't exist in &kvp and &zvp, 4329 * then we must be dealing with a kernel mapped 4330 * page which doesn't actually belong to 4331 * segkmem so we punt. 4332 */ 4333 sfmmu_mlist_exit(pml); 4334 SFMMU_HASH_UNLOCK(hmebp); 4335 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4336 4337 /* check zvp before giving up */ 4338 if (pp == NULL) 4339 pp = page_lookup(&zvp, (u_offset_t)saddr, 4340 SE_SHARED); 4341 4342 /* Okay, we didn't find it, give up */ 4343 if (pp == NULL) { 4344 kmem_cache_free(pa_hment_cache, pahmep); 4345 *rpfn = pfn; 4346 if (cookiep) 4347 *cookiep = HAC_COOKIE_NONE; 4348 return (0); 4349 } 4350 page_unlock(pp); 4351 goto rehash; 4352 } 4353 locked = 1; 4354 } 4355 4356 if (!PAGE_LOCKED(pp) && !panicstr) 4357 panic("hat_add_callback: page 0x%p not locked", (void *)pp); 4358 4359 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4360 pp->p_offset != off) { 4361 /* 4362 * The page moved before we got our hands on it. Drop 4363 * all the locks and try again. 4364 */ 4365 ASSERT((flags & HAC_PAGELOCK) != 0); 4366 sfmmu_mlist_exit(pml); 4367 SFMMU_HASH_UNLOCK(hmebp); 4368 page_unlock(pp); 4369 locked = 0; 4370 goto rehash; 4371 } 4372 4373 if (!VN_ISKAS(vp)) { 4374 /* 4375 * This is not a segkmem page but another page which 4376 * has been kernel mapped. It had better have at least 4377 * a share lock on it. Return the pfn. 4378 */ 4379 sfmmu_mlist_exit(pml); 4380 SFMMU_HASH_UNLOCK(hmebp); 4381 if (locked) 4382 page_unlock(pp); 4383 kmem_cache_free(pa_hment_cache, pahmep); 4384 ASSERT(PAGE_LOCKED(pp)); 4385 *rpfn = pfn; 4386 if (cookiep) 4387 *cookiep = HAC_COOKIE_NONE; 4388 return (0); 4389 } 4390 4391 /* 4392 * Setup this pa_hment and link its embedded dummy sf_hment into 4393 * the mapping list. 4394 */ 4395 pp->p_share++; 4396 pahmep->cb_id = callback_id; 4397 pahmep->addr = vaddr; 4398 pahmep->len = len; 4399 pahmep->refcnt = 1; 4400 pahmep->flags = 0; 4401 pahmep->pvt = pvt; 4402 4403 sfhmep->hme_tte.ll = 0; 4404 sfhmep->hme_data = pahmep; 4405 sfhmep->hme_prev = osfhmep; 4406 sfhmep->hme_next = osfhmep->hme_next; 4407 4408 if (osfhmep->hme_next) 4409 osfhmep->hme_next->hme_prev = sfhmep; 4410 4411 osfhmep->hme_next = sfhmep; 4412 4413 sfmmu_mlist_exit(pml); 4414 SFMMU_HASH_UNLOCK(hmebp); 4415 4416 if (locked) 4417 page_unlock(pp); 4418 4419 *rpfn = pfn; 4420 if (cookiep) 4421 *cookiep = (void *)pahmep; 4422 4423 return (0); 4424 } 4425 4426 /* 4427 * Remove the relocation callbacks from the specified addr/len. 
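 *
 * A hedged sketch of the teardown side, mirroring the registration sketch
 * in the hat_add_callback() comment above (the names are illustrative and
 * "cookie" is the value returned through cookiep by hat_add_callback()):
 *
 *	... stop using the physical address ...
 *	hat_delete_callback(va, len, pvt, HAC_PAGELOCK, cookie);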
4428 */ 4429 void 4430 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4431 void *cookie) 4432 { 4433 struct hmehash_bucket *hmebp; 4434 hmeblk_tag hblktag; 4435 struct hme_blk *hmeblkp; 4436 int hmeshift, hashno; 4437 caddr_t saddr; 4438 struct pa_hment *pahmep; 4439 struct sf_hment *sfhmep, *osfhmep; 4440 kmutex_t *pml; 4441 tte_t tte; 4442 page_t *pp; 4443 vnode_t *vp; 4444 u_offset_t off; 4445 int locked = 0; 4446 4447 /* 4448 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4449 * remove so just return. 4450 */ 4451 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4452 return; 4453 4454 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4455 4456 rehash: 4457 /* Find the mapping(s) for this page */ 4458 for (hashno = TTE64K, hmeblkp = NULL; 4459 hmeblkp == NULL && hashno <= mmu_hashcnt; 4460 hashno++) { 4461 hmeshift = HME_HASH_SHIFT(hashno); 4462 hblktag.htag_id = ksfmmup; 4463 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4464 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4465 hblktag.htag_rehash = hashno; 4466 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4467 4468 SFMMU_HASH_LOCK(hmebp); 4469 4470 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4471 4472 if (hmeblkp == NULL) 4473 SFMMU_HASH_UNLOCK(hmebp); 4474 } 4475 4476 if (hmeblkp == NULL) 4477 return; 4478 4479 ASSERT(!hmeblkp->hblk_shared); 4480 4481 HBLKTOHME(osfhmep, hmeblkp, saddr); 4482 4483 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4484 if (!TTE_IS_VALID(&tte)) { 4485 SFMMU_HASH_UNLOCK(hmebp); 4486 return; 4487 } 4488 4489 pp = osfhmep->hme_page; 4490 if (pp == NULL) { 4491 SFMMU_HASH_UNLOCK(hmebp); 4492 ASSERT(cookie == NULL); 4493 return; 4494 } 4495 4496 vp = pp->p_vnode; 4497 off = pp->p_offset; 4498 4499 pml = sfmmu_mlist_enter(pp); 4500 4501 if (flags & HAC_PAGELOCK) { 4502 if (!page_trylock(pp, SE_SHARED)) { 4503 /* 4504 * Somebody is holding SE_EXCL lock. Might 4505 * even be hat_page_relocate(). Drop all 4506 * our locks, lookup the page in &kvp, and 4507 * retry. If it doesn't exist in &kvp and &zvp, 4508 * then we must be dealing with a kernel mapped 4509 * page which doesn't actually belong to 4510 * segkmem so we punt. 4511 */ 4512 sfmmu_mlist_exit(pml); 4513 SFMMU_HASH_UNLOCK(hmebp); 4514 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4515 /* check zvp before giving up */ 4516 if (pp == NULL) 4517 pp = page_lookup(&zvp, (u_offset_t)saddr, 4518 SE_SHARED); 4519 4520 if (pp == NULL) { 4521 ASSERT(cookie == NULL); 4522 return; 4523 } 4524 page_unlock(pp); 4525 goto rehash; 4526 } 4527 locked = 1; 4528 } 4529 4530 ASSERT(PAGE_LOCKED(pp)); 4531 4532 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4533 pp->p_offset != off) { 4534 /* 4535 * The page moved before we got our hands on it. Drop 4536 * all the locks and try again. 4537 */ 4538 ASSERT((flags & HAC_PAGELOCK) != 0); 4539 sfmmu_mlist_exit(pml); 4540 SFMMU_HASH_UNLOCK(hmebp); 4541 page_unlock(pp); 4542 locked = 0; 4543 goto rehash; 4544 } 4545 4546 if (!VN_ISKAS(vp)) { 4547 /* 4548 * This is not a segkmem page but another page which 4549 * has been kernel mapped. 
4550 */ 4551 sfmmu_mlist_exit(pml); 4552 SFMMU_HASH_UNLOCK(hmebp); 4553 if (locked) 4554 page_unlock(pp); 4555 ASSERT(cookie == NULL); 4556 return; 4557 } 4558 4559 if (cookie != NULL) { 4560 pahmep = (struct pa_hment *)cookie; 4561 sfhmep = &pahmep->sfment; 4562 } else { 4563 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4564 sfhmep = sfhmep->hme_next) { 4565 4566 /* 4567 * skip va<->pa mappings 4568 */ 4569 if (!IS_PAHME(sfhmep)) 4570 continue; 4571 4572 pahmep = sfhmep->hme_data; 4573 ASSERT(pahmep != NULL); 4574 4575 /* 4576 * if pa_hment matches, remove it 4577 */ 4578 if ((pahmep->pvt == pvt) && 4579 (pahmep->addr == vaddr) && 4580 (pahmep->len == len)) { 4581 break; 4582 } 4583 } 4584 } 4585 4586 if (sfhmep == NULL) { 4587 if (!panicstr) { 4588 panic("hat_delete_callback: pa_hment not found, pp %p", 4589 (void *)pp); 4590 } 4591 return; 4592 } 4593 4594 /* 4595 * Note: at this point a valid kernel mapping must still be 4596 * present on this page. 4597 */ 4598 pp->p_share--; 4599 if (pp->p_share <= 0) 4600 panic("hat_delete_callback: zero p_share"); 4601 4602 if (--pahmep->refcnt == 0) { 4603 if (pahmep->flags != 0) 4604 panic("hat_delete_callback: pa_hment is busy"); 4605 4606 /* 4607 * Remove sfhmep from the mapping list for the page. 4608 */ 4609 if (sfhmep->hme_prev) { 4610 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4611 } else { 4612 pp->p_mapping = sfhmep->hme_next; 4613 } 4614 4615 if (sfhmep->hme_next) 4616 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4617 4618 sfmmu_mlist_exit(pml); 4619 SFMMU_HASH_UNLOCK(hmebp); 4620 4621 if (locked) 4622 page_unlock(pp); 4623 4624 kmem_cache_free(pa_hment_cache, pahmep); 4625 return; 4626 } 4627 4628 sfmmu_mlist_exit(pml); 4629 SFMMU_HASH_UNLOCK(hmebp); 4630 if (locked) 4631 page_unlock(pp); 4632 } 4633 4634 /* 4635 * hat_probe returns 1 if the translation for the address 'addr' is 4636 * loaded, zero otherwise. 4637 * 4638 * hat_probe should be used only for advisory purposes because it may 4639 * occasionally return the wrong value. The implementation must guarantee that 4640 * returning the wrong value is a very rare event. hat_probe is used 4641 * to implement optimizations in the segment drivers.
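 *
 * A hedged example of that advisory use (illustrative segment-driver style
 * code, not taken from this file): skip an expensive prefault pass when a
 * translation already appears to be loaded:
 *
 *	if (hat_probe(as->a_hat, addr) == 0) {
 *		... probably not mapped yet; go do the expensive work ...
 *	}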
4642 * 4643 */ 4644 int 4645 hat_probe(struct hat *sfmmup, caddr_t addr) 4646 { 4647 pfn_t pfn; 4648 tte_t tte; 4649 4650 ASSERT(sfmmup != NULL); 4651 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4652 4653 ASSERT((sfmmup == ksfmmup) || 4654 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4655 4656 if (sfmmup == ksfmmup) { 4657 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4658 == PFN_SUSPENDED) { 4659 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4660 } 4661 } else { 4662 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4663 } 4664 4665 if (pfn != PFN_INVALID) 4666 return (1); 4667 else 4668 return (0); 4669 } 4670 4671 ssize_t 4672 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4673 { 4674 tte_t tte; 4675 4676 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4677 4678 if (sfmmup == ksfmmup) { 4679 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4680 return (-1); 4681 } 4682 } else { 4683 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4684 return (-1); 4685 } 4686 } 4687 4688 ASSERT(TTE_IS_VALID(&tte)); 4689 return (TTEBYTES(TTE_CSZ(&tte))); 4690 } 4691 4692 uint_t 4693 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4694 { 4695 tte_t tte; 4696 4697 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4698 4699 if (sfmmup == ksfmmup) { 4700 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4701 tte.ll = 0; 4702 } 4703 } else { 4704 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4705 tte.ll = 0; 4706 } 4707 } 4708 if (TTE_IS_VALID(&tte)) { 4709 *attr = sfmmu_ptov_attr(&tte); 4710 return (0); 4711 } 4712 *attr = 0; 4713 return ((uint_t)0xffffffff); 4714 } 4715 4716 /* 4717 * Enables more attributes on specified address range (ie. logical OR) 4718 */ 4719 void 4720 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4721 { 4722 if (hat->sfmmu_xhat_provider) { 4723 XHAT_SETATTR(hat, addr, len, attr); 4724 return; 4725 } else { 4726 /* 4727 * This must be a CPU HAT. If the address space has 4728 * XHATs attached, change attributes for all of them, 4729 * just in case 4730 */ 4731 ASSERT(hat->sfmmu_as != NULL); 4732 if (hat->sfmmu_as->a_xhat != NULL) 4733 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4734 } 4735 4736 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4737 } 4738 4739 /* 4740 * Assigns attributes to the specified address range. All the attributes 4741 * are specified. 4742 */ 4743 void 4744 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4745 { 4746 if (hat->sfmmu_xhat_provider) { 4747 XHAT_CHGATTR(hat, addr, len, attr); 4748 return; 4749 } else { 4750 /* 4751 * This must be a CPU HAT. If the address space has 4752 * XHATs attached, change attributes for all of them, 4753 * just in case 4754 */ 4755 ASSERT(hat->sfmmu_as != NULL); 4756 if (hat->sfmmu_as->a_xhat != NULL) 4757 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4758 } 4759 4760 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4761 } 4762 4763 /* 4764 * Remove attributes on the specified address range (ie. loginal NAND) 4765 */ 4766 void 4767 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4768 { 4769 if (hat->sfmmu_xhat_provider) { 4770 XHAT_CLRATTR(hat, addr, len, attr); 4771 return; 4772 } else { 4773 /* 4774 * This must be a CPU HAT. 
If the address space has 4775 * XHATs attached, change attributes for all of them, 4776 * just in case 4777 */ 4778 ASSERT(hat->sfmmu_as != NULL); 4779 if (hat->sfmmu_as->a_xhat != NULL) 4780 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4781 } 4782 4783 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4784 } 4785 4786 /* 4787 * Change attributes on an address range to that specified by attr and mode. 4788 */ 4789 static void 4790 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4791 int mode) 4792 { 4793 struct hmehash_bucket *hmebp; 4794 hmeblk_tag hblktag; 4795 int hmeshift, hashno = 1; 4796 struct hme_blk *hmeblkp, *list = NULL; 4797 caddr_t endaddr; 4798 cpuset_t cpuset; 4799 demap_range_t dmr; 4800 4801 CPUSET_ZERO(cpuset); 4802 4803 ASSERT((sfmmup == ksfmmup) || 4804 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4805 ASSERT((len & MMU_PAGEOFFSET) == 0); 4806 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4807 4808 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4809 ((addr + len) > (caddr_t)USERLIMIT)) { 4810 panic("user addr %p in kernel space", 4811 (void *)addr); 4812 } 4813 4814 endaddr = addr + len; 4815 hblktag.htag_id = sfmmup; 4816 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4817 DEMAP_RANGE_INIT(sfmmup, &dmr); 4818 4819 while (addr < endaddr) { 4820 hmeshift = HME_HASH_SHIFT(hashno); 4821 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4822 hblktag.htag_rehash = hashno; 4823 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4824 4825 SFMMU_HASH_LOCK(hmebp); 4826 4827 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4828 if (hmeblkp != NULL) { 4829 ASSERT(!hmeblkp->hblk_shared); 4830 /* 4831 * We've encountered a shadow hmeblk so skip the range 4832 * of the next smaller mapping size. 4833 */ 4834 if (hmeblkp->hblk_shw_bit) { 4835 ASSERT(sfmmup != ksfmmup); 4836 ASSERT(hashno > 1); 4837 addr = (caddr_t)P2END((uintptr_t)addr, 4838 TTEBYTES(hashno - 1)); 4839 } else { 4840 addr = sfmmu_hblk_chgattr(sfmmup, 4841 hmeblkp, addr, endaddr, &dmr, attr, mode); 4842 } 4843 SFMMU_HASH_UNLOCK(hmebp); 4844 hashno = 1; 4845 continue; 4846 } 4847 SFMMU_HASH_UNLOCK(hmebp); 4848 4849 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4850 /* 4851 * We have traversed the whole list and rehashed 4852 * if necessary without finding the address to chgattr. 4853 * This is ok, so we increment the address by the 4854 * smallest hmeblk range for kernel mappings or for 4855 * user mappings with no large pages, and the largest 4856 * hmeblk range, to account for shadow hmeblks, for 4857 * user mappings with large pages and continue. 4858 */ 4859 if (sfmmup == ksfmmup) 4860 addr = (caddr_t)P2END((uintptr_t)addr, 4861 TTEBYTES(1)); 4862 else 4863 addr = (caddr_t)P2END((uintptr_t)addr, 4864 TTEBYTES(hashno)); 4865 hashno = 1; 4866 } else { 4867 hashno++; 4868 } 4869 } 4870 4871 sfmmu_hblks_list_purge(&list); 4872 DEMAP_RANGE_FLUSH(&dmr); 4873 cpuset = sfmmup->sfmmu_cpusran; 4874 xt_sync(cpuset); 4875 } 4876 4877 /* 4878 * This function chgattr on a range of addresses in an hmeblk. It returns the 4879 * next addres that needs to be chgattr. 4880 * It should be called with the hash lock held. 4881 * XXX It should be possible to optimize chgattr by not flushing every time but 4882 * on the other hand: 4883 * 1. do one flush crosscall. 4884 * 2. 
only flush if we are increasing permissions (make sure this will work) 4885 */ 4886 static caddr_t 4887 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4888 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4889 { 4890 tte_t tte, tteattr, tteflags, ttemod; 4891 struct sf_hment *sfhmep; 4892 int ttesz; 4893 struct page *pp = NULL; 4894 kmutex_t *pml, *pmtx; 4895 int ret; 4896 int use_demap_range; 4897 #if defined(SF_ERRATA_57) 4898 int check_exec; 4899 #endif 4900 4901 ASSERT(in_hblk_range(hmeblkp, addr)); 4902 ASSERT(hmeblkp->hblk_shw_bit == 0); 4903 ASSERT(!hmeblkp->hblk_shared); 4904 4905 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4906 ttesz = get_hblk_ttesz(hmeblkp); 4907 4908 /* 4909 * Flush the current demap region if addresses have been 4910 * skipped or the page size doesn't match. 4911 */ 4912 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4913 if (use_demap_range) { 4914 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4915 } else { 4916 DEMAP_RANGE_FLUSH(dmrp); 4917 } 4918 4919 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4920 #if defined(SF_ERRATA_57) 4921 check_exec = (sfmmup != ksfmmup) && 4922 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4923 TTE_IS_EXECUTABLE(&tteattr); 4924 #endif 4925 HBLKTOHME(sfhmep, hmeblkp, addr); 4926 while (addr < endaddr) { 4927 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4928 if (TTE_IS_VALID(&tte)) { 4929 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4930 /* 4931 * if the new attr is the same as old 4932 * continue 4933 */ 4934 goto next_addr; 4935 } 4936 if (!TTE_IS_WRITABLE(&tteattr)) { 4937 /* 4938 * make sure we clear hw modify bit if we 4939 * removing write protections 4940 */ 4941 tteflags.tte_intlo |= TTE_HWWR_INT; 4942 } 4943 4944 pml = NULL; 4945 pp = sfhmep->hme_page; 4946 if (pp) { 4947 pml = sfmmu_mlist_enter(pp); 4948 } 4949 4950 if (pp != sfhmep->hme_page) { 4951 /* 4952 * tte must have been unloaded. 4953 */ 4954 ASSERT(pml); 4955 sfmmu_mlist_exit(pml); 4956 continue; 4957 } 4958 4959 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4960 4961 ttemod = tte; 4962 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4963 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4964 4965 #if defined(SF_ERRATA_57) 4966 if (check_exec && addr < errata57_limit) 4967 ttemod.tte_exec_perm = 0; 4968 #endif 4969 ret = sfmmu_modifytte_try(&tte, &ttemod, 4970 &sfhmep->hme_tte); 4971 4972 if (ret < 0) { 4973 /* tte changed underneath us */ 4974 if (pml) { 4975 sfmmu_mlist_exit(pml); 4976 } 4977 continue; 4978 } 4979 4980 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4981 /* 4982 * need to sync if we are clearing modify bit. 4983 */ 4984 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4985 } 4986 4987 if (pp && PP_ISRO(pp)) { 4988 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4989 pmtx = sfmmu_page_enter(pp); 4990 PP_CLRRO(pp); 4991 sfmmu_page_exit(pmtx); 4992 } 4993 } 4994 4995 if (ret > 0 && use_demap_range) { 4996 DEMAP_RANGE_MARKPG(dmrp, addr); 4997 } else if (ret > 0) { 4998 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4999 } 5000 5001 if (pml) { 5002 sfmmu_mlist_exit(pml); 5003 } 5004 } 5005 next_addr: 5006 addr += TTEBYTES(ttesz); 5007 sfhmep++; 5008 DEMAP_RANGE_NEXTPG(dmrp); 5009 } 5010 return (addr); 5011 } 5012 5013 /* 5014 * This routine converts virtual attributes to physical ones. It will 5015 * update the tteflags field with the tte mask corresponding to the attributes 5016 * affected and it returns the new attributes. It will also clear the modify 5017 * bit if we are taking away write permission. 
This is necessary since the 5018 * modify bit is the hardware permission bit and we need to clear it in order 5019 * to detect write faults. 5020 */ 5021 static uint64_t 5022 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 5023 { 5024 tte_t ttevalue; 5025 5026 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5027 5028 switch (mode) { 5029 case SFMMU_CHGATTR: 5030 /* all attributes specified */ 5031 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5032 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5033 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5034 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5035 break; 5036 case SFMMU_SETATTR: 5037 ASSERT(!(attr & ~HAT_PROT_MASK)); 5038 ttemaskp->ll = 0; 5039 ttevalue.ll = 0; 5040 /* 5041 * a valid tte implies exec and read for sfmmu 5042 * so no need to do anything about them. 5043 * since priviledged access implies user access 5044 * PROT_USER doesn't make sense either. 5045 */ 5046 if (attr & PROT_WRITE) { 5047 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5048 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5049 } 5050 break; 5051 case SFMMU_CLRATTR: 5052 /* attributes will be nand with current ones */ 5053 if (attr & ~(PROT_WRITE | PROT_USER)) { 5054 panic("sfmmu: attr %x not supported", attr); 5055 } 5056 ttemaskp->ll = 0; 5057 ttevalue.ll = 0; 5058 if (attr & PROT_WRITE) { 5059 /* clear both writable and modify bit */ 5060 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5061 } 5062 if (attr & PROT_USER) { 5063 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5064 ttevalue.tte_intlo |= TTE_PRIV_INT; 5065 } 5066 break; 5067 default: 5068 panic("sfmmu_vtop_attr: bad mode %x", mode); 5069 } 5070 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5071 return (ttevalue.ll); 5072 } 5073 5074 static uint_t 5075 sfmmu_ptov_attr(tte_t *ttep) 5076 { 5077 uint_t attr; 5078 5079 ASSERT(TTE_IS_VALID(ttep)); 5080 5081 attr = PROT_READ; 5082 5083 if (TTE_IS_WRITABLE(ttep)) { 5084 attr |= PROT_WRITE; 5085 } 5086 if (TTE_IS_EXECUTABLE(ttep)) { 5087 attr |= PROT_EXEC; 5088 } 5089 if (!TTE_IS_PRIVILEGED(ttep)) { 5090 attr |= PROT_USER; 5091 } 5092 if (TTE_IS_NFO(ttep)) { 5093 attr |= HAT_NOFAULT; 5094 } 5095 if (TTE_IS_NOSYNC(ttep)) { 5096 attr |= HAT_NOSYNC; 5097 } 5098 if (TTE_IS_SIDEFFECT(ttep)) { 5099 attr |= SFMMU_SIDEFFECT; 5100 } 5101 if (!TTE_IS_VCACHEABLE(ttep)) { 5102 attr |= SFMMU_UNCACHEVTTE; 5103 } 5104 if (!TTE_IS_PCACHEABLE(ttep)) { 5105 attr |= SFMMU_UNCACHEPTTE; 5106 } 5107 return (attr); 5108 } 5109 5110 /* 5111 * hat_chgprot is a deprecated hat call. New segment drivers 5112 * should store all attributes and use hat_*attr calls. 5113 * 5114 * Change the protections in the virtual address range 5115 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5116 * then remove write permission, leaving the other 5117 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 5118 * 5119 */ 5120 void 5121 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5122 { 5123 struct hmehash_bucket *hmebp; 5124 hmeblk_tag hblktag; 5125 int hmeshift, hashno = 1; 5126 struct hme_blk *hmeblkp, *list = NULL; 5127 caddr_t endaddr; 5128 cpuset_t cpuset; 5129 demap_range_t dmr; 5130 5131 ASSERT((len & MMU_PAGEOFFSET) == 0); 5132 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5133 5134 if (sfmmup->sfmmu_xhat_provider) { 5135 XHAT_CHGPROT(sfmmup, addr, len, vprot); 5136 return; 5137 } else { 5138 /* 5139 * This must be a CPU HAT. 
If the address space has 5140 * XHATs attached, change attributes for all of them, 5141 * just in case 5142 */ 5143 ASSERT(sfmmup->sfmmu_as != NULL); 5144 if (sfmmup->sfmmu_as->a_xhat != NULL) 5145 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 5146 } 5147 5148 CPUSET_ZERO(cpuset); 5149 5150 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 5151 ((addr + len) > (caddr_t)USERLIMIT)) { 5152 panic("user addr %p vprot %x in kernel space", 5153 (void *)addr, vprot); 5154 } 5155 endaddr = addr + len; 5156 hblktag.htag_id = sfmmup; 5157 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5158 DEMAP_RANGE_INIT(sfmmup, &dmr); 5159 5160 while (addr < endaddr) { 5161 hmeshift = HME_HASH_SHIFT(hashno); 5162 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5163 hblktag.htag_rehash = hashno; 5164 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5165 5166 SFMMU_HASH_LOCK(hmebp); 5167 5168 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5169 if (hmeblkp != NULL) { 5170 ASSERT(!hmeblkp->hblk_shared); 5171 /* 5172 * We've encountered a shadow hmeblk so skip the range 5173 * of the next smaller mapping size. 5174 */ 5175 if (hmeblkp->hblk_shw_bit) { 5176 ASSERT(sfmmup != ksfmmup); 5177 ASSERT(hashno > 1); 5178 addr = (caddr_t)P2END((uintptr_t)addr, 5179 TTEBYTES(hashno - 1)); 5180 } else { 5181 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 5182 addr, endaddr, &dmr, vprot); 5183 } 5184 SFMMU_HASH_UNLOCK(hmebp); 5185 hashno = 1; 5186 continue; 5187 } 5188 SFMMU_HASH_UNLOCK(hmebp); 5189 5190 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5191 /* 5192 * We have traversed the whole list and rehashed 5193 * if necessary without finding the address to chgprot. 5194 * This is ok so we increment the address by the 5195 * smallest hmeblk range for kernel mappings and the 5196 * largest hmeblk range, to account for shadow hmeblks, 5197 * for user mappings and continue. 5198 */ 5199 if (sfmmup == ksfmmup) 5200 addr = (caddr_t)P2END((uintptr_t)addr, 5201 TTEBYTES(1)); 5202 else 5203 addr = (caddr_t)P2END((uintptr_t)addr, 5204 TTEBYTES(hashno)); 5205 hashno = 1; 5206 } else { 5207 hashno++; 5208 } 5209 } 5210 5211 sfmmu_hblks_list_purge(&list); 5212 DEMAP_RANGE_FLUSH(&dmr); 5213 cpuset = sfmmup->sfmmu_cpusran; 5214 xt_sync(cpuset); 5215 } 5216 5217 /* 5218 * This function chgprots a range of addresses in an hmeblk. It returns the 5219 * next address that needs to be chgprot. 5220 * It should be called with the hash lock held. 5221 * XXX It should be possible to optimize chgprot by not flushing every time but 5222 * on the other hand: 5223 * 1. do one flush crosscall. 5224 * 2.
only flush if we are increasing permissions (make sure this will work) 5225 */ 5226 static caddr_t 5227 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5228 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5229 { 5230 uint_t pprot; 5231 tte_t tte, ttemod; 5232 struct sf_hment *sfhmep; 5233 uint_t tteflags; 5234 int ttesz; 5235 struct page *pp = NULL; 5236 kmutex_t *pml, *pmtx; 5237 int ret; 5238 int use_demap_range; 5239 #if defined(SF_ERRATA_57) 5240 int check_exec; 5241 #endif 5242 5243 ASSERT(in_hblk_range(hmeblkp, addr)); 5244 ASSERT(hmeblkp->hblk_shw_bit == 0); 5245 ASSERT(!hmeblkp->hblk_shared); 5246 5247 #ifdef DEBUG 5248 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5249 (endaddr < get_hblk_endaddr(hmeblkp))) { 5250 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5251 } 5252 #endif /* DEBUG */ 5253 5254 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5255 ttesz = get_hblk_ttesz(hmeblkp); 5256 5257 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5258 #if defined(SF_ERRATA_57) 5259 check_exec = (sfmmup != ksfmmup) && 5260 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5261 ((vprot & PROT_EXEC) == PROT_EXEC); 5262 #endif 5263 HBLKTOHME(sfhmep, hmeblkp, addr); 5264 5265 /* 5266 * Flush the current demap region if addresses have been 5267 * skipped or the page size doesn't match. 5268 */ 5269 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5270 if (use_demap_range) { 5271 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5272 } else { 5273 DEMAP_RANGE_FLUSH(dmrp); 5274 } 5275 5276 while (addr < endaddr) { 5277 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5278 if (TTE_IS_VALID(&tte)) { 5279 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5280 /* 5281 * if the new protection is the same as old 5282 * continue 5283 */ 5284 goto next_addr; 5285 } 5286 pml = NULL; 5287 pp = sfhmep->hme_page; 5288 if (pp) { 5289 pml = sfmmu_mlist_enter(pp); 5290 } 5291 if (pp != sfhmep->hme_page) { 5292 /* 5293 * tte most have been unloaded 5294 * underneath us. Recheck 5295 */ 5296 ASSERT(pml); 5297 sfmmu_mlist_exit(pml); 5298 continue; 5299 } 5300 5301 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5302 5303 ttemod = tte; 5304 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5305 #if defined(SF_ERRATA_57) 5306 if (check_exec && addr < errata57_limit) 5307 ttemod.tte_exec_perm = 0; 5308 #endif 5309 ret = sfmmu_modifytte_try(&tte, &ttemod, 5310 &sfhmep->hme_tte); 5311 5312 if (ret < 0) { 5313 /* tte changed underneath us */ 5314 if (pml) { 5315 sfmmu_mlist_exit(pml); 5316 } 5317 continue; 5318 } 5319 5320 if (tteflags & TTE_HWWR_INT) { 5321 /* 5322 * need to sync if we are clearing modify bit. 5323 */ 5324 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5325 } 5326 5327 if (pp && PP_ISRO(pp)) { 5328 if (pprot & TTE_WRPRM_INT) { 5329 pmtx = sfmmu_page_enter(pp); 5330 PP_CLRRO(pp); 5331 sfmmu_page_exit(pmtx); 5332 } 5333 } 5334 5335 if (ret > 0 && use_demap_range) { 5336 DEMAP_RANGE_MARKPG(dmrp, addr); 5337 } else if (ret > 0) { 5338 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5339 } 5340 5341 if (pml) { 5342 sfmmu_mlist_exit(pml); 5343 } 5344 } 5345 next_addr: 5346 addr += TTEBYTES(ttesz); 5347 sfhmep++; 5348 DEMAP_RANGE_NEXTPG(dmrp); 5349 } 5350 return (addr); 5351 } 5352 5353 /* 5354 * This routine is deprecated and should only be used by hat_chgprot. 5355 * The correct routine is sfmmu_vtop_attr. 5356 * This routine converts virtual page protections to physical ones. It will 5357 * update the tteflags field with the tte mask corresponding to the protections 5358 * affected and it returns the new protections. 
It will also clear the modify 5359 * bit if we are taking away write permission. This is necessary since the 5360 * modify bit is the hardware permission bit and we need to clear it in order 5361 * to detect write faults. 5362 * It accepts the following special protections: 5363 * ~PROT_WRITE = remove write permissions. 5364 * ~PROT_USER = remove user permissions. 5365 */ 5366 static uint_t 5367 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5368 { 5369 if (vprot == (uint_t)~PROT_WRITE) { 5370 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5371 return (0); /* will cause wrprm to be cleared */ 5372 } 5373 if (vprot == (uint_t)~PROT_USER) { 5374 *tteflagsp = TTE_PRIV_INT; 5375 return (0); /* will cause privprm to be cleared */ 5376 } 5377 if ((vprot == 0) || (vprot == PROT_USER) || 5378 ((vprot & PROT_ALL) != vprot)) { 5379 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5380 } 5381 5382 switch (vprot) { 5383 case (PROT_READ): 5384 case (PROT_EXEC): 5385 case (PROT_EXEC | PROT_READ): 5386 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5387 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5388 case (PROT_WRITE): 5389 case (PROT_WRITE | PROT_READ): 5390 case (PROT_EXEC | PROT_WRITE): 5391 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5392 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5393 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5394 case (PROT_USER | PROT_READ): 5395 case (PROT_USER | PROT_EXEC): 5396 case (PROT_USER | PROT_EXEC | PROT_READ): 5397 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5398 return (0); /* clr prv and wrt */ 5399 case (PROT_USER | PROT_WRITE): 5400 case (PROT_USER | PROT_WRITE | PROT_READ): 5401 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5402 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5403 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5404 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5405 default: 5406 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5407 } 5408 return (0); 5409 } 5410 5411 /* 5412 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5413 * the normal algorithm would take too long for a very large VA range with 5414 * few real mappings. This routine just walks thru all HMEs in the global 5415 * hash table to find and remove mappings. 5416 */ 5417 static void 5418 hat_unload_large_virtual( 5419 struct hat *sfmmup, 5420 caddr_t startaddr, 5421 size_t len, 5422 uint_t flags, 5423 hat_callback_t *callback) 5424 { 5425 struct hmehash_bucket *hmebp; 5426 struct hme_blk *hmeblkp; 5427 struct hme_blk *pr_hblk = NULL; 5428 struct hme_blk *nx_hblk; 5429 struct hme_blk *list = NULL; 5430 int i; 5431 uint64_t hblkpa, prevpa, nx_pa; 5432 demap_range_t dmr, *dmrp; 5433 cpuset_t cpuset; 5434 caddr_t endaddr = startaddr + len; 5435 caddr_t sa; 5436 caddr_t ea; 5437 caddr_t cb_sa[MAX_CB_ADDR]; 5438 caddr_t cb_ea[MAX_CB_ADDR]; 5439 int addr_cnt = 0; 5440 int a = 0; 5441 5442 if (sfmmup->sfmmu_free) { 5443 dmrp = NULL; 5444 } else { 5445 dmrp = &dmr; 5446 DEMAP_RANGE_INIT(sfmmup, dmrp); 5447 } 5448 5449 /* 5450 * Loop through all the hash buckets of HME blocks looking for matches. 
5451 */ 5452 for (i = 0; i <= UHMEHASH_SZ; i++) { 5453 hmebp = &uhme_hash[i]; 5454 SFMMU_HASH_LOCK(hmebp); 5455 hmeblkp = hmebp->hmeblkp; 5456 hblkpa = hmebp->hmeh_nextpa; 5457 prevpa = 0; 5458 pr_hblk = NULL; 5459 while (hmeblkp) { 5460 nx_hblk = hmeblkp->hblk_next; 5461 nx_pa = hmeblkp->hblk_nextpa; 5462 5463 /* 5464 * skip if not this context, if a shadow block or 5465 * if the mapping is not in the requested range 5466 */ 5467 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5468 hmeblkp->hblk_shw_bit || 5469 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5470 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5471 pr_hblk = hmeblkp; 5472 prevpa = hblkpa; 5473 goto next_block; 5474 } 5475 5476 ASSERT(!hmeblkp->hblk_shared); 5477 /* 5478 * unload if there are any current valid mappings 5479 */ 5480 if (hmeblkp->hblk_vcnt != 0 || 5481 hmeblkp->hblk_hmecnt != 0) 5482 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5483 sa, ea, dmrp, flags); 5484 5485 /* 5486 * on unmap we also release the HME block itself, once 5487 * all mappings are gone. 5488 */ 5489 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5490 !hmeblkp->hblk_vcnt && 5491 !hmeblkp->hblk_hmecnt) { 5492 ASSERT(!hmeblkp->hblk_lckcnt); 5493 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 5494 prevpa, pr_hblk); 5495 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5496 } else { 5497 pr_hblk = hmeblkp; 5498 prevpa = hblkpa; 5499 } 5500 5501 if (callback == NULL) 5502 goto next_block; 5503 5504 /* 5505 * HME blocks may span more than one page, but we may be 5506 * unmapping only one page, so check for a smaller range 5507 * for the callback 5508 */ 5509 if (sa < startaddr) 5510 sa = startaddr; 5511 if (--ea > endaddr) 5512 ea = endaddr - 1; 5513 5514 cb_sa[addr_cnt] = sa; 5515 cb_ea[addr_cnt] = ea; 5516 if (++addr_cnt == MAX_CB_ADDR) { 5517 if (dmrp != NULL) { 5518 DEMAP_RANGE_FLUSH(dmrp); 5519 cpuset = sfmmup->sfmmu_cpusran; 5520 xt_sync(cpuset); 5521 } 5522 5523 for (a = 0; a < MAX_CB_ADDR; ++a) { 5524 callback->hcb_start_addr = cb_sa[a]; 5525 callback->hcb_end_addr = cb_ea[a]; 5526 callback->hcb_function(callback); 5527 } 5528 addr_cnt = 0; 5529 } 5530 5531 next_block: 5532 hmeblkp = nx_hblk; 5533 hblkpa = nx_pa; 5534 } 5535 SFMMU_HASH_UNLOCK(hmebp); 5536 } 5537 5538 sfmmu_hblks_list_purge(&list); 5539 if (dmrp != NULL) { 5540 DEMAP_RANGE_FLUSH(dmrp); 5541 cpuset = sfmmup->sfmmu_cpusran; 5542 xt_sync(cpuset); 5543 } 5544 5545 for (a = 0; a < addr_cnt; ++a) { 5546 callback->hcb_start_addr = cb_sa[a]; 5547 callback->hcb_end_addr = cb_ea[a]; 5548 callback->hcb_function(callback); 5549 } 5550 5551 /* 5552 * Check TSB and TLB page sizes if the process isn't exiting. 5553 */ 5554 if (!sfmmup->sfmmu_free) 5555 sfmmu_check_page_sizes(sfmmup, 0); 5556 } 5557 5558 /* 5559 * Unload all the mappings in the range [addr..addr+len). addr and len must 5560 * be MMU_PAGESIZE aligned. 
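 *
 * A hedged caller sketch for the callback flavor below (illustrative only;
 * mydrv_unmap_notify is a hypothetical handler, and hcb_start_addr and
 * hcb_end_addr are filled in by this file before it invokes the
 * caller-supplied hcb_function):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = mydrv_unmap_notify;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);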
5561 */ 5562 5563 extern struct seg *segkmap; 5564 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5565 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5566 5567 5568 void 5569 hat_unload_callback( 5570 struct hat *sfmmup, 5571 caddr_t addr, 5572 size_t len, 5573 uint_t flags, 5574 hat_callback_t *callback) 5575 { 5576 struct hmehash_bucket *hmebp; 5577 hmeblk_tag hblktag; 5578 int hmeshift, hashno, iskernel; 5579 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5580 caddr_t endaddr; 5581 cpuset_t cpuset; 5582 uint64_t hblkpa, prevpa; 5583 int addr_count = 0; 5584 int a; 5585 caddr_t cb_start_addr[MAX_CB_ADDR]; 5586 caddr_t cb_end_addr[MAX_CB_ADDR]; 5587 int issegkmap = ISSEGKMAP(sfmmup, addr); 5588 demap_range_t dmr, *dmrp; 5589 5590 if (sfmmup->sfmmu_xhat_provider) { 5591 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 5592 return; 5593 } else { 5594 /* 5595 * This must be a CPU HAT. If the address space has 5596 * XHATs attached, unload the mappings for all of them, 5597 * just in case 5598 */ 5599 ASSERT(sfmmup->sfmmu_as != NULL); 5600 if (sfmmup->sfmmu_as->a_xhat != NULL) 5601 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 5602 len, flags, callback); 5603 } 5604 5605 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5606 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5607 5608 ASSERT(sfmmup != NULL); 5609 ASSERT((len & MMU_PAGEOFFSET) == 0); 5610 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5611 5612 /* 5613 * Probing through a large VA range (say 63 bits) will be slow, even 5614 * at 4 Meg steps between the probes. So, when the virtual address range 5615 * is very large, search the HME entries for what to unload. 5616 * 5617 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5618 * 5619 * UHMEHASH_SZ is number of hash buckets to examine 5620 * 5621 */ 5622 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5623 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5624 return; 5625 } 5626 5627 CPUSET_ZERO(cpuset); 5628 5629 /* 5630 * If the process is exiting, we can save a lot of fuss since 5631 * we'll flush the TLB when we free the ctx anyway. 5632 */ 5633 if (sfmmup->sfmmu_free) 5634 dmrp = NULL; 5635 else 5636 dmrp = &dmr; 5637 5638 DEMAP_RANGE_INIT(sfmmup, dmrp); 5639 endaddr = addr + len; 5640 hblktag.htag_id = sfmmup; 5641 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5642 5643 /* 5644 * It is likely for the vm to call unload over a wide range of 5645 * addresses that are actually very sparsely populated by 5646 * translations. In order to speed this up the sfmmu hat supports 5647 * the concept of shadow hmeblks. Dummy large page hmeblks that 5648 * correspond to actual small translations are allocated at tteload 5649 * time and are referred to as shadow hmeblks. Now, during unload 5650 * time, we first check if we have a shadow hmeblk for that 5651 * translation. The absence of one means the corresponding address 5652 * range is empty and can be skipped. 5653 * 5654 * The kernel is an exception to above statement and that is why 5655 * we don't use shadow hmeblks and hash starting from the smallest 5656 * page size. 
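 *
 * For example, on a platform where all page sizes are enabled, a user
 * address is probed at the 256M hash level first, then 32M, 4M, 512K and
 * finally 64K; as soon as no (shadow) hmeblk exists at a given level, the
 * whole span of that size can be stepped over. This is only a restatement
 * of the loop below, not a change in behavior.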
5657 */ 5658 if (sfmmup == KHATID) { 5659 iskernel = 1; 5660 hashno = TTE64K; 5661 } else { 5662 iskernel = 0; 5663 if (mmu_page_sizes == max_mmu_page_sizes) { 5664 hashno = TTE256M; 5665 } else { 5666 hashno = TTE4M; 5667 } 5668 } 5669 while (addr < endaddr) { 5670 hmeshift = HME_HASH_SHIFT(hashno); 5671 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5672 hblktag.htag_rehash = hashno; 5673 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5674 5675 SFMMU_HASH_LOCK(hmebp); 5676 5677 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 5678 prevpa, &list); 5679 if (hmeblkp == NULL) { 5680 /* 5681 * didn't find an hmeblk. skip the appropiate 5682 * address range. 5683 */ 5684 SFMMU_HASH_UNLOCK(hmebp); 5685 if (iskernel) { 5686 if (hashno < mmu_hashcnt) { 5687 hashno++; 5688 continue; 5689 } else { 5690 hashno = TTE64K; 5691 addr = (caddr_t)roundup((uintptr_t)addr 5692 + 1, MMU_PAGESIZE64K); 5693 continue; 5694 } 5695 } 5696 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5697 (1 << hmeshift)); 5698 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5699 ASSERT(hashno == TTE64K); 5700 continue; 5701 } 5702 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5703 hashno = TTE512K; 5704 continue; 5705 } 5706 if (mmu_page_sizes == max_mmu_page_sizes) { 5707 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5708 hashno = TTE4M; 5709 continue; 5710 } 5711 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5712 hashno = TTE32M; 5713 continue; 5714 } 5715 hashno = TTE256M; 5716 continue; 5717 } else { 5718 hashno = TTE4M; 5719 continue; 5720 } 5721 } 5722 ASSERT(hmeblkp); 5723 ASSERT(!hmeblkp->hblk_shared); 5724 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5725 /* 5726 * If the valid count is zero we can skip the range 5727 * mapped by this hmeblk. 5728 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5729 * is used by segment drivers as a hint 5730 * that the mapping resource won't be used any longer. 5731 * The best example of this is during exit(). 5732 */ 5733 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5734 get_hblk_span(hmeblkp)); 5735 if ((flags & HAT_UNLOAD_UNMAP) || 5736 (iskernel && !issegkmap)) { 5737 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5738 pr_hblk); 5739 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5740 } 5741 SFMMU_HASH_UNLOCK(hmebp); 5742 5743 if (iskernel) { 5744 hashno = TTE64K; 5745 continue; 5746 } 5747 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5748 ASSERT(hashno == TTE64K); 5749 continue; 5750 } 5751 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5752 hashno = TTE512K; 5753 continue; 5754 } 5755 if (mmu_page_sizes == max_mmu_page_sizes) { 5756 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5757 hashno = TTE4M; 5758 continue; 5759 } 5760 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5761 hashno = TTE32M; 5762 continue; 5763 } 5764 hashno = TTE256M; 5765 continue; 5766 } else { 5767 hashno = TTE4M; 5768 continue; 5769 } 5770 } 5771 if (hmeblkp->hblk_shw_bit) { 5772 /* 5773 * If we encounter a shadow hmeblk we know there is 5774 * smaller sized hmeblks mapping the same address space. 5775 * Decrement the hash size and rehash. 5776 */ 5777 ASSERT(sfmmup != KHATID); 5778 hashno--; 5779 SFMMU_HASH_UNLOCK(hmebp); 5780 continue; 5781 } 5782 5783 /* 5784 * track callback address ranges. 
5785 * only start a new range when it's not contiguous 5786 */ 5787 if (callback != NULL) { 5788 if (addr_count > 0 && 5789 addr == cb_end_addr[addr_count - 1]) 5790 --addr_count; 5791 else 5792 cb_start_addr[addr_count] = addr; 5793 } 5794 5795 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5796 dmrp, flags); 5797 5798 if (callback != NULL) 5799 cb_end_addr[addr_count++] = addr; 5800 5801 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5802 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5803 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5804 pr_hblk); 5805 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5806 } 5807 SFMMU_HASH_UNLOCK(hmebp); 5808 5809 /* 5810 * Notify our caller as to exactly which pages 5811 * have been unloaded. We do these in clumps, 5812 * to minimize the number of xt_sync()s that need to occur. 5813 */ 5814 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5815 DEMAP_RANGE_FLUSH(dmrp); 5816 if (dmrp != NULL) { 5817 cpuset = sfmmup->sfmmu_cpusran; 5818 xt_sync(cpuset); 5819 } 5820 5821 for (a = 0; a < MAX_CB_ADDR; ++a) { 5822 callback->hcb_start_addr = cb_start_addr[a]; 5823 callback->hcb_end_addr = cb_end_addr[a]; 5824 callback->hcb_function(callback); 5825 } 5826 addr_count = 0; 5827 } 5828 if (iskernel) { 5829 hashno = TTE64K; 5830 continue; 5831 } 5832 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5833 ASSERT(hashno == TTE64K); 5834 continue; 5835 } 5836 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5837 hashno = TTE512K; 5838 continue; 5839 } 5840 if (mmu_page_sizes == max_mmu_page_sizes) { 5841 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5842 hashno = TTE4M; 5843 continue; 5844 } 5845 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5846 hashno = TTE32M; 5847 continue; 5848 } 5849 hashno = TTE256M; 5850 } else { 5851 hashno = TTE4M; 5852 } 5853 } 5854 5855 sfmmu_hblks_list_purge(&list); 5856 DEMAP_RANGE_FLUSH(dmrp); 5857 if (dmrp != NULL) { 5858 cpuset = sfmmup->sfmmu_cpusran; 5859 xt_sync(cpuset); 5860 } 5861 if (callback && addr_count != 0) { 5862 for (a = 0; a < addr_count; ++a) { 5863 callback->hcb_start_addr = cb_start_addr[a]; 5864 callback->hcb_end_addr = cb_end_addr[a]; 5865 callback->hcb_function(callback); 5866 } 5867 } 5868 5869 /* 5870 * Check TSB and TLB page sizes if the process isn't exiting. 5871 */ 5872 if (!sfmmup->sfmmu_free) 5873 sfmmu_check_page_sizes(sfmmup, 0); 5874 } 5875 5876 /* 5877 * Unload all the mappings in the range [addr..addr+len). addr and len must 5878 * be MMU_PAGESIZE aligned. 5879 */ 5880 void 5881 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5882 { 5883 if (sfmmup->sfmmu_xhat_provider) { 5884 XHAT_UNLOAD(sfmmup, addr, len, flags); 5885 return; 5886 } 5887 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5888 } 5889 5890 5891 /* 5892 * Find the largest mapping size for this page. 5893 */ 5894 int 5895 fnd_mapping_sz(page_t *pp) 5896 { 5897 int sz; 5898 int p_index; 5899 5900 p_index = PP_MAPINDEX(pp); 5901 5902 sz = 0; 5903 p_index >>= 1; /* don't care about 8K bit */ 5904 for (; p_index; p_index >>= 1) { 5905 sz++; 5906 } 5907 5908 return (sz); 5909 } 5910 5911 /* 5912 * This function unloads a range of addresses for an hmeblk. 5913 * It returns the next address to be unloaded. 5914 * It should be called with the hash lock held. 
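 *
 * The caller pattern used by hat_unload_callback() above is, in sketch
 * form:
 *
 *	SFMMU_HASH_LOCK(hmebp);
 *	addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, dmrp, flags);
 *	SFMMU_HASH_UNLOCK(hmebp);
 *
 * i.e. the return value advances the walk past whatever portion of the
 * hmeblk's range was just processed.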
5915 */ 5916 static caddr_t 5917 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5918 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5919 { 5920 tte_t tte, ttemod; 5921 struct sf_hment *sfhmep; 5922 int ttesz; 5923 long ttecnt; 5924 page_t *pp; 5925 kmutex_t *pml; 5926 int ret; 5927 int use_demap_range; 5928 5929 ASSERT(in_hblk_range(hmeblkp, addr)); 5930 ASSERT(!hmeblkp->hblk_shw_bit); 5931 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 5932 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 5933 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 5934 5935 #ifdef DEBUG 5936 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5937 (endaddr < get_hblk_endaddr(hmeblkp))) { 5938 panic("sfmmu_hblk_unload: partial unload of large page"); 5939 } 5940 #endif /* DEBUG */ 5941 5942 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5943 ttesz = get_hblk_ttesz(hmeblkp); 5944 5945 use_demap_range = ((dmrp == NULL) || 5946 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5947 5948 if (use_demap_range) { 5949 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5950 } else { 5951 DEMAP_RANGE_FLUSH(dmrp); 5952 } 5953 ttecnt = 0; 5954 HBLKTOHME(sfhmep, hmeblkp, addr); 5955 5956 while (addr < endaddr) { 5957 pml = NULL; 5958 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5959 if (TTE_IS_VALID(&tte)) { 5960 pp = sfhmep->hme_page; 5961 if (pp != NULL) { 5962 pml = sfmmu_mlist_enter(pp); 5963 } 5964 5965 /* 5966 * Verify if hme still points to 'pp' now that 5967 * we have p_mapping lock. 5968 */ 5969 if (sfhmep->hme_page != pp) { 5970 if (pp != NULL && sfhmep->hme_page != NULL) { 5971 ASSERT(pml != NULL); 5972 sfmmu_mlist_exit(pml); 5973 /* Re-start this iteration. */ 5974 continue; 5975 } 5976 ASSERT((pp != NULL) && 5977 (sfhmep->hme_page == NULL)); 5978 goto tte_unloaded; 5979 } 5980 5981 /* 5982 * This point on we have both HASH and p_mapping 5983 * lock. 5984 */ 5985 ASSERT(pp == sfhmep->hme_page); 5986 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5987 5988 /* 5989 * We need to loop on modify tte because it is 5990 * possible for pagesync to come along and 5991 * change the software bits beneath us. 5992 * 5993 * Page_unload can also invalidate the tte after 5994 * we read tte outside of p_mapping lock. 5995 */ 5996 again: 5997 ttemod = tte; 5998 5999 TTE_SET_INVALID(&ttemod); 6000 ret = sfmmu_modifytte_try(&tte, &ttemod, 6001 &sfhmep->hme_tte); 6002 6003 if (ret <= 0) { 6004 if (TTE_IS_VALID(&tte)) { 6005 ASSERT(ret < 0); 6006 goto again; 6007 } 6008 if (pp != NULL) { 6009 panic("sfmmu_hblk_unload: pp = 0x%p " 6010 "tte became invalid under mlist" 6011 " lock = 0x%p", (void *)pp, 6012 (void *)pml); 6013 } 6014 continue; 6015 } 6016 6017 if (!(flags & HAT_UNLOAD_NOSYNC)) { 6018 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6019 } 6020 6021 /* 6022 * Ok- we invalidated the tte. Do the rest of the job. 6023 */ 6024 ttecnt++; 6025 6026 if (flags & HAT_UNLOAD_UNLOCK) { 6027 ASSERT(hmeblkp->hblk_lckcnt > 0); 6028 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 6029 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 6030 } 6031 6032 /* 6033 * Normally we would need to flush the page 6034 * from the virtual cache at this point in 6035 * order to prevent a potential cache alias 6036 * inconsistency. 6037 * The particular scenario we need to worry 6038 * about is: 6039 * Given: va1 and va2 are two virtual address 6040 * that alias and map the same physical 6041 * address. 6042 * 1. mapping exists from va1 to pa and data 6043 * has been read into the cache. 6044 * 2. unload va1. 6045 * 3. load va2 and modify data using va2. 
6046 * 4 unload va2. 6047 * 5. load va1 and reference data. Unless we 6048 * flush the data cache when we unload we will 6049 * get stale data. 6050 * Fortunately, page coloring eliminates the 6051 * above scenario by remembering the color a 6052 * physical page was last or is currently 6053 * mapped to. Now, we delay the flush until 6054 * the loading of translations. Only when the 6055 * new translation is of a different color 6056 * are we forced to flush. 6057 */ 6058 if (use_demap_range) { 6059 /* 6060 * Mark this page as needing a demap. 6061 */ 6062 DEMAP_RANGE_MARKPG(dmrp, addr); 6063 } else { 6064 ASSERT(sfmmup != NULL); 6065 ASSERT(!hmeblkp->hblk_shared); 6066 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6067 sfmmup->sfmmu_free, 0); 6068 } 6069 6070 if (pp) { 6071 /* 6072 * Remove the hment from the mapping list 6073 */ 6074 ASSERT(hmeblkp->hblk_hmecnt > 0); 6075 6076 /* 6077 * Again, we cannot 6078 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6079 */ 6080 HME_SUB(sfhmep, pp); 6081 membar_stst(); 6082 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6083 } 6084 6085 ASSERT(hmeblkp->hblk_vcnt > 0); 6086 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6087 6088 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6089 !hmeblkp->hblk_lckcnt); 6090 6091 #ifdef VAC 6092 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6093 if (PP_ISTNC(pp)) { 6094 /* 6095 * If page was temporary 6096 * uncached, try to recache 6097 * it. Note that HME_SUB() was 6098 * called above so p_index and 6099 * mlist had been updated. 6100 */ 6101 conv_tnc(pp, ttesz); 6102 } else if (pp->p_mapping == NULL) { 6103 ASSERT(kpm_enable); 6104 /* 6105 * Page is marked to be in VAC conflict 6106 * to an existing kpm mapping and/or is 6107 * kpm mapped using only the regular 6108 * pagesize. 6109 */ 6110 sfmmu_kpm_hme_unload(pp); 6111 } 6112 } 6113 #endif /* VAC */ 6114 } else if ((pp = sfhmep->hme_page) != NULL) { 6115 /* 6116 * TTE is invalid but the hme 6117 * still exists. let pageunload 6118 * complete its job. 6119 */ 6120 ASSERT(pml == NULL); 6121 pml = sfmmu_mlist_enter(pp); 6122 if (sfhmep->hme_page != NULL) { 6123 sfmmu_mlist_exit(pml); 6124 continue; 6125 } 6126 ASSERT(sfhmep->hme_page == NULL); 6127 } else if (hmeblkp->hblk_hmecnt != 0) { 6128 /* 6129 * pageunload may have not finished decrementing 6130 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6131 * wait for pageunload to finish. Rely on pageunload 6132 * to decrement hblk_hmecnt after hblk_vcnt. 6133 */ 6134 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6135 ASSERT(pml == NULL); 6136 if (pf_is_memory(pfn)) { 6137 pp = page_numtopp_nolock(pfn); 6138 if (pp != NULL) { 6139 pml = sfmmu_mlist_enter(pp); 6140 sfmmu_mlist_exit(pml); 6141 pml = NULL; 6142 } 6143 } 6144 } 6145 6146 tte_unloaded: 6147 /* 6148 * At this point, the tte we are looking at 6149 * should be unloaded, and hme has been unlinked 6150 * from page too. This is important because in 6151 * pageunload, it does ttesync() then HME_SUB. 6152 * We need to make sure HME_SUB has been completed 6153 * so we know ttesync() has been completed. Otherwise, 6154 * at exit time, after return from hat layer, VM will 6155 * release as structure which hat_setstat() (called 6156 * by ttesync()) needs. 
6157 */ 6158 #ifdef DEBUG 6159 { 6160 tte_t dtte; 6161 6162 ASSERT(sfhmep->hme_page == NULL); 6163 6164 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6165 ASSERT(!TTE_IS_VALID(&dtte)); 6166 } 6167 #endif 6168 6169 if (pml) { 6170 sfmmu_mlist_exit(pml); 6171 } 6172 6173 addr += TTEBYTES(ttesz); 6174 sfhmep++; 6175 DEMAP_RANGE_NEXTPG(dmrp); 6176 } 6177 /* 6178 * For shared hmeblks this routine is only called when region is freed 6179 * and no longer referenced. So no need to decrement ttecnt 6180 * in the region structure here. 6181 */ 6182 if (ttecnt > 0 && sfmmup != NULL) { 6183 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6184 } 6185 return (addr); 6186 } 6187 6188 /* 6189 * Synchronize all the mappings in the range [addr..addr+len). 6190 * Can be called with clearflag having two states: 6191 * HAT_SYNC_DONTZERO means just return the rm stats 6192 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6193 */ 6194 void 6195 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6196 { 6197 struct hmehash_bucket *hmebp; 6198 hmeblk_tag hblktag; 6199 int hmeshift, hashno = 1; 6200 struct hme_blk *hmeblkp, *list = NULL; 6201 caddr_t endaddr; 6202 cpuset_t cpuset; 6203 6204 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 6205 ASSERT((sfmmup == ksfmmup) || 6206 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 6207 ASSERT((len & MMU_PAGEOFFSET) == 0); 6208 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6209 (clearflag == HAT_SYNC_ZERORM)); 6210 6211 CPUSET_ZERO(cpuset); 6212 6213 endaddr = addr + len; 6214 hblktag.htag_id = sfmmup; 6215 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6216 6217 /* 6218 * Spitfire supports 4 page sizes. 6219 * Most pages are expected to be of the smallest page 6220 * size (8K) and these will not need to be rehashed. 64K 6221 * pages also don't need to be rehashed because the an hmeblk 6222 * spans 64K of address space. 512K pages might need 1 rehash and 6223 * and 4M pages 2 rehashes. 6224 */ 6225 while (addr < endaddr) { 6226 hmeshift = HME_HASH_SHIFT(hashno); 6227 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6228 hblktag.htag_rehash = hashno; 6229 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6230 6231 SFMMU_HASH_LOCK(hmebp); 6232 6233 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6234 if (hmeblkp != NULL) { 6235 ASSERT(!hmeblkp->hblk_shared); 6236 /* 6237 * We've encountered a shadow hmeblk so skip the range 6238 * of the next smaller mapping size. 6239 */ 6240 if (hmeblkp->hblk_shw_bit) { 6241 ASSERT(sfmmup != ksfmmup); 6242 ASSERT(hashno > 1); 6243 addr = (caddr_t)P2END((uintptr_t)addr, 6244 TTEBYTES(hashno - 1)); 6245 } else { 6246 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6247 addr, endaddr, clearflag); 6248 } 6249 SFMMU_HASH_UNLOCK(hmebp); 6250 hashno = 1; 6251 continue; 6252 } 6253 SFMMU_HASH_UNLOCK(hmebp); 6254 6255 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6256 /* 6257 * We have traversed the whole list and rehashed 6258 * if necessary without finding the address to sync. 6259 * This is ok so we increment the address by the 6260 * smallest hmeblk range for kernel mappings and the 6261 * largest hmeblk range, to account for shadow hmeblks, 6262 * for user mappings and continue. 
6263 */ 6264 if (sfmmup == ksfmmup) 6265 addr = (caddr_t)P2END((uintptr_t)addr, 6266 TTEBYTES(1)); 6267 else 6268 addr = (caddr_t)P2END((uintptr_t)addr, 6269 TTEBYTES(hashno)); 6270 hashno = 1; 6271 } else { 6272 hashno++; 6273 } 6274 } 6275 sfmmu_hblks_list_purge(&list); 6276 cpuset = sfmmup->sfmmu_cpusran; 6277 xt_sync(cpuset); 6278 } 6279 6280 static caddr_t 6281 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6282 caddr_t endaddr, int clearflag) 6283 { 6284 tte_t tte, ttemod; 6285 struct sf_hment *sfhmep; 6286 int ttesz; 6287 struct page *pp; 6288 kmutex_t *pml; 6289 int ret; 6290 6291 ASSERT(hmeblkp->hblk_shw_bit == 0); 6292 ASSERT(!hmeblkp->hblk_shared); 6293 6294 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6295 6296 ttesz = get_hblk_ttesz(hmeblkp); 6297 HBLKTOHME(sfhmep, hmeblkp, addr); 6298 6299 while (addr < endaddr) { 6300 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6301 if (TTE_IS_VALID(&tte)) { 6302 pml = NULL; 6303 pp = sfhmep->hme_page; 6304 if (pp) { 6305 pml = sfmmu_mlist_enter(pp); 6306 } 6307 if (pp != sfhmep->hme_page) { 6308 /* 6309 * tte most have been unloaded 6310 * underneath us. Recheck 6311 */ 6312 ASSERT(pml); 6313 sfmmu_mlist_exit(pml); 6314 continue; 6315 } 6316 6317 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6318 6319 if (clearflag == HAT_SYNC_ZERORM) { 6320 ttemod = tte; 6321 TTE_CLR_RM(&ttemod); 6322 ret = sfmmu_modifytte_try(&tte, &ttemod, 6323 &sfhmep->hme_tte); 6324 if (ret < 0) { 6325 if (pml) { 6326 sfmmu_mlist_exit(pml); 6327 } 6328 continue; 6329 } 6330 6331 if (ret > 0) { 6332 sfmmu_tlb_demap(addr, sfmmup, 6333 hmeblkp, 0, 0); 6334 } 6335 } 6336 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6337 if (pml) { 6338 sfmmu_mlist_exit(pml); 6339 } 6340 } 6341 addr += TTEBYTES(ttesz); 6342 sfhmep++; 6343 } 6344 return (addr); 6345 } 6346 6347 /* 6348 * This function will sync a tte to the page struct and it will 6349 * update the hat stats. Currently it allows us to pass a NULL pp 6350 * and we will simply update the stats. We may want to change this 6351 * so we only keep stats for pages backed by pp's. 6352 */ 6353 static void 6354 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6355 { 6356 uint_t rm = 0; 6357 int sz; 6358 pgcnt_t npgs; 6359 6360 ASSERT(TTE_IS_VALID(ttep)); 6361 6362 if (TTE_IS_NOSYNC(ttep)) { 6363 return; 6364 } 6365 6366 if (TTE_IS_REF(ttep)) { 6367 rm = P_REF; 6368 } 6369 if (TTE_IS_MOD(ttep)) { 6370 rm |= P_MOD; 6371 } 6372 6373 if (rm == 0) { 6374 return; 6375 } 6376 6377 sz = TTE_CSZ(ttep); 6378 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6379 int i; 6380 caddr_t vaddr = addr; 6381 6382 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 6383 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 6384 } 6385 6386 } 6387 6388 /* 6389 * XXX I want to use cas to update nrm bits but they 6390 * currently belong in common/vm and not in hat where 6391 * they should be. 6392 * The nrm bits are protected by the same mutex as 6393 * the one that protects the page's mapping list. 6394 */ 6395 if (!pp) 6396 return; 6397 ASSERT(sfmmu_mlist_held(pp)); 6398 /* 6399 * If the tte is for a large page, we need to sync all the 6400 * pages covered by the tte. 6401 */ 6402 if (sz != TTE8K) { 6403 ASSERT(pp->p_szc != 0); 6404 pp = PP_GROUPLEADER(pp, sz); 6405 ASSERT(sfmmu_mlist_held(pp)); 6406 } 6407 6408 /* Get number of pages from tte size. 
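   For example, a TTE4M translation spans 4M / 8K = 512 constituent 8K
   pages, so the loop below walks all 512 page_t's to propagate the
   R/M bits.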
*/ 6409 npgs = TTEPAGES(sz); 6410 6411 do { 6412 ASSERT(pp); 6413 ASSERT(sfmmu_mlist_held(pp)); 6414 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6415 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 6416 hat_page_setattr(pp, rm); 6417 6418 /* 6419 * Are we done? If not, we must have a large mapping. 6420 * For large mappings we need to sync the rest of the pages 6421 * covered by this tte; goto the next page. 6422 */ 6423 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6424 } 6425 6426 /* 6427 * Execute pre-callback handler of each pa_hment linked to pp 6428 * 6429 * Inputs: 6430 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6431 * capture_cpus: pointer to return value (below) 6432 * 6433 * Returns: 6434 * Propagates the subsystem callback return values back to the caller; 6435 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6436 * is zero if all of the pa_hments are of a type that do not require 6437 * capturing CPUs prior to suspending the mapping, else it is 1. 6438 */ 6439 static int 6440 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6441 { 6442 struct sf_hment *sfhmep; 6443 struct pa_hment *pahmep; 6444 int (*f)(caddr_t, uint_t, uint_t, void *); 6445 int ret; 6446 id_t id; 6447 int locked = 0; 6448 kmutex_t *pml; 6449 6450 ASSERT(PAGE_EXCL(pp)); 6451 if (!sfmmu_mlist_held(pp)) { 6452 pml = sfmmu_mlist_enter(pp); 6453 locked = 1; 6454 } 6455 6456 if (capture_cpus) 6457 *capture_cpus = 0; 6458 6459 top: 6460 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6461 /* 6462 * skip sf_hments corresponding to VA<->PA mappings; 6463 * for pa_hment's, hme_tte.ll is zero 6464 */ 6465 if (!IS_PAHME(sfhmep)) 6466 continue; 6467 6468 pahmep = sfhmep->hme_data; 6469 ASSERT(pahmep != NULL); 6470 6471 /* 6472 * skip if pre-handler has been called earlier in this loop 6473 */ 6474 if (pahmep->flags & flag) 6475 continue; 6476 6477 id = pahmep->cb_id; 6478 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6479 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6480 *capture_cpus = 1; 6481 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6482 pahmep->flags |= flag; 6483 continue; 6484 } 6485 6486 /* 6487 * Drop the mapping list lock to avoid locking order issues. 6488 */ 6489 if (locked) 6490 sfmmu_mlist_exit(pml); 6491 6492 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6493 if (ret != 0) 6494 return (ret); /* caller must do the cleanup */ 6495 6496 if (locked) { 6497 pml = sfmmu_mlist_enter(pp); 6498 pahmep->flags |= flag; 6499 goto top; 6500 } 6501 6502 pahmep->flags |= flag; 6503 } 6504 6505 if (locked) 6506 sfmmu_mlist_exit(pml); 6507 6508 return (0); 6509 } 6510 6511 /* 6512 * Execute post-callback handler of each pa_hment linked to pp 6513 * 6514 * Same overall assumptions and restrictions apply as for 6515 * hat_pageprocess_precallbacks(). 
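 *
 * As a rough map of how these handlers are driven by the relocation code
 * below (summary only; see hat_page_relocate() for the details):
 *
 *	hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
 *	hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
 *	hat_pagesuspend(targ);
 *	... physical copy, then hat_pagereload(targ, repl) ...
 *	hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
 *	hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);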
6516 */ 6517 static void 6518 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6519 { 6520 pfn_t pgpfn = pp->p_pagenum; 6521 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6522 pfn_t newpfn; 6523 struct sf_hment *sfhmep; 6524 struct pa_hment *pahmep; 6525 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6526 id_t id; 6527 int locked = 0; 6528 kmutex_t *pml; 6529 6530 ASSERT(PAGE_EXCL(pp)); 6531 if (!sfmmu_mlist_held(pp)) { 6532 pml = sfmmu_mlist_enter(pp); 6533 locked = 1; 6534 } 6535 6536 top: 6537 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6538 /* 6539 * skip sf_hments corresponding to VA<->PA mappings; 6540 * for pa_hment's, hme_tte.ll is zero 6541 */ 6542 if (!IS_PAHME(sfhmep)) 6543 continue; 6544 6545 pahmep = sfhmep->hme_data; 6546 ASSERT(pahmep != NULL); 6547 6548 if ((pahmep->flags & flag) == 0) 6549 continue; 6550 6551 pahmep->flags &= ~flag; 6552 6553 id = pahmep->cb_id; 6554 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6555 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6556 continue; 6557 6558 /* 6559 * Convert the base page PFN into the constituent PFN 6560 * which is needed by the callback handler. 6561 */ 6562 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6563 6564 /* 6565 * Drop the mapping list lock to avoid locking order issues. 6566 */ 6567 if (locked) 6568 sfmmu_mlist_exit(pml); 6569 6570 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6571 != 0) 6572 panic("sfmmu: posthandler failed"); 6573 6574 if (locked) { 6575 pml = sfmmu_mlist_enter(pp); 6576 goto top; 6577 } 6578 } 6579 6580 if (locked) 6581 sfmmu_mlist_exit(pml); 6582 } 6583 6584 /* 6585 * Suspend locked kernel mapping 6586 */ 6587 void 6588 hat_pagesuspend(struct page *pp) 6589 { 6590 struct sf_hment *sfhmep; 6591 sfmmu_t *sfmmup; 6592 tte_t tte, ttemod; 6593 struct hme_blk *hmeblkp; 6594 caddr_t addr; 6595 int index, cons; 6596 cpuset_t cpuset; 6597 6598 ASSERT(PAGE_EXCL(pp)); 6599 ASSERT(sfmmu_mlist_held(pp)); 6600 6601 mutex_enter(&kpr_suspendlock); 6602 6603 /* 6604 * We're about to suspend a kernel mapping so mark this thread as 6605 * non-traceable by DTrace. This prevents us from running into issues 6606 * with probe context trying to touch a suspended page 6607 * in the relocation codepath itself. 6608 */ 6609 curthread->t_flag |= T_DONTDTRACE; 6610 6611 index = PP_MAPINDEX(pp); 6612 cons = TTE8K; 6613 6614 retry: 6615 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6616 6617 if (IS_PAHME(sfhmep)) 6618 continue; 6619 6620 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6621 continue; 6622 6623 /* 6624 * Loop until we successfully set the suspend bit in 6625 * the TTE. 6626 */ 6627 again: 6628 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6629 ASSERT(TTE_IS_VALID(&tte)); 6630 6631 ttemod = tte; 6632 TTE_SET_SUSPEND(&ttemod); 6633 if (sfmmu_modifytte_try(&tte, &ttemod, 6634 &sfhmep->hme_tte) < 0) 6635 goto again; 6636 6637 /* 6638 * Invalidate TSB entry 6639 */ 6640 hmeblkp = sfmmu_hmetohblk(sfhmep); 6641 6642 sfmmup = hblktosfmmu(hmeblkp); 6643 ASSERT(sfmmup == ksfmmup); 6644 ASSERT(!hmeblkp->hblk_shared); 6645 6646 addr = tte_to_vaddr(hmeblkp, tte); 6647 6648 /* 6649 * No need to make sure that the TSB for this sfmmu is 6650 * not being relocated since it is ksfmmup and thus it 6651 * will never be relocated. 
6652 */ 6653 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6654 6655 /* 6656 * Update xcall stats 6657 */ 6658 cpuset = cpu_ready_set; 6659 CPUSET_DEL(cpuset, CPU->cpu_id); 6660 6661 /* LINTED: constant in conditional context */ 6662 SFMMU_XCALL_STATS(ksfmmup); 6663 6664 /* 6665 * Flush TLB entry on remote CPU's 6666 */ 6667 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6668 (uint64_t)ksfmmup); 6669 xt_sync(cpuset); 6670 6671 /* 6672 * Flush TLB entry on local CPU 6673 */ 6674 vtag_flushpage(addr, (uint64_t)ksfmmup); 6675 } 6676 6677 while (index != 0) { 6678 index = index >> 1; 6679 if (index != 0) 6680 cons++; 6681 if (index & 0x1) { 6682 pp = PP_GROUPLEADER(pp, cons); 6683 goto retry; 6684 } 6685 } 6686 } 6687 6688 #ifdef DEBUG 6689 6690 #define N_PRLE 1024 6691 struct prle { 6692 page_t *targ; 6693 page_t *repl; 6694 int status; 6695 int pausecpus; 6696 hrtime_t whence; 6697 }; 6698 6699 static struct prle page_relocate_log[N_PRLE]; 6700 static int prl_entry; 6701 static kmutex_t prl_mutex; 6702 6703 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6704 mutex_enter(&prl_mutex); \ 6705 page_relocate_log[prl_entry].targ = *(t); \ 6706 page_relocate_log[prl_entry].repl = *(r); \ 6707 page_relocate_log[prl_entry].status = (s); \ 6708 page_relocate_log[prl_entry].pausecpus = (p); \ 6709 page_relocate_log[prl_entry].whence = gethrtime(); \ 6710 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6711 mutex_exit(&prl_mutex); 6712 6713 #else /* !DEBUG */ 6714 #define PAGE_RELOCATE_LOG(t, r, s, p) 6715 #endif 6716 6717 /* 6718 * Core Kernel Page Relocation Algorithm 6719 * 6720 * Input: 6721 * 6722 * target : constituent pages are SE_EXCL locked. 6723 * replacement: constituent pages are SE_EXCL locked. 6724 * 6725 * Output: 6726 * 6727 * nrelocp: number of pages relocated 6728 */ 6729 int 6730 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6731 { 6732 page_t *targ, *repl; 6733 page_t *tpp, *rpp; 6734 kmutex_t *low, *high; 6735 spgcnt_t npages, i; 6736 page_t *pl = NULL; 6737 int old_pil; 6738 cpuset_t cpuset; 6739 int cap_cpus; 6740 int ret; 6741 #ifdef VAC 6742 int cflags = 0; 6743 #endif 6744 6745 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6746 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6747 return (EAGAIN); 6748 } 6749 6750 mutex_enter(&kpr_mutex); 6751 kreloc_thread = curthread; 6752 6753 targ = *target; 6754 repl = *replacement; 6755 ASSERT(repl != NULL); 6756 ASSERT(targ->p_szc == repl->p_szc); 6757 6758 npages = page_get_pagecnt(targ->p_szc); 6759 6760 /* 6761 * unload VA<->PA mappings that are not locked 6762 */ 6763 tpp = targ; 6764 for (i = 0; i < npages; i++) { 6765 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6766 tpp++; 6767 } 6768 6769 /* 6770 * Do "presuspend" callbacks, in a context from which we can still 6771 * block as needed. Note that we don't hold the mapping list lock 6772 * of "targ" at this point due to potential locking order issues; 6773 * we assume that between the hat_pageunload() above and holding 6774 * the SE_EXCL lock that the mapping list *cannot* change at this 6775 * point. 6776 */ 6777 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6778 if (ret != 0) { 6779 /* 6780 * EIO translates to fatal error, for all others cleanup 6781 * and return EAGAIN. 
6782 */ 6783 ASSERT(ret != EIO); 6784 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6785 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6786 kreloc_thread = NULL; 6787 mutex_exit(&kpr_mutex); 6788 return (EAGAIN); 6789 } 6790 6791 /* 6792 * acquire p_mapping list lock for both the target and replacement 6793 * root pages. 6794 * 6795 * low and high refer to the need to grab the mlist locks in a 6796 * specific order in order to prevent race conditions. Thus the 6797 * lower lock must be grabbed before the higher lock. 6798 * 6799 * This will block hat_unload's accessing p_mapping list. Since 6800 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6801 * blocked. Thus, no one else will be accessing the p_mapping list 6802 * while we suspend and reload the locked mapping below. 6803 */ 6804 tpp = targ; 6805 rpp = repl; 6806 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6807 6808 kpreempt_disable(); 6809 6810 /* 6811 * We raise our PIL to 13 so that we don't get captured by 6812 * another CPU or pinned by an interrupt thread. We can't go to 6813 * PIL 14 since the nexus driver(s) may need to interrupt at 6814 * that level in the case of IOMMU pseudo mappings. 6815 */ 6816 cpuset = cpu_ready_set; 6817 CPUSET_DEL(cpuset, CPU->cpu_id); 6818 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6819 old_pil = splr(XCALL_PIL); 6820 } else { 6821 old_pil = -1; 6822 xc_attention(cpuset); 6823 } 6824 ASSERT(getpil() == XCALL_PIL); 6825 6826 /* 6827 * Now do suspend callbacks. In the case of an IOMMU mapping 6828 * this will suspend all DMA activity to the page while it is 6829 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6830 * may be captured at this point we should have acquired any needed 6831 * locks in the presuspend callback. 6832 */ 6833 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6834 if (ret != 0) { 6835 repl = targ; 6836 goto suspend_fail; 6837 } 6838 6839 /* 6840 * Raise the PIL yet again, this time to block all high-level 6841 * interrupts on this CPU. This is necessary to prevent an 6842 * interrupt routine from pinning the thread which holds the 6843 * mapping suspended and then touching the suspended page. 6844 * 6845 * Once the page is suspended we also need to be careful to 6846 * avoid calling any functions which touch any seg_kmem memory 6847 * since that memory may be backed by the very page we are 6848 * relocating in here! 6849 */ 6850 hat_pagesuspend(targ); 6851 6852 /* 6853 * Now that we are confident everybody has stopped using this page, 6854 * copy the page contents. Note we use a physical copy to prevent 6855 * locking issues and to avoid fpRAS because we can't handle it in 6856 * this context. 6857 */ 6858 for (i = 0; i < npages; i++, tpp++, rpp++) { 6859 #ifdef VAC 6860 /* 6861 * If the replacement has a different vcolor than 6862 * the one being replacd, we need to handle VAC 6863 * consistency for it just as we were setting up 6864 * a new mapping to it. 6865 */ 6866 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) && 6867 (tpp->p_vcolor != rpp->p_vcolor) && 6868 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) { 6869 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp)); 6870 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6871 rpp->p_pagenum); 6872 } 6873 #endif 6874 /* 6875 * Copy the contents of the page. 6876 */ 6877 ppcopy_kernel(tpp, rpp); 6878 } 6879 6880 tpp = targ; 6881 rpp = repl; 6882 for (i = 0; i < npages; i++, tpp++, rpp++) { 6883 /* 6884 * Copy attributes. VAC consistency was handled above, 6885 * if required. 
6886 */ 6887 rpp->p_nrm = tpp->p_nrm; 6888 tpp->p_nrm = 0; 6889 rpp->p_index = tpp->p_index; 6890 tpp->p_index = 0; 6891 #ifdef VAC 6892 rpp->p_vcolor = tpp->p_vcolor; 6893 #endif 6894 } 6895 6896 /* 6897 * First, unsuspend the page, if we set the suspend bit, and transfer 6898 * the mapping list from the target page to the replacement page. 6899 * Next process postcallbacks; since pa_hment's are linked only to the 6900 * p_mapping list of root page, we don't iterate over the constituent 6901 * pages. 6902 */ 6903 hat_pagereload(targ, repl); 6904 6905 suspend_fail: 6906 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6907 6908 /* 6909 * Now lower our PIL and release any captured CPUs since we 6910 * are out of the "danger zone". After this it will again be 6911 * safe to acquire adaptive mutex locks, or to drop them... 6912 */ 6913 if (old_pil != -1) { 6914 splx(old_pil); 6915 } else { 6916 xc_dismissed(cpuset); 6917 } 6918 6919 kpreempt_enable(); 6920 6921 sfmmu_mlist_reloc_exit(low, high); 6922 6923 /* 6924 * Postsuspend callbacks should drop any locks held across 6925 * the suspend callbacks. As before, we don't hold the mapping 6926 * list lock at this point.. our assumption is that the mapping 6927 * list still can't change due to our holding SE_EXCL lock and 6928 * there being no unlocked mappings left. Hence the restriction 6929 * on calling context to hat_delete_callback() 6930 */ 6931 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6932 if (ret != 0) { 6933 /* 6934 * The second presuspend call failed: we got here through 6935 * the suspend_fail label above. 6936 */ 6937 ASSERT(ret != EIO); 6938 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6939 kreloc_thread = NULL; 6940 mutex_exit(&kpr_mutex); 6941 return (EAGAIN); 6942 } 6943 6944 /* 6945 * Now that we're out of the performance critical section we can 6946 * take care of updating the hash table, since we still 6947 * hold all the pages locked SE_EXCL at this point we 6948 * needn't worry about things changing out from under us. 6949 */ 6950 tpp = targ; 6951 rpp = repl; 6952 for (i = 0; i < npages; i++, tpp++, rpp++) { 6953 6954 /* 6955 * replace targ with replacement in page_hash table 6956 */ 6957 targ = tpp; 6958 page_relocate_hash(rpp, targ); 6959 6960 /* 6961 * concatenate target; caller of platform_page_relocate() 6962 * expects target to be concatenated after returning. 6963 */ 6964 ASSERT(targ->p_next == targ); 6965 ASSERT(targ->p_prev == targ); 6966 page_list_concat(&pl, &targ); 6967 } 6968 6969 ASSERT(*target == pl); 6970 *nrelocp = npages; 6971 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6972 kreloc_thread = NULL; 6973 mutex_exit(&kpr_mutex); 6974 return (0); 6975 } 6976 6977 /* 6978 * Called when stray pa_hments are found attached to a page which is 6979 * being freed. Notify the subsystem which attached the pa_hment of 6980 * the error if it registered a suitable handler, else panic. 6981 */ 6982 static void 6983 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6984 { 6985 id_t cb_id = pahmep->cb_id; 6986 6987 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6988 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6989 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6990 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6991 return; /* non-fatal */ 6992 } 6993 panic("pa_hment leaked: 0x%p", (void *)pahmep); 6994 } 6995 6996 /* 6997 * Remove all mappings to page 'pp'. 
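 *
 * The caller must hold the page SE_EXCL (see the ASSERT below).  As an
 * example, hat_page_relocate() above strips every unlocked mapping from
 * each constituent page before suspending the locked kernel mappings:
 *
 *	for (i = 0; i < npages; i++, tpp++)
 *		(void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);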
6998 */ 6999 int 7000 hat_pageunload(struct page *pp, uint_t forceflag) 7001 { 7002 struct page *origpp = pp; 7003 struct sf_hment *sfhme, *tmphme; 7004 struct hme_blk *hmeblkp; 7005 kmutex_t *pml; 7006 #ifdef VAC 7007 kmutex_t *pmtx; 7008 #endif 7009 cpuset_t cpuset, tset; 7010 int index, cons; 7011 int xhme_blks; 7012 int pa_hments; 7013 7014 ASSERT(PAGE_EXCL(pp)); 7015 7016 retry_xhat: 7017 tmphme = NULL; 7018 xhme_blks = 0; 7019 pa_hments = 0; 7020 CPUSET_ZERO(cpuset); 7021 7022 pml = sfmmu_mlist_enter(pp); 7023 7024 #ifdef VAC 7025 if (pp->p_kpmref) 7026 sfmmu_kpm_pageunload(pp); 7027 ASSERT(!PP_ISMAPPED_KPM(pp)); 7028 #endif 7029 7030 index = PP_MAPINDEX(pp); 7031 cons = TTE8K; 7032 retry: 7033 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7034 tmphme = sfhme->hme_next; 7035 7036 if (IS_PAHME(sfhme)) { 7037 ASSERT(sfhme->hme_data != NULL); 7038 pa_hments++; 7039 continue; 7040 } 7041 7042 hmeblkp = sfmmu_hmetohblk(sfhme); 7043 if (hmeblkp->hblk_xhat_bit) { 7044 struct xhat_hme_blk *xblk = 7045 (struct xhat_hme_blk *)hmeblkp; 7046 7047 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 7048 pp, forceflag, XBLK2PROVBLK(xblk)); 7049 7050 xhme_blks = 1; 7051 continue; 7052 } 7053 7054 /* 7055 * If there are kernel mappings don't unload them, they will 7056 * be suspended. 7057 */ 7058 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7059 hmeblkp->hblk_tag.htag_id == ksfmmup) 7060 continue; 7061 7062 tset = sfmmu_pageunload(pp, sfhme, cons); 7063 CPUSET_OR(cpuset, tset); 7064 } 7065 7066 while (index != 0) { 7067 index = index >> 1; 7068 if (index != 0) 7069 cons++; 7070 if (index & 0x1) { 7071 /* Go to leading page */ 7072 pp = PP_GROUPLEADER(pp, cons); 7073 ASSERT(sfmmu_mlist_held(pp)); 7074 goto retry; 7075 } 7076 } 7077 7078 /* 7079 * cpuset may be empty if the page was only mapped by segkpm, 7080 * in which case we won't actually cross-trap. 7081 */ 7082 xt_sync(cpuset); 7083 7084 /* 7085 * The page should have no mappings at this point, unless 7086 * we were called from hat_page_relocate() in which case we 7087 * leave the locked mappings which will be suspended later. 7088 */ 7089 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 7090 (forceflag == SFMMU_KERNEL_RELOC)); 7091 7092 #ifdef VAC 7093 if (PP_ISTNC(pp)) { 7094 if (cons == TTE8K) { 7095 pmtx = sfmmu_page_enter(pp); 7096 PP_CLRTNC(pp); 7097 sfmmu_page_exit(pmtx); 7098 } else { 7099 conv_tnc(pp, cons); 7100 } 7101 } 7102 #endif /* VAC */ 7103 7104 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7105 /* 7106 * Unlink any pa_hments and free them, calling back 7107 * the responsible subsystem to notify it of the error. 7108 * This can occur in situations such as drivers leaking 7109 * DMA handles: naughty, but common enough that we'd like 7110 * to keep the system running rather than bringing it 7111 * down with an obscure error like "pa_hment leaked" 7112 * which doesn't aid the user in debugging their driver. 7113 */ 7114 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7115 tmphme = sfhme->hme_next; 7116 if (IS_PAHME(sfhme)) { 7117 struct pa_hment *pahmep = sfhme->hme_data; 7118 sfmmu_pahment_leaked(pahmep); 7119 HME_SUB(sfhme, pp); 7120 kmem_cache_free(pa_hment_cache, pahmep); 7121 } 7122 } 7123 7124 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 7125 } 7126 7127 sfmmu_mlist_exit(pml); 7128 7129 /* 7130 * XHAT may not have finished unloading pages 7131 * because some other thread was waiting for 7132 * mlist lock and XHAT_PAGEUNLOAD let it do 7133 * the job. 
7134 */ 7135 if (xhme_blks) { 7136 pp = origpp; 7137 goto retry_xhat; 7138 } 7139 7140 return (0); 7141 } 7142 7143 cpuset_t 7144 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7145 { 7146 struct hme_blk *hmeblkp; 7147 sfmmu_t *sfmmup; 7148 tte_t tte, ttemod; 7149 #ifdef DEBUG 7150 tte_t orig_old; 7151 #endif /* DEBUG */ 7152 caddr_t addr; 7153 int ttesz; 7154 int ret; 7155 cpuset_t cpuset; 7156 7157 ASSERT(pp != NULL); 7158 ASSERT(sfmmu_mlist_held(pp)); 7159 ASSERT(!PP_ISKAS(pp)); 7160 7161 CPUSET_ZERO(cpuset); 7162 7163 hmeblkp = sfmmu_hmetohblk(sfhme); 7164 7165 readtte: 7166 sfmmu_copytte(&sfhme->hme_tte, &tte); 7167 if (TTE_IS_VALID(&tte)) { 7168 sfmmup = hblktosfmmu(hmeblkp); 7169 ttesz = get_hblk_ttesz(hmeblkp); 7170 /* 7171 * Only unload mappings of 'cons' size. 7172 */ 7173 if (ttesz != cons) 7174 return (cpuset); 7175 7176 /* 7177 * Note that we have p_mapping lock, but no hash lock here. 7178 * hblk_unload() has to have both hash lock AND p_mapping 7179 * lock before it tries to modify tte. So, the tte could 7180 * not become invalid in the sfmmu_modifytte_try() below. 7181 */ 7182 ttemod = tte; 7183 #ifdef DEBUG 7184 orig_old = tte; 7185 #endif /* DEBUG */ 7186 7187 TTE_SET_INVALID(&ttemod); 7188 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7189 if (ret < 0) { 7190 #ifdef DEBUG 7191 /* only R/M bits can change. */ 7192 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7193 #endif /* DEBUG */ 7194 goto readtte; 7195 } 7196 7197 if (ret == 0) { 7198 panic("pageunload: cas failed?"); 7199 } 7200 7201 addr = tte_to_vaddr(hmeblkp, tte); 7202 7203 if (hmeblkp->hblk_shared) { 7204 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7205 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7206 sf_region_t *rgnp; 7207 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7208 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7209 ASSERT(srdp != NULL); 7210 rgnp = srdp->srd_hmergnp[rid]; 7211 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7212 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7213 sfmmu_ttesync(NULL, addr, &tte, pp); 7214 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7215 atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1); 7216 } else { 7217 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7218 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 7219 7220 /* 7221 * We need to flush the page from the virtual cache 7222 * in order to prevent a virtual cache alias 7223 * inconsistency. The particular scenario we need 7224 * to worry about is: 7225 * Given: va1 and va2 are two virtual address that 7226 * alias and will map the same physical address. 7227 * 1. mapping exists from va1 to pa and data has 7228 * been read into the cache. 7229 * 2. unload va1. 7230 * 3. load va2 and modify data using va2. 7231 * 4 unload va2. 7232 * 5. load va1 and reference data. Unless we flush 7233 * the data cache when we unload we will get 7234 * stale data. 7235 * This scenario is taken care of by using virtual 7236 * page coloring. 7237 */ 7238 if (sfmmup->sfmmu_ismhat) { 7239 /* 7240 * Flush TSBs, TLBs and caches 7241 * of every process 7242 * sharing this ism segment. 
7243 */ 7244 sfmmu_hat_lock_all(); 7245 mutex_enter(&ism_mlist_lock); 7246 kpreempt_disable(); 7247 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7248 pp->p_pagenum, CACHE_NO_FLUSH); 7249 kpreempt_enable(); 7250 mutex_exit(&ism_mlist_lock); 7251 sfmmu_hat_unlock_all(); 7252 cpuset = cpu_ready_set; 7253 } else { 7254 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7255 cpuset = sfmmup->sfmmu_cpusran; 7256 } 7257 } 7258 7259 /* 7260 * Hme_sub has to run after ttesync() and a_rss update. 7261 * See hblk_unload(). 7262 */ 7263 HME_SUB(sfhme, pp); 7264 membar_stst(); 7265 7266 /* 7267 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7268 * since pteload may have done a HME_ADD() right after 7269 * we did the HME_SUB() above. Hmecnt is now maintained 7270 * by cas only. no lock guranteed its value. The only 7271 * gurantee we have is the hmecnt should not be less than 7272 * what it should be so the hblk will not be taken away. 7273 * It's also important that we decremented the hmecnt after 7274 * we are done with hmeblkp so that this hmeblk won't be 7275 * stolen. 7276 */ 7277 ASSERT(hmeblkp->hblk_hmecnt > 0); 7278 ASSERT(hmeblkp->hblk_vcnt > 0); 7279 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 7280 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 7281 /* 7282 * This is bug 4063182. 7283 * XXX: fixme 7284 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7285 * !hmeblkp->hblk_lckcnt); 7286 */ 7287 } else { 7288 panic("invalid tte? pp %p &tte %p", 7289 (void *)pp, (void *)&tte); 7290 } 7291 7292 return (cpuset); 7293 } 7294 7295 /* 7296 * While relocating a kernel page, this function will move the mappings 7297 * from tpp to dpp and modify any associated data with these mappings. 7298 * It also unsuspends the suspended kernel mapping. 7299 */ 7300 static void 7301 hat_pagereload(struct page *tpp, struct page *dpp) 7302 { 7303 struct sf_hment *sfhme; 7304 tte_t tte, ttemod; 7305 int index, cons; 7306 7307 ASSERT(getpil() == PIL_MAX); 7308 ASSERT(sfmmu_mlist_held(tpp)); 7309 ASSERT(sfmmu_mlist_held(dpp)); 7310 7311 index = PP_MAPINDEX(tpp); 7312 cons = TTE8K; 7313 7314 /* Update real mappings to the page */ 7315 retry: 7316 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7317 if (IS_PAHME(sfhme)) 7318 continue; 7319 sfmmu_copytte(&sfhme->hme_tte, &tte); 7320 ttemod = tte; 7321 7322 /* 7323 * replace old pfn with new pfn in TTE 7324 */ 7325 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7326 7327 /* 7328 * clear suspend bit 7329 */ 7330 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7331 TTE_CLR_SUSPEND(&ttemod); 7332 7333 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7334 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7335 7336 /* 7337 * set hme_page point to new page 7338 */ 7339 sfhme->hme_page = dpp; 7340 } 7341 7342 /* 7343 * move p_mapping list from old page to new page 7344 */ 7345 dpp->p_mapping = tpp->p_mapping; 7346 tpp->p_mapping = NULL; 7347 dpp->p_share = tpp->p_share; 7348 tpp->p_share = 0; 7349 7350 while (index != 0) { 7351 index = index >> 1; 7352 if (index != 0) 7353 cons++; 7354 if (index & 0x1) { 7355 tpp = PP_GROUPLEADER(tpp, cons); 7356 dpp = PP_GROUPLEADER(dpp, cons); 7357 goto retry; 7358 } 7359 } 7360 7361 curthread->t_flag &= ~T_DONTDTRACE; 7362 mutex_exit(&kpr_suspendlock); 7363 } 7364 7365 uint_t 7366 hat_pagesync(struct page *pp, uint_t clearflag) 7367 { 7368 struct sf_hment *sfhme, *tmphme = NULL; 7369 struct hme_blk *hmeblkp; 7370 kmutex_t *pml; 7371 cpuset_t cpuset, tset; 7372 int index, cons; 7373 extern ulong_t po_share; 7374 page_t 
*save_pp = pp; 7375 int stop_on_sh = 0; 7376 uint_t shcnt; 7377 7378 CPUSET_ZERO(cpuset); 7379 7380 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7381 return (PP_GENERIC_ATTR(pp)); 7382 } 7383 7384 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7385 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7386 return (PP_GENERIC_ATTR(pp)); 7387 } 7388 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7389 return (PP_GENERIC_ATTR(pp)); 7390 } 7391 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7392 if (pp->p_share > po_share) { 7393 hat_page_setattr(pp, P_REF); 7394 return (PP_GENERIC_ATTR(pp)); 7395 } 7396 stop_on_sh = 1; 7397 shcnt = 0; 7398 } 7399 } 7400 7401 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7402 pml = sfmmu_mlist_enter(pp); 7403 index = PP_MAPINDEX(pp); 7404 cons = TTE8K; 7405 retry: 7406 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7407 /* 7408 * We need to save the next hment on the list since 7409 * it is possible for pagesync to remove an invalid hment 7410 * from the list. 7411 */ 7412 tmphme = sfhme->hme_next; 7413 if (IS_PAHME(sfhme)) 7414 continue; 7415 /* 7416 * If we are looking for large mappings and this hme doesn't 7417 * reach the range we are seeking, just ignore it. 7418 */ 7419 hmeblkp = sfmmu_hmetohblk(sfhme); 7420 if (hmeblkp->hblk_xhat_bit) 7421 continue; 7422 7423 if (hme_size(sfhme) < cons) 7424 continue; 7425 7426 if (stop_on_sh) { 7427 if (hmeblkp->hblk_shared) { 7428 sf_srd_t *srdp = hblktosrd(hmeblkp); 7429 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7430 sf_region_t *rgnp; 7431 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7432 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7433 ASSERT(srdp != NULL); 7434 rgnp = srdp->srd_hmergnp[rid]; 7435 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7436 rgnp, rid); 7437 shcnt += rgnp->rgn_refcnt; 7438 } else { 7439 shcnt++; 7440 } 7441 if (shcnt > po_share) { 7442 /* 7443 * tell the pager to spare the page this time 7444 * around. 7445 */ 7446 hat_page_setattr(save_pp, P_REF); 7447 index = 0; 7448 break; 7449 } 7450 } 7451 tset = sfmmu_pagesync(pp, sfhme, 7452 clearflag & ~HAT_SYNC_STOPON_RM); 7453 CPUSET_OR(cpuset, tset); 7454 7455 /* 7456 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7457 * as the "ref" or "mod" is set or share cnt exceeds po_share. 
7458 */ 7459 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7460 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7461 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7462 index = 0; 7463 break; 7464 } 7465 } 7466 7467 while (index) { 7468 index = index >> 1; 7469 cons++; 7470 if (index & 0x1) { 7471 /* Go to leading page */ 7472 pp = PP_GROUPLEADER(pp, cons); 7473 goto retry; 7474 } 7475 } 7476 7477 xt_sync(cpuset); 7478 sfmmu_mlist_exit(pml); 7479 return (PP_GENERIC_ATTR(save_pp)); 7480 } 7481 7482 /* 7483 * Get all the hardware dependent attributes for a page struct 7484 */ 7485 static cpuset_t 7486 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7487 uint_t clearflag) 7488 { 7489 caddr_t addr; 7490 tte_t tte, ttemod; 7491 struct hme_blk *hmeblkp; 7492 int ret; 7493 sfmmu_t *sfmmup; 7494 cpuset_t cpuset; 7495 7496 ASSERT(pp != NULL); 7497 ASSERT(sfmmu_mlist_held(pp)); 7498 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7499 (clearflag == HAT_SYNC_ZERORM)); 7500 7501 SFMMU_STAT(sf_pagesync); 7502 7503 CPUSET_ZERO(cpuset); 7504 7505 sfmmu_pagesync_retry: 7506 7507 sfmmu_copytte(&sfhme->hme_tte, &tte); 7508 if (TTE_IS_VALID(&tte)) { 7509 hmeblkp = sfmmu_hmetohblk(sfhme); 7510 sfmmup = hblktosfmmu(hmeblkp); 7511 addr = tte_to_vaddr(hmeblkp, tte); 7512 if (clearflag == HAT_SYNC_ZERORM) { 7513 ttemod = tte; 7514 TTE_CLR_RM(&ttemod); 7515 ret = sfmmu_modifytte_try(&tte, &ttemod, 7516 &sfhme->hme_tte); 7517 if (ret < 0) { 7518 /* 7519 * cas failed and the new value is not what 7520 * we want. 7521 */ 7522 goto sfmmu_pagesync_retry; 7523 } 7524 7525 if (ret > 0) { 7526 /* we win the cas */ 7527 if (hmeblkp->hblk_shared) { 7528 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7529 uint_t rid = 7530 hmeblkp->hblk_tag.htag_rid; 7531 sf_region_t *rgnp; 7532 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7533 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7534 ASSERT(srdp != NULL); 7535 rgnp = srdp->srd_hmergnp[rid]; 7536 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7537 srdp, rgnp, rid); 7538 cpuset = sfmmu_rgntlb_demap(addr, 7539 rgnp, hmeblkp, 1); 7540 } else { 7541 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7542 0, 0); 7543 cpuset = sfmmup->sfmmu_cpusran; 7544 } 7545 } 7546 } 7547 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7548 &tte, pp); 7549 } 7550 return (cpuset); 7551 } 7552 7553 /* 7554 * Remove write permission from a mappings to a page, so that 7555 * we can detect the next modification of it. This requires modifying 7556 * the TTE then invalidating (demap) any TLB entry using that TTE. 7557 * This code is similar to sfmmu_pagesync(). 7558 */ 7559 static cpuset_t 7560 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7561 { 7562 caddr_t addr; 7563 tte_t tte; 7564 tte_t ttemod; 7565 struct hme_blk *hmeblkp; 7566 int ret; 7567 sfmmu_t *sfmmup; 7568 cpuset_t cpuset; 7569 7570 ASSERT(pp != NULL); 7571 ASSERT(sfmmu_mlist_held(pp)); 7572 7573 CPUSET_ZERO(cpuset); 7574 SFMMU_STAT(sf_clrwrt); 7575 7576 retry: 7577 7578 sfmmu_copytte(&sfhme->hme_tte, &tte); 7579 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7580 hmeblkp = sfmmu_hmetohblk(sfhme); 7581 7582 /* 7583 * xhat mappings should never be to a VMODSORT page. 
7584 */ 7585 ASSERT(hmeblkp->hblk_xhat_bit == 0); 7586 7587 sfmmup = hblktosfmmu(hmeblkp); 7588 addr = tte_to_vaddr(hmeblkp, tte); 7589 7590 ttemod = tte; 7591 TTE_CLR_WRT(&ttemod); 7592 TTE_CLR_MOD(&ttemod); 7593 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7594 7595 /* 7596 * if cas failed and the new value is not what 7597 * we want retry 7598 */ 7599 if (ret < 0) 7600 goto retry; 7601 7602 /* we win the cas */ 7603 if (ret > 0) { 7604 if (hmeblkp->hblk_shared) { 7605 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7606 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7607 sf_region_t *rgnp; 7608 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7609 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7610 ASSERT(srdp != NULL); 7611 rgnp = srdp->srd_hmergnp[rid]; 7612 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7613 srdp, rgnp, rid); 7614 cpuset = sfmmu_rgntlb_demap(addr, 7615 rgnp, hmeblkp, 1); 7616 } else { 7617 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7618 cpuset = sfmmup->sfmmu_cpusran; 7619 } 7620 } 7621 } 7622 7623 return (cpuset); 7624 } 7625 7626 /* 7627 * Walk all mappings of a page, removing write permission and clearing the 7628 * ref/mod bits. This code is similar to hat_pagesync() 7629 */ 7630 static void 7631 hat_page_clrwrt(page_t *pp) 7632 { 7633 struct sf_hment *sfhme; 7634 struct sf_hment *tmphme = NULL; 7635 kmutex_t *pml; 7636 cpuset_t cpuset; 7637 cpuset_t tset; 7638 int index; 7639 int cons; 7640 7641 CPUSET_ZERO(cpuset); 7642 7643 pml = sfmmu_mlist_enter(pp); 7644 index = PP_MAPINDEX(pp); 7645 cons = TTE8K; 7646 retry: 7647 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7648 tmphme = sfhme->hme_next; 7649 7650 /* 7651 * If we are looking for large mappings and this hme doesn't 7652 * reach the range we are seeking, just ignore its. 7653 */ 7654 7655 if (hme_size(sfhme) < cons) 7656 continue; 7657 7658 tset = sfmmu_pageclrwrt(pp, sfhme); 7659 CPUSET_OR(cpuset, tset); 7660 } 7661 7662 while (index) { 7663 index = index >> 1; 7664 cons++; 7665 if (index & 0x1) { 7666 /* Go to leading page */ 7667 pp = PP_GROUPLEADER(pp, cons); 7668 goto retry; 7669 } 7670 } 7671 7672 xt_sync(cpuset); 7673 sfmmu_mlist_exit(pml); 7674 } 7675 7676 /* 7677 * Set the given REF/MOD/RO bits for the given page. 7678 * For a vnode with a sorted v_pages list, we need to change 7679 * the attributes and the v_pages list together under page_vnode_mutex. 7680 */ 7681 void 7682 hat_page_setattr(page_t *pp, uint_t flag) 7683 { 7684 vnode_t *vp = pp->p_vnode; 7685 page_t **listp; 7686 kmutex_t *pmtx; 7687 kmutex_t *vphm = NULL; 7688 int noshuffle; 7689 7690 noshuffle = flag & P_NSH; 7691 flag &= ~P_NSH; 7692 7693 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7694 7695 /* 7696 * nothing to do if attribute already set 7697 */ 7698 if ((pp->p_nrm & flag) == flag) 7699 return; 7700 7701 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7702 !noshuffle) { 7703 vphm = page_vnode_mutex(vp); 7704 mutex_enter(vphm); 7705 } 7706 7707 pmtx = sfmmu_page_enter(pp); 7708 pp->p_nrm |= flag; 7709 sfmmu_page_exit(pmtx); 7710 7711 if (vphm != NULL) { 7712 /* 7713 * Some File Systems examine v_pages for NULL w/o 7714 * grabbing the vphm mutex. Must not let it become NULL when 7715 * pp is the only page on the list. 
7716 */ 7717 if (pp->p_vpnext != pp) { 7718 page_vpsub(&vp->v_pages, pp); 7719 if (vp->v_pages != NULL) 7720 listp = &vp->v_pages->p_vpprev->p_vpnext; 7721 else 7722 listp = &vp->v_pages; 7723 page_vpadd(listp, pp); 7724 } 7725 mutex_exit(vphm); 7726 } 7727 } 7728 7729 void 7730 hat_page_clrattr(page_t *pp, uint_t flag) 7731 { 7732 vnode_t *vp = pp->p_vnode; 7733 kmutex_t *pmtx; 7734 7735 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7736 7737 pmtx = sfmmu_page_enter(pp); 7738 7739 /* 7740 * Caller is expected to hold page's io lock for VMODSORT to work 7741 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7742 * bit is cleared. 7743 * We don't have assert to avoid tripping some existing third party 7744 * code. The dirty page is moved back to top of the v_page list 7745 * after IO is done in pvn_write_done(). 7746 */ 7747 pp->p_nrm &= ~flag; 7748 sfmmu_page_exit(pmtx); 7749 7750 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7751 7752 /* 7753 * VMODSORT works by removing write permissions and getting 7754 * a fault when a page is made dirty. At this point 7755 * we need to remove write permission from all mappings 7756 * to this page. 7757 */ 7758 hat_page_clrwrt(pp); 7759 } 7760 } 7761 7762 uint_t 7763 hat_page_getattr(page_t *pp, uint_t flag) 7764 { 7765 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7766 return ((uint_t)(pp->p_nrm & flag)); 7767 } 7768 7769 /* 7770 * DEBUG kernels: verify that a kernel va<->pa translation 7771 * is safe by checking the underlying page_t is in a page 7772 * relocation-safe state. 7773 */ 7774 #ifdef DEBUG 7775 void 7776 sfmmu_check_kpfn(pfn_t pfn) 7777 { 7778 page_t *pp; 7779 int index, cons; 7780 7781 if (hat_check_vtop == 0) 7782 return; 7783 7784 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7785 return; 7786 7787 pp = page_numtopp_nolock(pfn); 7788 if (!pp) 7789 return; 7790 7791 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7792 return; 7793 7794 /* 7795 * Handed a large kernel page, we dig up the root page since we 7796 * know the root page might have the lock also. 7797 */ 7798 if (pp->p_szc != 0) { 7799 index = PP_MAPINDEX(pp); 7800 cons = TTE8K; 7801 again: 7802 while (index != 0) { 7803 index >>= 1; 7804 if (index != 0) 7805 cons++; 7806 if (index & 0x1) { 7807 pp = PP_GROUPLEADER(pp, cons); 7808 goto again; 7809 } 7810 } 7811 } 7812 7813 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7814 return; 7815 7816 /* 7817 * Pages need to be locked or allocated "permanent" (either from 7818 * static_arena arena or explicitly setting PG_NORELOC when calling 7819 * page_create_va()) for VA->PA translations to be valid. 7820 */ 7821 if (!PP_ISNORELOC(pp)) 7822 panic("Illegal VA->PA translation, pp 0x%p not permanent", 7823 (void *)pp); 7824 else 7825 panic("Illegal VA->PA translation, pp 0x%p not locked", 7826 (void *)pp); 7827 } 7828 #endif /* DEBUG */ 7829 7830 /* 7831 * Returns a page frame number for a given virtual address. 7832 * Returns PFN_INVALID to indicate an invalid mapping 7833 */ 7834 pfn_t 7835 hat_getpfnum(struct hat *hat, caddr_t addr) 7836 { 7837 pfn_t pfn; 7838 tte_t tte; 7839 7840 /* 7841 * We would like to 7842 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7843 * but we can't because the iommu driver will call this 7844 * routine at interrupt time and it can't grab the as lock 7845 * or it will deadlock: A thread could have the as lock 7846 * and be waiting for io. The io can't complete 7847 * because the interrupt thread is blocked trying to grab 7848 * the as lock. 
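 *
 * A typical kernel use is simply (sketch; the variable kaddr and the
 * surrounding driver context are assumed):
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, kaddr);
 *	if (pfn == PFN_INVALID)
 *		... no valid mapping for kaddr ...
 *
 * kas.a_hat is the kernel hat (ksfmmup), so the lookup takes the kernel
 * path below; user addresses go through sfmmu_uvatopfn().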
7849 */ 7850 7851 ASSERT(hat->sfmmu_xhat_provider == NULL); 7852 7853 if (hat == ksfmmup) { 7854 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7855 ASSERT(segkmem_lpszc > 0); 7856 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7857 if (pfn != PFN_INVALID) { 7858 sfmmu_check_kpfn(pfn); 7859 return (pfn); 7860 } 7861 } else if (segkpm && IS_KPM_ADDR(addr)) { 7862 return (sfmmu_kpm_vatopfn(addr)); 7863 } 7864 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7865 == PFN_SUSPENDED) { 7866 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7867 } 7868 sfmmu_check_kpfn(pfn); 7869 return (pfn); 7870 } else { 7871 return (sfmmu_uvatopfn(addr, hat, NULL)); 7872 } 7873 } 7874 7875 /* 7876 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7877 * Use hat_getpfnum(kas.a_hat, ...) instead. 7878 * 7879 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7880 * but can't right now due to the fact that some software has grown to use 7881 * this interface incorrectly. So for now when the interface is misused, 7882 * return a warning to the user that in the future it won't work in the 7883 * way they're abusing it, and carry on (after disabling page relocation). 7884 */ 7885 pfn_t 7886 hat_getkpfnum(caddr_t addr) 7887 { 7888 pfn_t pfn; 7889 tte_t tte; 7890 int badcaller = 0; 7891 extern int segkmem_reloc; 7892 7893 if (segkpm && IS_KPM_ADDR(addr)) { 7894 badcaller = 1; 7895 pfn = sfmmu_kpm_vatopfn(addr); 7896 } else { 7897 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7898 == PFN_SUSPENDED) { 7899 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7900 } 7901 badcaller = pf_is_memory(pfn); 7902 } 7903 7904 if (badcaller) { 7905 /* 7906 * We can't return PFN_INVALID or the caller may panic 7907 * or corrupt the system. The only alternative is to 7908 * disable page relocation at this point for all kernel 7909 * memory. This will impact any callers of page_relocate() 7910 * such as FMA or DR. 7911 * 7912 * RFE: Add junk here to spit out an ereport so the sysadmin 7913 * can be advised that he should upgrade his device driver 7914 * so that this doesn't happen. 7915 */ 7916 hat_getkpfnum_badcall(caller()); 7917 if (hat_kpr_enabled && segkmem_reloc) { 7918 hat_kpr_enabled = 0; 7919 segkmem_reloc = 0; 7920 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7921 } 7922 } 7923 return (pfn); 7924 } 7925 7926 /* 7927 * This routine will return both pfn and tte for the vaddr. 7928 */ 7929 static pfn_t 7930 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 7931 { 7932 struct hmehash_bucket *hmebp; 7933 hmeblk_tag hblktag; 7934 int hmeshift, hashno = 1; 7935 struct hme_blk *hmeblkp = NULL; 7936 tte_t tte; 7937 7938 struct sf_hment *sfhmep; 7939 pfn_t pfn; 7940 7941 /* support for ISM */ 7942 ism_map_t *ism_map; 7943 ism_blk_t *ism_blkp; 7944 int i; 7945 sfmmu_t *ism_hatid = NULL; 7946 sfmmu_t *locked_hatid = NULL; 7947 sfmmu_t *sv_sfmmup = sfmmup; 7948 caddr_t sv_vaddr = vaddr; 7949 sf_srd_t *srdp; 7950 7951 if (ttep == NULL) { 7952 ttep = &tte; 7953 } else { 7954 ttep->ll = 0; 7955 } 7956 7957 ASSERT(sfmmup != ksfmmup); 7958 SFMMU_STAT(sf_user_vtop); 7959 /* 7960 * Set ism_hatid if vaddr falls in a ISM segment. 
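 * If it does, the lookup below is redirected to the shared ISM hat and
 * vaddr is rebased to an offset within that segment (vaddr minus
 * ism_start()), since ISM translations are kept relative to the start
 * of the ISM segment.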
7961 */ 7962 ism_blkp = sfmmup->sfmmu_iblk; 7963 if (ism_blkp != NULL) { 7964 sfmmu_ismhat_enter(sfmmup, 0); 7965 locked_hatid = sfmmup; 7966 } 7967 while (ism_blkp != NULL && ism_hatid == NULL) { 7968 ism_map = ism_blkp->iblk_maps; 7969 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7970 if (vaddr >= ism_start(ism_map[i]) && 7971 vaddr < ism_end(ism_map[i])) { 7972 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7973 vaddr = (caddr_t)(vaddr - 7974 ism_start(ism_map[i])); 7975 break; 7976 } 7977 } 7978 ism_blkp = ism_blkp->iblk_next; 7979 } 7980 if (locked_hatid) { 7981 sfmmu_ismhat_exit(locked_hatid, 0); 7982 } 7983 7984 hblktag.htag_id = sfmmup; 7985 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 7986 do { 7987 hmeshift = HME_HASH_SHIFT(hashno); 7988 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7989 hblktag.htag_rehash = hashno; 7990 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7991 7992 SFMMU_HASH_LOCK(hmebp); 7993 7994 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7995 if (hmeblkp != NULL) { 7996 ASSERT(!hmeblkp->hblk_shared); 7997 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7998 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7999 SFMMU_HASH_UNLOCK(hmebp); 8000 if (TTE_IS_VALID(ttep)) { 8001 pfn = TTE_TO_PFN(vaddr, ttep); 8002 return (pfn); 8003 } 8004 break; 8005 } 8006 SFMMU_HASH_UNLOCK(hmebp); 8007 hashno++; 8008 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 8009 8010 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 8011 return (PFN_INVALID); 8012 } 8013 srdp = sv_sfmmup->sfmmu_srdp; 8014 ASSERT(srdp != NULL); 8015 ASSERT(srdp->srd_refcnt != 0); 8016 hblktag.htag_id = srdp; 8017 hashno = 1; 8018 do { 8019 hmeshift = HME_HASH_SHIFT(hashno); 8020 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 8021 hblktag.htag_rehash = hashno; 8022 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 8023 8024 SFMMU_HASH_LOCK(hmebp); 8025 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 8026 hmeblkp = hmeblkp->hblk_next) { 8027 uint_t rid; 8028 sf_region_t *rgnp; 8029 caddr_t rsaddr; 8030 caddr_t readdr; 8031 8032 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 8033 sv_sfmmup->sfmmu_hmeregion_map)) { 8034 continue; 8035 } 8036 ASSERT(hmeblkp->hblk_shared); 8037 rid = hmeblkp->hblk_tag.htag_rid; 8038 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8039 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8040 rgnp = srdp->srd_hmergnp[rid]; 8041 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 8042 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 8043 sfmmu_copytte(&sfhmep->hme_tte, ttep); 8044 rsaddr = rgnp->rgn_saddr; 8045 readdr = rsaddr + rgnp->rgn_size; 8046 #ifdef DEBUG 8047 if (TTE_IS_VALID(ttep) || 8048 get_hblk_ttesz(hmeblkp) > TTE8K) { 8049 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 8050 ASSERT(eva > sv_vaddr); 8051 ASSERT(sv_vaddr >= rsaddr); 8052 ASSERT(sv_vaddr < readdr); 8053 ASSERT(eva <= readdr); 8054 } 8055 #endif /* DEBUG */ 8056 /* 8057 * Continue the search if we 8058 * found an invalid 8K tte outside of the area 8059 * covered by this hmeblk's region. 
8060 */ 8061 if (TTE_IS_VALID(ttep)) { 8062 SFMMU_HASH_UNLOCK(hmebp); 8063 pfn = TTE_TO_PFN(sv_vaddr, ttep); 8064 return (pfn); 8065 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8066 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8067 SFMMU_HASH_UNLOCK(hmebp); 8068 pfn = PFN_INVALID; 8069 return (pfn); 8070 } 8071 } 8072 SFMMU_HASH_UNLOCK(hmebp); 8073 hashno++; 8074 } while (hashno <= mmu_hashcnt); 8075 return (PFN_INVALID); 8076 } 8077 8078 8079 /* 8080 * For compatability with AT&T and later optimizations 8081 */ 8082 /* ARGSUSED */ 8083 void 8084 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8085 { 8086 ASSERT(hat != NULL); 8087 ASSERT(hat->sfmmu_xhat_provider == NULL); 8088 } 8089 8090 /* 8091 * Return the number of mappings to a particular page. This number is an 8092 * approximation of the number of people sharing the page. 8093 * 8094 * shared hmeblks or ism hmeblks are counted as 1 mapping here. 8095 * hat_page_checkshare() can be used to compare threshold to share 8096 * count that reflects the number of region sharers albeit at higher cost. 8097 */ 8098 ulong_t 8099 hat_page_getshare(page_t *pp) 8100 { 8101 page_t *spp = pp; /* start page */ 8102 kmutex_t *pml; 8103 ulong_t cnt; 8104 int index, sz = TTE64K; 8105 8106 /* 8107 * We need to grab the mlist lock to make sure any outstanding 8108 * load/unloads complete. Otherwise we could return zero 8109 * even though the unload(s) hasn't finished yet. 8110 */ 8111 pml = sfmmu_mlist_enter(spp); 8112 cnt = spp->p_share; 8113 8114 #ifdef VAC 8115 if (kpm_enable) 8116 cnt += spp->p_kpmref; 8117 #endif 8118 8119 /* 8120 * If we have any large mappings, we count the number of 8121 * mappings that this large page is part of. 8122 */ 8123 index = PP_MAPINDEX(spp); 8124 index >>= 1; 8125 while (index) { 8126 pp = PP_GROUPLEADER(spp, sz); 8127 if ((index & 0x1) && pp != spp) { 8128 cnt += pp->p_share; 8129 spp = pp; 8130 } 8131 index >>= 1; 8132 sz++; 8133 } 8134 sfmmu_mlist_exit(pml); 8135 return (cnt); 8136 } 8137 8138 /* 8139 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8140 * otherwise. Count shared hmeblks by region's refcnt. 
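 *
 * Minimal usage sketch (sh_thresh is supplied by the caller):
 *
 *	if (hat_page_checkshare(pp, sh_thresh)) {
 *		... more than sh_thresh mappings exist to pp ...
 *	}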
8141 */ 8142 int 8143 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 8144 { 8145 kmutex_t *pml; 8146 ulong_t cnt = 0; 8147 int index, sz = TTE8K; 8148 struct sf_hment *sfhme, *tmphme = NULL; 8149 struct hme_blk *hmeblkp; 8150 8151 pml = sfmmu_mlist_enter(pp); 8152 8153 if (kpm_enable) 8154 cnt = pp->p_kpmref; 8155 8156 if (pp->p_share + cnt > sh_thresh) { 8157 sfmmu_mlist_exit(pml); 8158 return (1); 8159 } 8160 8161 index = PP_MAPINDEX(pp); 8162 8163 again: 8164 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 8165 tmphme = sfhme->hme_next; 8166 if (IS_PAHME(sfhme)) { 8167 continue; 8168 } 8169 8170 hmeblkp = sfmmu_hmetohblk(sfhme); 8171 if (hmeblkp->hblk_xhat_bit) { 8172 cnt++; 8173 if (cnt > sh_thresh) { 8174 sfmmu_mlist_exit(pml); 8175 return (1); 8176 } 8177 continue; 8178 } 8179 if (hme_size(sfhme) != sz) { 8180 continue; 8181 } 8182 8183 if (hmeblkp->hblk_shared) { 8184 sf_srd_t *srdp = hblktosrd(hmeblkp); 8185 uint_t rid = hmeblkp->hblk_tag.htag_rid; 8186 sf_region_t *rgnp; 8187 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8188 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8189 ASSERT(srdp != NULL); 8190 rgnp = srdp->srd_hmergnp[rid]; 8191 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 8192 rgnp, rid); 8193 cnt += rgnp->rgn_refcnt; 8194 } else { 8195 cnt++; 8196 } 8197 if (cnt > sh_thresh) { 8198 sfmmu_mlist_exit(pml); 8199 return (1); 8200 } 8201 } 8202 8203 index >>= 1; 8204 sz++; 8205 while (index) { 8206 pp = PP_GROUPLEADER(pp, sz); 8207 ASSERT(sfmmu_mlist_held(pp)); 8208 if (index & 0x1) { 8209 goto again; 8210 } 8211 index >>= 1; 8212 sz++; 8213 } 8214 sfmmu_mlist_exit(pml); 8215 return (0); 8216 } 8217 8218 /* 8219 * Unload all large mappings to the pp and reset the p_szc field of every 8220 * constituent page according to the remaining mappings. 8221 * 8222 * pp must be locked SE_EXCL. Even though no other constituent pages are 8223 * locked it's legal to unload the large mappings to the pp because all 8224 * constituent pages of large locked mappings have to be locked SE_SHARED. 8225 * This means if we have SE_EXCL lock on one of constituent pages none of the 8226 * large mappings to pp are locked. 8227 * 8228 * Decrease p_szc field starting from the last constituent page and ending 8229 * with the root page. This method is used because other threads rely on the 8230 * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc 8231 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 8232 * ensures that p_szc changes of the constituent pages appears atomic for all 8233 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 8234 * 8235 * This mechanism is only used for file system pages where it's not always 8236 * possible to get SE_EXCL locks on all constituent pages to demote the size 8237 * code (as is done for anonymous or kernel large pages). 8238 * 8239 * See more comments in front of sfmmu_mlspl_enter(). 
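 *
 * As a concrete example, demoting a 4M (TTE4M) page means walking its
 * TTEPAGES(TTE4M) == 512 constituent 8K page_t's from the last one back
 * toward the root, lowering each p_szc, with a membar_producer() ahead
 * of each (sub)root update so the constituent updates are globally
 * visible before the root's p_szc changes.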
8240 */ 8241 void 8242 hat_page_demote(page_t *pp) 8243 { 8244 int index; 8245 int sz; 8246 cpuset_t cpuset; 8247 int sync = 0; 8248 page_t *rootpp; 8249 struct sf_hment *sfhme; 8250 struct sf_hment *tmphme = NULL; 8251 struct hme_blk *hmeblkp; 8252 uint_t pszc; 8253 page_t *lastpp; 8254 cpuset_t tset; 8255 pgcnt_t npgs; 8256 kmutex_t *pml; 8257 kmutex_t *pmtx = NULL; 8258 8259 ASSERT(PAGE_EXCL(pp)); 8260 ASSERT(!PP_ISFREE(pp)); 8261 ASSERT(!PP_ISKAS(pp)); 8262 ASSERT(page_szc_lock_assert(pp)); 8263 pml = sfmmu_mlist_enter(pp); 8264 8265 pszc = pp->p_szc; 8266 if (pszc == 0) { 8267 goto out; 8268 } 8269 8270 index = PP_MAPINDEX(pp) >> 1; 8271 8272 if (index) { 8273 CPUSET_ZERO(cpuset); 8274 sz = TTE64K; 8275 sync = 1; 8276 } 8277 8278 while (index) { 8279 if (!(index & 0x1)) { 8280 index >>= 1; 8281 sz++; 8282 continue; 8283 } 8284 ASSERT(sz <= pszc); 8285 rootpp = PP_GROUPLEADER(pp, sz); 8286 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8287 tmphme = sfhme->hme_next; 8288 ASSERT(!IS_PAHME(sfhme)); 8289 hmeblkp = sfmmu_hmetohblk(sfhme); 8290 if (hme_size(sfhme) != sz) { 8291 continue; 8292 } 8293 if (hmeblkp->hblk_xhat_bit) { 8294 cmn_err(CE_PANIC, 8295 "hat_page_demote: xhat hmeblk"); 8296 } 8297 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8298 CPUSET_OR(cpuset, tset); 8299 } 8300 if (index >>= 1) { 8301 sz++; 8302 } 8303 } 8304 8305 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8306 8307 if (sync) { 8308 xt_sync(cpuset); 8309 #ifdef VAC 8310 if (PP_ISTNC(pp)) { 8311 conv_tnc(rootpp, sz); 8312 } 8313 #endif /* VAC */ 8314 } 8315 8316 pmtx = sfmmu_page_enter(pp); 8317 8318 ASSERT(pp->p_szc == pszc); 8319 rootpp = PP_PAGEROOT(pp); 8320 ASSERT(rootpp->p_szc == pszc); 8321 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8322 8323 while (lastpp != rootpp) { 8324 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8325 ASSERT(sz < pszc); 8326 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8327 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8328 while (--npgs > 0) { 8329 lastpp->p_szc = (uchar_t)sz; 8330 lastpp = PP_PAGEPREV(lastpp); 8331 } 8332 if (sz) { 8333 /* 8334 * make sure before current root's pszc 8335 * is updated all updates to constituent pages pszc 8336 * fields are globally visible. 8337 */ 8338 membar_producer(); 8339 } 8340 lastpp->p_szc = sz; 8341 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8342 if (lastpp != rootpp) { 8343 lastpp = PP_PAGEPREV(lastpp); 8344 } 8345 } 8346 if (sz == 0) { 8347 /* the loop above doesn't cover this case */ 8348 rootpp->p_szc = 0; 8349 } 8350 out: 8351 ASSERT(pp->p_szc == 0); 8352 if (pmtx != NULL) { 8353 sfmmu_page_exit(pmtx); 8354 } 8355 sfmmu_mlist_exit(pml); 8356 } 8357 8358 /* 8359 * Refresh the HAT ismttecnt[] element for size szc. 8360 * Caller must have set ISM busy flag to prevent mapping 8361 * lists from changing while we're traversing them. 
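 *
 * Minimal usage sketch (this mirrors the refresh loops in hat_share()
 * and hat_unshare() further below):
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);
 *	for (i = 0; i <= ismszc; i++) {
 *		if (!(disable_ism_large_pages & (1 << i)))
 *			(void) ism_tsb_entries(sfmmup, i);
 *	}
 *	sfmmu_ismhat_exit(sfmmup, 0);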
8362 */ 8363 pgcnt_t 8364 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8365 { 8366 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8367 ism_map_t *ism_map; 8368 pgcnt_t npgs = 0; 8369 pgcnt_t npgs_scd = 0; 8370 int j; 8371 sf_scd_t *scdp; 8372 uchar_t rid; 8373 8374 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8375 scdp = sfmmup->sfmmu_scdp; 8376 8377 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8378 ism_map = ism_blkp->iblk_maps; 8379 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8380 rid = ism_map[j].imap_rid; 8381 ASSERT(rid == SFMMU_INVALID_ISMRID || 8382 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8383 8384 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8385 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8386 /* ISM is in sfmmup's SCD */ 8387 npgs_scd += 8388 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8389 } else { 8390 /* ISMs is not in SCD */ 8391 npgs += 8392 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8393 } 8394 } 8395 } 8396 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8397 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8398 return (npgs); 8399 } 8400 8401 /* 8402 * Yield the memory claim requirement for an address space. 8403 * 8404 * This is currently implemented as the number of bytes that have active 8405 * hardware translations that have page structures. Therefore, it can 8406 * underestimate the traditional resident set size, eg, if the 8407 * physical page is present and the hardware translation is missing; 8408 * and it can overestimate the rss, eg, if there are active 8409 * translations to a frame buffer with page structs. 8410 * Also, it does not take sharing into account. 8411 * 8412 * Note that we don't acquire locks here since this function is most often 8413 * called from the clock thread. 8414 */ 8415 size_t 8416 hat_get_mapped_size(struct hat *hat) 8417 { 8418 size_t assize = 0; 8419 int i; 8420 8421 if (hat == NULL) 8422 return (0); 8423 8424 ASSERT(hat->sfmmu_xhat_provider == NULL); 8425 8426 for (i = 0; i < mmu_page_sizes; i++) 8427 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8428 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8429 8430 if (hat->sfmmu_iblk == NULL) 8431 return (assize); 8432 8433 for (i = 0; i < mmu_page_sizes; i++) 8434 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8435 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8436 8437 return (assize); 8438 } 8439 8440 int 8441 hat_stats_enable(struct hat *hat) 8442 { 8443 hatlock_t *hatlockp; 8444 8445 ASSERT(hat->sfmmu_xhat_provider == NULL); 8446 8447 hatlockp = sfmmu_hat_enter(hat); 8448 hat->sfmmu_rmstat++; 8449 sfmmu_hat_exit(hatlockp); 8450 return (1); 8451 } 8452 8453 void 8454 hat_stats_disable(struct hat *hat) 8455 { 8456 hatlock_t *hatlockp; 8457 8458 ASSERT(hat->sfmmu_xhat_provider == NULL); 8459 8460 hatlockp = sfmmu_hat_enter(hat); 8461 hat->sfmmu_rmstat--; 8462 sfmmu_hat_exit(hatlockp); 8463 } 8464 8465 /* 8466 * Routines for entering or removing ourselves from the 8467 * ism_hat's mapping list. This is used for both private and 8468 * SCD hats. 
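 *
 * Both routines assume ism_mlist_lock is held, e.g. (see hat_share()
 * and hat_unshare() below):
 *
 *	mutex_enter(&ism_mlist_lock);
 *	iment_add(iment, ism_hatid);	(or iment_sub(iment, ism_hatid))
 *	mutex_exit(&ism_mlist_lock);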
8469 */ 8470 static void 8471 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8472 { 8473 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8474 8475 iment->iment_prev = NULL; 8476 iment->iment_next = ism_hat->sfmmu_iment; 8477 if (ism_hat->sfmmu_iment) { 8478 ism_hat->sfmmu_iment->iment_prev = iment; 8479 } 8480 ism_hat->sfmmu_iment = iment; 8481 } 8482 8483 static void 8484 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8485 { 8486 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8487 8488 if (ism_hat->sfmmu_iment == NULL) { 8489 panic("ism map entry remove - no entries"); 8490 } 8491 8492 if (iment->iment_prev) { 8493 ASSERT(ism_hat->sfmmu_iment != iment); 8494 iment->iment_prev->iment_next = iment->iment_next; 8495 } else { 8496 ASSERT(ism_hat->sfmmu_iment == iment); 8497 ism_hat->sfmmu_iment = iment->iment_next; 8498 } 8499 8500 if (iment->iment_next) { 8501 iment->iment_next->iment_prev = iment->iment_prev; 8502 } 8503 8504 /* 8505 * zero out the entry 8506 */ 8507 iment->iment_next = NULL; 8508 iment->iment_prev = NULL; 8509 iment->iment_hat = NULL; 8510 } 8511 8512 /* 8513 * Hat_share()/unshare() return an (non-zero) error 8514 * when saddr and daddr are not properly aligned. 8515 * 8516 * The top level mapping element determines the alignment 8517 * requirement for saddr and daddr, depending on different 8518 * architectures. 8519 * 8520 * When hat_share()/unshare() are not supported, 8521 * HATOP_SHARE()/UNSHARE() return 0 8522 */ 8523 int 8524 hat_share(struct hat *sfmmup, caddr_t addr, 8525 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 8526 { 8527 ism_blk_t *ism_blkp; 8528 ism_blk_t *new_iblk; 8529 ism_map_t *ism_map; 8530 ism_ment_t *ism_ment; 8531 int i, added; 8532 hatlock_t *hatlockp; 8533 int reload_mmu = 0; 8534 uint_t ismshift = page_get_shift(ismszc); 8535 size_t ismpgsz = page_get_pagesize(ismszc); 8536 uint_t ismmask = (uint_t)ismpgsz - 1; 8537 size_t sh_size = ISM_SHIFT(ismshift, len); 8538 ushort_t ismhatflag; 8539 hat_region_cookie_t rcookie; 8540 sf_scd_t *old_scdp; 8541 8542 #ifdef DEBUG 8543 caddr_t eaddr = addr + len; 8544 #endif /* DEBUG */ 8545 8546 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8547 ASSERT(sptaddr == ISMID_STARTADDR); 8548 /* 8549 * Check the alignment. 8550 */ 8551 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8552 return (EINVAL); 8553 8554 /* 8555 * Check size alignment. 8556 */ 8557 if (!ISM_ALIGNED(ismshift, len)) 8558 return (EINVAL); 8559 8560 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 8561 8562 /* 8563 * Allocate ism_ment for the ism_hat's mapping list, and an 8564 * ism map blk in case we need one. We must do our 8565 * allocations before acquiring locks to prevent a deadlock 8566 * in the kmem allocator on the mapping list lock. 8567 */ 8568 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8569 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8570 8571 /* 8572 * Serialize ISM mappings with the ISM busy flag, and also the 8573 * trap handlers. 8574 */ 8575 sfmmu_ismhat_enter(sfmmup, 0); 8576 8577 /* 8578 * Allocate an ism map blk if necessary. 8579 */ 8580 if (sfmmup->sfmmu_iblk == NULL) { 8581 sfmmup->sfmmu_iblk = new_iblk; 8582 bzero(new_iblk, sizeof (*new_iblk)); 8583 new_iblk->iblk_nextpa = (uint64_t)-1; 8584 membar_stst(); /* make sure next ptr visible to all CPUs */ 8585 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8586 reload_mmu = 1; 8587 new_iblk = NULL; 8588 } 8589 8590 #ifdef DEBUG 8591 /* 8592 * Make sure mapping does not already exist. 
8593 */ 8594 ism_blkp = sfmmup->sfmmu_iblk; 8595 while (ism_blkp != NULL) { 8596 ism_map = ism_blkp->iblk_maps; 8597 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8598 if ((addr >= ism_start(ism_map[i]) && 8599 addr < ism_end(ism_map[i])) || 8600 eaddr > ism_start(ism_map[i]) && 8601 eaddr <= ism_end(ism_map[i])) { 8602 panic("sfmmu_share: Already mapped!"); 8603 } 8604 } 8605 ism_blkp = ism_blkp->iblk_next; 8606 } 8607 #endif /* DEBUG */ 8608 8609 ASSERT(ismszc >= TTE4M); 8610 if (ismszc == TTE4M) { 8611 ismhatflag = HAT_4M_FLAG; 8612 } else if (ismszc == TTE32M) { 8613 ismhatflag = HAT_32M_FLAG; 8614 } else if (ismszc == TTE256M) { 8615 ismhatflag = HAT_256M_FLAG; 8616 } 8617 /* 8618 * Add mapping to first available mapping slot. 8619 */ 8620 ism_blkp = sfmmup->sfmmu_iblk; 8621 added = 0; 8622 while (!added) { 8623 ism_map = ism_blkp->iblk_maps; 8624 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8625 if (ism_map[i].imap_ismhat == NULL) { 8626 8627 ism_map[i].imap_ismhat = ism_hatid; 8628 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8629 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8630 ism_map[i].imap_hatflags = ismhatflag; 8631 ism_map[i].imap_sz_mask = ismmask; 8632 /* 8633 * imap_seg is checked in ISM_CHECK to see if 8634 * non-NULL, then other info assumed valid. 8635 */ 8636 membar_stst(); 8637 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8638 ism_map[i].imap_ment = ism_ment; 8639 8640 /* 8641 * Now add ourselves to the ism_hat's 8642 * mapping list. 8643 */ 8644 ism_ment->iment_hat = sfmmup; 8645 ism_ment->iment_base_va = addr; 8646 ism_hatid->sfmmu_ismhat = 1; 8647 mutex_enter(&ism_mlist_lock); 8648 iment_add(ism_ment, ism_hatid); 8649 mutex_exit(&ism_mlist_lock); 8650 added = 1; 8651 break; 8652 } 8653 } 8654 if (!added && ism_blkp->iblk_next == NULL) { 8655 ism_blkp->iblk_next = new_iblk; 8656 new_iblk = NULL; 8657 bzero(ism_blkp->iblk_next, 8658 sizeof (*ism_blkp->iblk_next)); 8659 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8660 membar_stst(); 8661 ism_blkp->iblk_nextpa = 8662 va_to_pa((caddr_t)ism_blkp->iblk_next); 8663 } 8664 ism_blkp = ism_blkp->iblk_next; 8665 } 8666 8667 /* 8668 * After calling hat_join_region, sfmmup may join a new SCD or 8669 * move from the old scd to a new scd, in which case, we want to 8670 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8671 * sfmmu_check_page_sizes at the end of this routine. 8672 */ 8673 old_scdp = sfmmup->sfmmu_scdp; 8674 8675 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8676 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8677 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8678 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8679 } 8680 /* 8681 * Update our counters for this sfmmup's ism mappings. 8682 */ 8683 for (i = 0; i <= ismszc; i++) { 8684 if (!(disable_ism_large_pages & (1 << i))) 8685 (void) ism_tsb_entries(sfmmup, i); 8686 } 8687 8688 /* 8689 * For ISM and DISM we do not support 512K pages, so we only only 8690 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8691 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8692 * 8693 * Need to set 32M/256M ISM flags to make sure 8694 * sfmmu_check_page_sizes() enables them on Panther. 
8695 */ 8696 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8697 8698 switch (ismszc) { 8699 case TTE256M: 8700 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8701 hatlockp = sfmmu_hat_enter(sfmmup); 8702 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8703 sfmmu_hat_exit(hatlockp); 8704 } 8705 break; 8706 case TTE32M: 8707 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8708 hatlockp = sfmmu_hat_enter(sfmmup); 8709 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8710 sfmmu_hat_exit(hatlockp); 8711 } 8712 break; 8713 default: 8714 break; 8715 } 8716 8717 /* 8718 * If we updated the ismblkpa for this HAT we must make 8719 * sure all CPUs running this process reload their tsbmiss area. 8720 * Otherwise they will fail to load the mappings in the tsbmiss 8721 * handler and will loop calling pagefault(). 8722 */ 8723 if (reload_mmu) { 8724 hatlockp = sfmmu_hat_enter(sfmmup); 8725 sfmmu_sync_mmustate(sfmmup); 8726 sfmmu_hat_exit(hatlockp); 8727 } 8728 8729 sfmmu_ismhat_exit(sfmmup, 0); 8730 8731 /* 8732 * Free up ismblk if we didn't use it. 8733 */ 8734 if (new_iblk != NULL) 8735 kmem_cache_free(ism_blk_cache, new_iblk); 8736 8737 /* 8738 * Check TSB and TLB page sizes. 8739 */ 8740 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8741 sfmmu_check_page_sizes(sfmmup, 0); 8742 } else { 8743 sfmmu_check_page_sizes(sfmmup, 1); 8744 } 8745 return (0); 8746 } 8747 8748 /* 8749 * hat_unshare removes exactly one ism_map from 8750 * this process's as. It expects multiple calls 8751 * to hat_unshare for multiple shm segments. 8752 */ 8753 void 8754 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8755 { 8756 ism_map_t *ism_map; 8757 ism_ment_t *free_ment = NULL; 8758 ism_blk_t *ism_blkp; 8759 struct hat *ism_hatid; 8760 int found, i; 8761 hatlock_t *hatlockp; 8762 struct tsb_info *tsbinfo; 8763 uint_t ismshift = page_get_shift(ismszc); 8764 size_t sh_size = ISM_SHIFT(ismshift, len); 8765 uchar_t ism_rid; 8766 sf_scd_t *old_scdp; 8767 8768 ASSERT(ISM_ALIGNED(ismshift, addr)); 8769 ASSERT(ISM_ALIGNED(ismshift, len)); 8770 ASSERT(sfmmup != NULL); 8771 ASSERT(sfmmup != ksfmmup); 8772 8773 if (sfmmup->sfmmu_xhat_provider) { 8774 XHAT_UNSHARE(sfmmup, addr, len); 8775 return; 8776 } else { 8777 /* 8778 * This must be a CPU HAT. If the address space has 8779 * XHATs attached, inform all XHATs that ISM segment 8780 * is going away 8781 */ 8782 ASSERT(sfmmup->sfmmu_as != NULL); 8783 if (sfmmup->sfmmu_as->a_xhat != NULL) 8784 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 8785 } 8786 8787 /* 8788 * Make sure that during the entire time ISM mappings are removed, 8789 * the trap handlers serialize behind us, and that no one else 8790 * can be mucking with ISM mappings. This also lets us get away 8791 * with not doing expensive cross calls to flush the TLB -- we 8792 * just discard the context, flush the entire TSB, and call it 8793 * a day. 8794 */ 8795 sfmmu_ismhat_enter(sfmmup, 0); 8796 8797 /* 8798 * Remove the mapping. 8799 * 8800 * We can't have any holes in the ism map. 8801 * The tsb miss code while searching the ism map will 8802 * stop on an empty map slot. So we must move 8803 * everyone past the hole up 1 if any. 8804 * 8805 * Also empty ism map blks are not freed until the 8806 * process exits. This is to prevent a MT race condition 8807 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 
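 *
 * For example, removing slot i from a full block shifts slots
 * i+1 .. ISM_MAP_SLOTS-1 down by one and then pulls the first slot of
 * the next block (if there is one) into the last slot of this block;
 * the copy loop further below implements exactly that compaction.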
8808 */ 8809 found = 0; 8810 ism_blkp = sfmmup->sfmmu_iblk; 8811 while (!found && ism_blkp != NULL) { 8812 ism_map = ism_blkp->iblk_maps; 8813 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8814 if (addr == ism_start(ism_map[i]) && 8815 sh_size == (size_t)(ism_size(ism_map[i]))) { 8816 found = 1; 8817 break; 8818 } 8819 } 8820 if (!found) 8821 ism_blkp = ism_blkp->iblk_next; 8822 } 8823 8824 if (found) { 8825 ism_hatid = ism_map[i].imap_ismhat; 8826 ism_rid = ism_map[i].imap_rid; 8827 ASSERT(ism_hatid != NULL); 8828 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8829 8830 /* 8831 * After hat_leave_region, the sfmmup may leave SCD, 8832 * in which case, we want to grow the private tsb size when 8833 * calling sfmmu_check_page_sizes at the end of the routine. 8834 */ 8835 old_scdp = sfmmup->sfmmu_scdp; 8836 /* 8837 * Then remove ourselves from the region. 8838 */ 8839 if (ism_rid != SFMMU_INVALID_ISMRID) { 8840 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8841 HAT_REGION_ISM); 8842 } 8843 8844 /* 8845 * And now guarantee that any other cpu 8846 * that tries to process an ISM miss 8847 * will go to tl=0. 8848 */ 8849 hatlockp = sfmmu_hat_enter(sfmmup); 8850 sfmmu_invalidate_ctx(sfmmup); 8851 sfmmu_hat_exit(hatlockp); 8852 8853 /* 8854 * Remove ourselves from the ism mapping list. 8855 */ 8856 mutex_enter(&ism_mlist_lock); 8857 iment_sub(ism_map[i].imap_ment, ism_hatid); 8858 mutex_exit(&ism_mlist_lock); 8859 free_ment = ism_map[i].imap_ment; 8860 8861 /* 8862 * We delete the ism map by copying 8863 * the next map over the current one. 8864 * We will take the next one in the maps 8865 * array or from the next ism_blk. 8866 */ 8867 while (ism_blkp != NULL) { 8868 ism_map = ism_blkp->iblk_maps; 8869 while (i < (ISM_MAP_SLOTS - 1)) { 8870 ism_map[i] = ism_map[i + 1]; 8871 i++; 8872 } 8873 /* i == (ISM_MAP_SLOTS - 1) */ 8874 ism_blkp = ism_blkp->iblk_next; 8875 if (ism_blkp != NULL) { 8876 ism_map[i] = ism_blkp->iblk_maps[0]; 8877 i = 0; 8878 } else { 8879 ism_map[i].imap_seg = 0; 8880 ism_map[i].imap_vb_shift = 0; 8881 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8882 ism_map[i].imap_hatflags = 0; 8883 ism_map[i].imap_sz_mask = 0; 8884 ism_map[i].imap_ismhat = NULL; 8885 ism_map[i].imap_ment = NULL; 8886 } 8887 } 8888 8889 /* 8890 * Now flush entire TSB for the process, since 8891 * demapping page by page can be too expensive. 8892 * We don't have to flush the TLB here anymore 8893 * since we switch to a new TLB ctx instead. 8894 * Also, there is no need to flush if the process 8895 * is exiting since the TSB will be freed later. 8896 */ 8897 if (!sfmmup->sfmmu_free) { 8898 hatlockp = sfmmu_hat_enter(sfmmup); 8899 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8900 tsbinfo = tsbinfo->tsb_next) { 8901 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8902 continue; 8903 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 8904 tsbinfo->tsb_flags |= 8905 TSB_FLUSH_NEEDED; 8906 continue; 8907 } 8908 8909 sfmmu_inv_tsb(tsbinfo->tsb_va, 8910 TSB_BYTES(tsbinfo->tsb_szc)); 8911 } 8912 sfmmu_hat_exit(hatlockp); 8913 } 8914 } 8915 8916 /* 8917 * Update our counters for this sfmmup's ism mappings. 8918 */ 8919 for (i = 0; i <= ismszc; i++) { 8920 if (!(disable_ism_large_pages & (1 << i))) 8921 (void) ism_tsb_entries(sfmmup, i); 8922 } 8923 8924 sfmmu_ismhat_exit(sfmmup, 0); 8925 8926 /* 8927 * We must do our freeing here after dropping locks 8928 * to prevent a deadlock in the kmem allocator on the 8929 * mapping list lock. 
8930 */ 8931 if (free_ment != NULL) 8932 kmem_cache_free(ism_ment_cache, free_ment); 8933 8934 /* 8935 * Check TSB and TLB page sizes if the process isn't exiting. 8936 */ 8937 if (!sfmmup->sfmmu_free) { 8938 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 8939 sfmmu_check_page_sizes(sfmmup, 1); 8940 } else { 8941 sfmmu_check_page_sizes(sfmmup, 0); 8942 } 8943 } 8944 } 8945 8946 /* ARGSUSED */ 8947 static int 8948 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8949 { 8950 /* void *buf is sfmmu_t pointer */ 8951 bzero(buf, sizeof (sfmmu_t)); 8952 8953 return (0); 8954 } 8955 8956 /* ARGSUSED */ 8957 static void 8958 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8959 { 8960 /* void *buf is sfmmu_t pointer */ 8961 } 8962 8963 /* 8964 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8965 * field to be the pa of this hmeblk 8966 */ 8967 /* ARGSUSED */ 8968 static int 8969 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8970 { 8971 struct hme_blk *hmeblkp; 8972 8973 bzero(buf, (size_t)cdrarg); 8974 hmeblkp = (struct hme_blk *)buf; 8975 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8976 8977 #ifdef HBLK_TRACE 8978 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8979 #endif /* HBLK_TRACE */ 8980 8981 return (0); 8982 } 8983 8984 /* ARGSUSED */ 8985 static void 8986 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8987 { 8988 8989 #ifdef HBLK_TRACE 8990 8991 struct hme_blk *hmeblkp; 8992 8993 hmeblkp = (struct hme_blk *)buf; 8994 mutex_destroy(&hmeblkp->hblk_audit_lock); 8995 8996 #endif /* HBLK_TRACE */ 8997 } 8998 8999 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 9000 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 9001 /* 9002 * The kmem allocator will callback into our reclaim routine when the system 9003 * is running low in memory. We traverse the hash and free up all unused but 9004 * still cached hme_blks. We also traverse the free list and free them up 9005 * as well. 
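 *
 * These constructor/destructor/reclaim hooks are the ones handed to
 * kmem_cache_create() when the hmeblk caches are set up; a simplified
 * sketch (the cache name, size, alignment, arena and flags shown here
 * are illustrative only):
 *
 *	cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 0,
 *	    sfmmu_hblkcache_constructor, sfmmu_hblkcache_destructor,
 *	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, NULL, 0);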
9006 */ 9007 /*ARGSUSED*/ 9008 static void 9009 sfmmu_hblkcache_reclaim(void *cdrarg) 9010 { 9011 int i; 9012 uint64_t hblkpa, prevpa, nx_pa; 9013 struct hmehash_bucket *hmebp; 9014 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 9015 static struct hmehash_bucket *uhmehash_reclaim_hand; 9016 static struct hmehash_bucket *khmehash_reclaim_hand; 9017 struct hme_blk *list = NULL; 9018 9019 hmebp = uhmehash_reclaim_hand; 9020 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 9021 uhmehash_reclaim_hand = hmebp = uhme_hash; 9022 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9023 9024 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9025 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9026 hmeblkp = hmebp->hmeblkp; 9027 hblkpa = hmebp->hmeh_nextpa; 9028 prevpa = 0; 9029 pr_hblk = NULL; 9030 while (hmeblkp) { 9031 nx_hblk = hmeblkp->hblk_next; 9032 nx_pa = hmeblkp->hblk_nextpa; 9033 if (!hmeblkp->hblk_vcnt && 9034 !hmeblkp->hblk_hmecnt) { 9035 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9036 prevpa, pr_hblk); 9037 sfmmu_hblk_free(hmebp, hmeblkp, 9038 hblkpa, &list); 9039 } else { 9040 pr_hblk = hmeblkp; 9041 prevpa = hblkpa; 9042 } 9043 hmeblkp = nx_hblk; 9044 hblkpa = nx_pa; 9045 } 9046 SFMMU_HASH_UNLOCK(hmebp); 9047 } 9048 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 9049 hmebp = uhme_hash; 9050 } 9051 9052 hmebp = khmehash_reclaim_hand; 9053 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 9054 khmehash_reclaim_hand = hmebp = khme_hash; 9055 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9056 9057 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9058 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9059 hmeblkp = hmebp->hmeblkp; 9060 hblkpa = hmebp->hmeh_nextpa; 9061 prevpa = 0; 9062 pr_hblk = NULL; 9063 while (hmeblkp) { 9064 nx_hblk = hmeblkp->hblk_next; 9065 nx_pa = hmeblkp->hblk_nextpa; 9066 if (!hmeblkp->hblk_vcnt && 9067 !hmeblkp->hblk_hmecnt) { 9068 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9069 prevpa, pr_hblk); 9070 sfmmu_hblk_free(hmebp, hmeblkp, 9071 hblkpa, &list); 9072 } else { 9073 pr_hblk = hmeblkp; 9074 prevpa = hblkpa; 9075 } 9076 hmeblkp = nx_hblk; 9077 hblkpa = nx_pa; 9078 } 9079 SFMMU_HASH_UNLOCK(hmebp); 9080 } 9081 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9082 hmebp = khme_hash; 9083 } 9084 sfmmu_hblks_list_purge(&list); 9085 } 9086 9087 /* 9088 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9089 * same goes for sfmmu_get_addrvcolor(). 9090 * 9091 * This function will return the virtual color for the specified page. The 9092 * virtual color corresponds to this page current mapping or its last mapping. 9093 * It is used by memory allocators to choose addresses with the correct 9094 * alignment so vac consistency is automatically maintained. If the page 9095 * has no color it returns -1. 9096 */ 9097 /*ARGSUSED*/ 9098 int 9099 sfmmu_get_ppvcolor(struct page *pp) 9100 { 9101 #ifdef VAC 9102 int color; 9103 9104 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9105 return (-1); 9106 } 9107 color = PP_GET_VCOLOR(pp); 9108 ASSERT(color < mmu_btop(shm_alignment)); 9109 return (color); 9110 #else 9111 return (-1); 9112 #endif /* VAC */ 9113 } 9114 9115 /* 9116 * This function will return the desired alignment for vac consistency 9117 * (vac color) given a virtual address. If no vac is present it returns -1. 
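 *
 * A typical consumer pairs it with sfmmu_get_ppvcolor() above, along
 * the lines of (sketch only):
 *
 *	if (sfmmu_get_ppvcolor(pp) != -1 &&
 *	    sfmmu_get_ppvcolor(pp) != sfmmu_get_addrvcolor(vaddr))
 *		... mapping pp at vaddr would introduce a VAC alias ...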
9118  */
9119 /*ARGSUSED*/
9120 int
9121 sfmmu_get_addrvcolor(caddr_t vaddr)
9122 {
9123 #ifdef VAC
9124 	if (cache & CACHE_VAC) {
9125 		return (addr_to_vcolor(vaddr));
9126 	} else {
9127 		return (-1);
9128 	}
9129 #else
9130 	return (-1);
9131 #endif	/* VAC */
9132 }
9133 
9134 #ifdef VAC
9135 /*
9136  * Check for conflicts.
9137  * A conflict exists if the new and existing mappings do not match in
9138  * their shm_alignment fields. If conflicts exist, the existing mappings
9139  * are flushed unless one of them is locked. If one of them is locked, then
9140  * the mappings are flushed and converted to non-cacheable mappings.
9141  */
9142 static void
9143 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9144 {
9145 	struct hat *tmphat;
9146 	struct sf_hment *sfhmep, *tmphme = NULL;
9147 	struct hme_blk *hmeblkp;
9148 	int vcolor;
9149 	tte_t tte;
9150 
9151 	ASSERT(sfmmu_mlist_held(pp));
9152 	ASSERT(!PP_ISNC(pp));		/* page better be cacheable */
9153 
9154 	vcolor = addr_to_vcolor(addr);
9155 	if (PP_NEWPAGE(pp)) {
9156 		PP_SET_VCOLOR(pp, vcolor);
9157 		return;
9158 	}
9159 
9160 	if (PP_GET_VCOLOR(pp) == vcolor) {
9161 		return;
9162 	}
9163 
9164 	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9165 		/*
9166 		 * Previous user of page had a different color
9167 		 * but since there are no current users
9168 		 * we just flush the cache and change the color.
9169 		 */
9170 		SFMMU_STAT(sf_pgcolor_conflict);
9171 		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9172 		PP_SET_VCOLOR(pp, vcolor);
9173 		return;
9174 	}
9175 
9176 	/*
9177 	 * If we get here we have a vac conflict with a current
9178 	 * mapping. VAC conflict policy is as follows.
9179 	 * - The default is to unload the other mappings unless:
9180 	 * - If we have a large mapping we uncache the page.
9181 	 *   We need to uncache the rest of the large page too.
9182 	 * - If any of the mappings are locked we uncache the page.
9183 	 * - If the requested mapping is inconsistent
9184 	 *   with another mapping and that mapping
9185 	 *   is in the same address space we have to
9186 	 *   make it non-cached. The default thing
9187 	 *   to do is unload the inconsistent mapping
9188 	 *   but if they are in the same address space
9189 	 *   we run the risk of unmapping the pc or the
9190 	 *   stack which we will use as we return to the user,
9191 	 *   in which case we can then fault on the thing
9192 	 *   we just unloaded and get into an infinite loop.
9193 	 */
9194 	if (PP_ISMAPPED_LARGE(pp)) {
9195 		int sz;
9196 
9197 		/*
9198 		 * Existing mapping is for big pages. We don't unload
9199 		 * existing big mappings to satisfy new mappings.
9200 		 * Always convert all mappings to TNC.
9201 		 */
9202 		sz = fnd_mapping_sz(pp);
9203 		pp = PP_GROUPLEADER(pp, sz);
9204 		SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9205 		sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9206 		    TTEPAGES(sz));
9207 
9208 		return;
9209 	}
9210 
9211 	/*
9212 	 * Check whether any mapping is in the same address space, or is
9213 	 * locked, since in either case we need to uncache.
9214 */ 9215 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9216 tmphme = sfhmep->hme_next; 9217 if (IS_PAHME(sfhmep)) 9218 continue; 9219 hmeblkp = sfmmu_hmetohblk(sfhmep); 9220 if (hmeblkp->hblk_xhat_bit) 9221 continue; 9222 tmphat = hblktosfmmu(hmeblkp); 9223 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9224 ASSERT(TTE_IS_VALID(&tte)); 9225 if (hmeblkp->hblk_shared || tmphat == hat || 9226 hmeblkp->hblk_lckcnt) { 9227 /* 9228 * We have an uncache conflict 9229 */ 9230 SFMMU_STAT(sf_uncache_conflict); 9231 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9232 return; 9233 } 9234 } 9235 9236 /* 9237 * We have an unload conflict 9238 * We have already checked for LARGE mappings, therefore 9239 * the remaining mapping(s) must be TTE8K. 9240 */ 9241 SFMMU_STAT(sf_unload_conflict); 9242 9243 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9244 tmphme = sfhmep->hme_next; 9245 if (IS_PAHME(sfhmep)) 9246 continue; 9247 hmeblkp = sfmmu_hmetohblk(sfhmep); 9248 if (hmeblkp->hblk_xhat_bit) 9249 continue; 9250 ASSERT(!hmeblkp->hblk_shared); 9251 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9252 } 9253 9254 if (PP_ISMAPPED_KPM(pp)) 9255 sfmmu_kpm_vac_unload(pp, addr); 9256 9257 /* 9258 * Unloads only do TLB flushes so we need to flush the 9259 * cache here. 9260 */ 9261 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9262 PP_SET_VCOLOR(pp, vcolor); 9263 } 9264 9265 /* 9266 * Whenever a mapping is unloaded and the page is in TNC state, 9267 * we see if the page can be made cacheable again. 'pp' is 9268 * the page that we just unloaded a mapping from, the size 9269 * of mapping that was unloaded is 'ottesz'. 9270 * Remark: 9271 * The recache policy for mpss pages can leave a performance problem 9272 * under the following circumstances: 9273 * . A large page in uncached mode has just been unmapped. 9274 * . All constituent pages are TNC due to a conflicting small mapping. 9275 * . There are many other, non conflicting, small mappings around for 9276 * a lot of the constituent pages. 9277 * . We're called w/ the "old" groupleader page and the old ottesz, 9278 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9279 * we end up w/ TTE8K or npages == 1. 9280 * . We call tst_tnc w/ the old groupleader only, and if there is no 9281 * conflict, we re-cache only this page. 9282 * . All other small mappings are not checked and will be left in TNC mode. 9283 * The problem is not very serious because: 9284 * . mpss is actually only defined for heap and stack, so the probability 9285 * is not very high that a large page mapping exists in parallel to a small 9286 * one (this is possible, but seems to be bad programming style in the 9287 * appl). 9288 * . The problem gets a little bit more serious, when those TNC pages 9289 * have to be mapped into kernel space, e.g. for networking. 9290 * . When VAC alias conflicts occur in applications, this is regarded 9291 * as an application bug. So if kstat's show them, the appl should 9292 * be changed anyway. 9293 */ 9294 void 9295 conv_tnc(page_t *pp, int ottesz) 9296 { 9297 int cursz, dosz; 9298 pgcnt_t curnpgs, dopgs; 9299 pgcnt_t pg64k; 9300 page_t *pp2; 9301 9302 /* 9303 * Determine how big a range we check for TNC and find 9304 * leader page. cursz is the size of the biggest 9305 * mapping that still exist on 'pp'. 
9306 */ 9307 if (PP_ISMAPPED_LARGE(pp)) { 9308 cursz = fnd_mapping_sz(pp); 9309 } else { 9310 cursz = TTE8K; 9311 } 9312 9313 if (ottesz >= cursz) { 9314 dosz = ottesz; 9315 pp2 = pp; 9316 } else { 9317 dosz = cursz; 9318 pp2 = PP_GROUPLEADER(pp, dosz); 9319 } 9320 9321 pg64k = TTEPAGES(TTE64K); 9322 dopgs = TTEPAGES(dosz); 9323 9324 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9325 9326 while (dopgs != 0) { 9327 curnpgs = TTEPAGES(cursz); 9328 if (tst_tnc(pp2, curnpgs)) { 9329 SFMMU_STAT_ADD(sf_recache, curnpgs); 9330 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9331 curnpgs); 9332 } 9333 9334 ASSERT(dopgs >= curnpgs); 9335 dopgs -= curnpgs; 9336 9337 if (dopgs == 0) { 9338 break; 9339 } 9340 9341 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9342 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9343 cursz = fnd_mapping_sz(pp2); 9344 } else { 9345 cursz = TTE8K; 9346 } 9347 } 9348 } 9349 9350 /* 9351 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9352 * returns 0 otherwise. Note that oaddr argument is valid for only 9353 * 8k pages. 9354 */ 9355 int 9356 tst_tnc(page_t *pp, pgcnt_t npages) 9357 { 9358 struct sf_hment *sfhme; 9359 struct hme_blk *hmeblkp; 9360 tte_t tte; 9361 caddr_t vaddr; 9362 int clr_valid = 0; 9363 int color, color1, bcolor; 9364 int i, ncolors; 9365 9366 ASSERT(pp != NULL); 9367 ASSERT(!(cache & CACHE_WRITEBACK)); 9368 9369 if (npages > 1) { 9370 ncolors = CACHE_NUM_COLOR; 9371 } 9372 9373 for (i = 0; i < npages; i++) { 9374 ASSERT(sfmmu_mlist_held(pp)); 9375 ASSERT(PP_ISTNC(pp)); 9376 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9377 9378 if (PP_ISPNC(pp)) { 9379 return (0); 9380 } 9381 9382 clr_valid = 0; 9383 if (PP_ISMAPPED_KPM(pp)) { 9384 caddr_t kpmvaddr; 9385 9386 ASSERT(kpm_enable); 9387 kpmvaddr = hat_kpm_page2va(pp, 1); 9388 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9389 color1 = addr_to_vcolor(kpmvaddr); 9390 clr_valid = 1; 9391 } 9392 9393 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9394 if (IS_PAHME(sfhme)) 9395 continue; 9396 hmeblkp = sfmmu_hmetohblk(sfhme); 9397 if (hmeblkp->hblk_xhat_bit) 9398 continue; 9399 9400 sfmmu_copytte(&sfhme->hme_tte, &tte); 9401 ASSERT(TTE_IS_VALID(&tte)); 9402 9403 vaddr = tte_to_vaddr(hmeblkp, tte); 9404 color = addr_to_vcolor(vaddr); 9405 9406 if (npages > 1) { 9407 /* 9408 * If there is a big mapping, make sure 9409 * 8K mapping is consistent with the big 9410 * mapping. 
9411 				 */
9412 				bcolor = i % ncolors;
9413 				if (color != bcolor) {
9414 					return (0);
9415 				}
9416 			}
9417 			if (!clr_valid) {
9418 				clr_valid = 1;
9419 				color1 = color;
9420 			}
9421 
9422 			if (color1 != color) {
9423 				return (0);
9424 			}
9425 		}
9426 
9427 		pp = PP_PAGENEXT(pp);
9428 	}
9429 
9430 	return (1);
9431 }
9432 
9433 void
9434 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9435 	pgcnt_t npages)
9436 {
9437 	kmutex_t *pmtx;
9438 	int i, ncolors, bcolor;
9439 	kpm_hlk_t *kpmp;
9440 	cpuset_t cpuset;
9441 
9442 	ASSERT(pp != NULL);
9443 	ASSERT(!(cache & CACHE_WRITEBACK));
9444 
9445 	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9446 	pmtx = sfmmu_page_enter(pp);
9447 
9448 	/*
9449 	 * Fast path caching single unmapped page
9450 	 */
9451 	if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9452 	    flags == HAT_CACHE) {
9453 		PP_CLRTNC(pp);
9454 		PP_CLRPNC(pp);
9455 		sfmmu_page_exit(pmtx);
9456 		sfmmu_kpm_kpmp_exit(kpmp);
9457 		return;
9458 	}
9459 
9460 	/*
9461 	 * We need to capture all cpus in order to change cacheability
9462 	 * because we can't allow one cpu to access the same physical
9463 	 * page using a cacheable and a non-cacheable mapping at the same
9464 	 * time. Since we may end up walking the ism mapping list
9465 	 * we have to grab its lock now since we can't after all the
9466 	 * cpus have been captured.
9467 	 */
9468 	sfmmu_hat_lock_all();
9469 	mutex_enter(&ism_mlist_lock);
9470 	kpreempt_disable();
9471 	cpuset = cpu_ready_set;
9472 	xc_attention(cpuset);
9473 
9474 	if (npages > 1) {
9475 		/*
9476 		 * Make sure all colors are flushed since the
9477 		 * sfmmu_page_cache() only flushes one color;
9478 		 * it does not know about big pages.
9479 		 */
9480 		ncolors = CACHE_NUM_COLOR;
9481 		if (flags & HAT_TMPNC) {
9482 			for (i = 0; i < ncolors; i++) {
9483 				sfmmu_cache_flushcolor(i, pp->p_pagenum);
9484 			}
9485 			cache_flush_flag = CACHE_NO_FLUSH;
9486 		}
9487 	}
9488 
9489 	for (i = 0; i < npages; i++) {
9490 
9491 		ASSERT(sfmmu_mlist_held(pp));
9492 
9493 		if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9494 
9495 			if (npages > 1) {
9496 				bcolor = i % ncolors;
9497 			} else {
9498 				bcolor = NO_VCOLOR;
9499 			}
9500 
9501 			sfmmu_page_cache(pp, flags, cache_flush_flag,
9502 			    bcolor);
9503 		}
9504 
9505 		pp = PP_PAGENEXT(pp);
9506 	}
9507 
9508 	xt_sync(cpuset);
9509 	xc_dismissed(cpuset);
9510 	mutex_exit(&ism_mlist_lock);
9511 	sfmmu_hat_unlock_all();
9512 	sfmmu_page_exit(pmtx);
9513 	sfmmu_kpm_kpmp_exit(kpmp);
9514 	kpreempt_enable();
9515 }
9516 
9517 /*
9518  * This function changes the virtual cacheability of all mappings to a
9519  * particular page. When changing from uncache to cacheable the mappings will
9520  * only be changed if all of them have the same virtual color.
9521  * We need to flush the cache on all cpus. It is possible that
9522  * a process referenced a page as cacheable but has since exited
9523  * and cleared the mapping list. We still need to flush it but have no
9524  * state, so flushing on all cpus is the only alternative.
9525 */ 9526 static void 9527 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9528 { 9529 struct sf_hment *sfhme; 9530 struct hme_blk *hmeblkp; 9531 sfmmu_t *sfmmup; 9532 tte_t tte, ttemod; 9533 caddr_t vaddr; 9534 int ret, color; 9535 pfn_t pfn; 9536 9537 color = bcolor; 9538 pfn = pp->p_pagenum; 9539 9540 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9541 9542 if (IS_PAHME(sfhme)) 9543 continue; 9544 hmeblkp = sfmmu_hmetohblk(sfhme); 9545 9546 if (hmeblkp->hblk_xhat_bit) 9547 continue; 9548 9549 sfmmu_copytte(&sfhme->hme_tte, &tte); 9550 ASSERT(TTE_IS_VALID(&tte)); 9551 vaddr = tte_to_vaddr(hmeblkp, tte); 9552 color = addr_to_vcolor(vaddr); 9553 9554 #ifdef DEBUG 9555 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9556 ASSERT(color == bcolor); 9557 } 9558 #endif 9559 9560 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9561 9562 ttemod = tte; 9563 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9564 TTE_CLR_VCACHEABLE(&ttemod); 9565 } else { /* flags & HAT_CACHE */ 9566 TTE_SET_VCACHEABLE(&ttemod); 9567 } 9568 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9569 if (ret < 0) { 9570 /* 9571 * Since all cpus are captured modifytte should not 9572 * fail. 9573 */ 9574 panic("sfmmu_page_cache: write to tte failed"); 9575 } 9576 9577 sfmmup = hblktosfmmu(hmeblkp); 9578 if (cache_flush_flag == CACHE_FLUSH) { 9579 /* 9580 * Flush TSBs, TLBs and caches 9581 */ 9582 if (hmeblkp->hblk_shared) { 9583 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9584 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9585 sf_region_t *rgnp; 9586 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9587 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9588 ASSERT(srdp != NULL); 9589 rgnp = srdp->srd_hmergnp[rid]; 9590 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9591 srdp, rgnp, rid); 9592 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9593 hmeblkp, 0); 9594 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9595 } else if (sfmmup->sfmmu_ismhat) { 9596 if (flags & HAT_CACHE) { 9597 SFMMU_STAT(sf_ism_recache); 9598 } else { 9599 SFMMU_STAT(sf_ism_uncache); 9600 } 9601 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9602 pfn, CACHE_FLUSH); 9603 } else { 9604 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9605 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9606 } 9607 9608 /* 9609 * all cache entries belonging to this pfn are 9610 * now flushed. 9611 */ 9612 cache_flush_flag = CACHE_NO_FLUSH; 9613 } else { 9614 /* 9615 * Flush only TSBs and TLBs. 
9616 */ 9617 if (hmeblkp->hblk_shared) { 9618 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9619 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9620 sf_region_t *rgnp; 9621 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9622 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9623 ASSERT(srdp != NULL); 9624 rgnp = srdp->srd_hmergnp[rid]; 9625 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9626 srdp, rgnp, rid); 9627 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9628 hmeblkp, 0); 9629 } else if (sfmmup->sfmmu_ismhat) { 9630 if (flags & HAT_CACHE) { 9631 SFMMU_STAT(sf_ism_recache); 9632 } else { 9633 SFMMU_STAT(sf_ism_uncache); 9634 } 9635 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9636 pfn, CACHE_NO_FLUSH); 9637 } else { 9638 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9639 } 9640 } 9641 } 9642 9643 if (PP_ISMAPPED_KPM(pp)) 9644 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9645 9646 switch (flags) { 9647 9648 default: 9649 panic("sfmmu_pagecache: unknown flags"); 9650 break; 9651 9652 case HAT_CACHE: 9653 PP_CLRTNC(pp); 9654 PP_CLRPNC(pp); 9655 PP_SET_VCOLOR(pp, color); 9656 break; 9657 9658 case HAT_TMPNC: 9659 PP_SETTNC(pp); 9660 PP_SET_VCOLOR(pp, NO_VCOLOR); 9661 break; 9662 9663 case HAT_UNCACHE: 9664 PP_SETPNC(pp); 9665 PP_CLRTNC(pp); 9666 PP_SET_VCOLOR(pp, NO_VCOLOR); 9667 break; 9668 } 9669 } 9670 #endif /* VAC */ 9671 9672 9673 /* 9674 * Wrapper routine used to return a context. 9675 * 9676 * It's the responsibility of the caller to guarantee that the 9677 * process serializes on calls here by taking the HAT lock for 9678 * the hat. 9679 * 9680 */ 9681 static void 9682 sfmmu_get_ctx(sfmmu_t *sfmmup) 9683 { 9684 mmu_ctx_t *mmu_ctxp; 9685 uint_t pstate_save; 9686 int ret; 9687 9688 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9689 ASSERT(sfmmup != ksfmmup); 9690 9691 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9692 sfmmu_setup_tsbinfo(sfmmup); 9693 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9694 } 9695 9696 kpreempt_disable(); 9697 9698 mmu_ctxp = CPU_MMU_CTXP(CPU); 9699 ASSERT(mmu_ctxp); 9700 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9701 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9702 9703 /* 9704 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 9705 */ 9706 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9707 sfmmu_ctx_wrap_around(mmu_ctxp); 9708 9709 /* 9710 * Let the MMU set up the page sizes to use for 9711 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9712 */ 9713 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9714 mmu_set_ctx_page_sizes(sfmmup); 9715 } 9716 9717 /* 9718 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9719 * interrupts disabled to prevent race condition with wrap-around 9720 * ctx invalidatation. In sun4v, ctx invalidation also involves 9721 * a HV call to set the number of TSBs to 0. If interrupts are not 9722 * disabled until after sfmmu_load_mmustate is complete TSBs may 9723 * become assigned to INVALID_CONTEXT. This is not allowed. 
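 * The required ordering below is therefore: sfmmu_disable_intrs(), then
 * sfmmu_alloc_ctx() for the private hat (and, when the process belongs
 * to an SCD, for the SCD's shared hat as well), then
 * sfmmu_load_mmustate(), and only then sfmmu_enable_intrs().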
9724 */ 9725 pstate_save = sfmmu_disable_intrs(); 9726 9727 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9728 sfmmup->sfmmu_scdp != NULL) { 9729 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9730 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9731 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9732 /* debug purpose only */ 9733 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9734 != INVALID_CONTEXT); 9735 } 9736 sfmmu_load_mmustate(sfmmup); 9737 9738 sfmmu_enable_intrs(pstate_save); 9739 9740 kpreempt_enable(); 9741 } 9742 9743 /* 9744 * When all cnums are used up in a MMU, cnum will wrap around to the 9745 * next generation and start from 2. 9746 */ 9747 static void 9748 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 9749 { 9750 9751 /* caller must have disabled the preemption */ 9752 ASSERT(curthread->t_preempt >= 1); 9753 ASSERT(mmu_ctxp != NULL); 9754 9755 /* acquire Per-MMU (PM) spin lock */ 9756 mutex_enter(&mmu_ctxp->mmu_lock); 9757 9758 /* re-check to see if wrap-around is needed */ 9759 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9760 goto done; 9761 9762 SFMMU_MMU_STAT(mmu_wrap_around); 9763 9764 /* update gnum */ 9765 ASSERT(mmu_ctxp->mmu_gnum != 0); 9766 mmu_ctxp->mmu_gnum++; 9767 if (mmu_ctxp->mmu_gnum == 0 || 9768 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9769 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9770 (void *)mmu_ctxp); 9771 } 9772 9773 if (mmu_ctxp->mmu_ncpus > 1) { 9774 cpuset_t cpuset; 9775 9776 membar_enter(); /* make sure updated gnum visible */ 9777 9778 SFMMU_XCALL_STATS(NULL); 9779 9780 /* xcall to others on the same MMU to invalidate ctx */ 9781 cpuset = mmu_ctxp->mmu_cpuset; 9782 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 9783 CPUSET_DEL(cpuset, CPU->cpu_id); 9784 CPUSET_AND(cpuset, cpu_ready_set); 9785 9786 /* 9787 * Pass in INVALID_CONTEXT as the first parameter to 9788 * sfmmu_raise_tsb_exception, which invalidates the context 9789 * of any process running on the CPUs in the MMU. 9790 */ 9791 xt_some(cpuset, sfmmu_raise_tsb_exception, 9792 INVALID_CONTEXT, INVALID_CONTEXT); 9793 xt_sync(cpuset); 9794 9795 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9796 } 9797 9798 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9799 sfmmu_setctx_sec(INVALID_CONTEXT); 9800 sfmmu_clear_utsbinfo(); 9801 } 9802 9803 /* 9804 * No xcall is needed here. For sun4u systems all CPUs in context 9805 * domain share a single physical MMU therefore it's enough to flush 9806 * TLB on local CPU. On sun4v systems we use 1 global context 9807 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9808 * handler. Note that vtag_flushall_uctxs() is called 9809 * for Ultra II machine, where the equivalent flushall functionality 9810 * is implemented in SW, and only user ctx TLB entries are flushed. 9811 */ 9812 if (&vtag_flushall_uctxs != NULL) { 9813 vtag_flushall_uctxs(); 9814 } else { 9815 vtag_flushall(); 9816 } 9817 9818 /* reset mmu cnum, skips cnum 0 and 1 */ 9819 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9820 9821 done: 9822 mutex_exit(&mmu_ctxp->mmu_lock); 9823 } 9824 9825 9826 /* 9827 * For multi-threaded process, set the process context to INVALID_CONTEXT 9828 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9829 * process, we can just load the MMU state directly without having to 9830 * set context invalid. Caller must hold the hat lock since we don't 9831 * acquire it here. 
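 *
 * Minimal usage sketch (this is the pattern hat_share() uses above when
 * the ism block PA changes):
 *
 *	hatlockp = sfmmu_hat_enter(sfmmup);
 *	sfmmu_sync_mmustate(sfmmup);
 *	sfmmu_hat_exit(hatlockp);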
9832  */
9833 static void
9834 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9835 {
9836 	uint_t cnum;
9837 	uint_t pstate_save;
9838 
9839 	ASSERT(sfmmup != ksfmmup);
9840 	ASSERT(sfmmu_hat_lock_held(sfmmup));
9841 
9842 	kpreempt_disable();
9843 
9844 	/*
9845 	 * We check whether the passed-in sfmmup is the same as the
9846 	 * current running proc. This is to make sure the current proc
9847 	 * stays single-threaded if it already is.
9848 	 */
9849 	if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9850 	    (curthread->t_procp->p_lwpcnt == 1)) {
9851 		/* single-thread */
9852 		cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9853 		if (cnum != INVALID_CONTEXT) {
9854 			uint_t curcnum;
9855 			/*
9856 			 * Disable interrupts to prevent race condition
9857 			 * with sfmmu_ctx_wrap_around ctx invalidation.
9858 			 * In sun4v, ctx invalidation involves setting
9859 			 * TSB to NULL, hence, interrupts should be disabled
9860 			 * until after sfmmu_load_mmustate is completed.
9861 			 */
9862 			pstate_save = sfmmu_disable_intrs();
9863 			curcnum = sfmmu_getctx_sec();
9864 			if (curcnum == cnum)
9865 				sfmmu_load_mmustate(sfmmup);
9866 			sfmmu_enable_intrs(pstate_save);
9867 			ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9868 		}
9869 	} else {
9870 		/*
9871 		 * multi-thread
9872 		 * or when sfmmup is not the same as the curproc.
9873 		 */
9874 		sfmmu_invalidate_ctx(sfmmup);
9875 	}
9876 
9877 	kpreempt_enable();
9878 }
9879 
9880 
9881 /*
9882  * Replace the specified TSB with a new TSB. This function gets called when
9883  * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
9884  * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9885  * (8K).
9886  *
9887  * Caller must hold the HAT lock, but should assume any tsb_info
9888  * pointers it has are no longer valid after calling this function.
9889  *
9890  * Return values:
9891  *	TSB_ALLOCFAIL	Failed to allocate a TSB, due to memory constraints
9892  *	TSB_LOSTRACE	HAT is busy, i.e. another thread is already doing
9893  *			something to this tsbinfo/TSB
9894  *	TSB_SUCCESS	Operation succeeded
9895  */
9896 static tsb_replace_rc_t
9897 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9898     hatlock_t *hatlockp, uint_t flags)
9899 {
9900 	struct tsb_info *new_tsbinfo = NULL;
9901 	struct tsb_info *curtsb, *prevtsb;
9902 	uint_t tte_sz_mask;
9903 	int i;
9904 
9905 	ASSERT(sfmmup != ksfmmup);
9906 	ASSERT(sfmmup->sfmmu_ismhat == 0);
9907 	ASSERT(sfmmu_hat_lock_held(sfmmup));
9908 	ASSERT(szc <= tsb_max_growsize);
9909 
9910 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
9911 		return (TSB_LOSTRACE);
9912 
9913 	/*
9914 	 * Find the tsb_info ahead of this one in the list, and
9915 	 * also make sure that the tsb_info passed in really
9916 	 * exists!
9917 	 */
9918 	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9919 	    curtsb != old_tsbinfo && curtsb != NULL;
9920 	    prevtsb = curtsb, curtsb = curtsb->tsb_next)
9921 		;
9922 	ASSERT(curtsb != NULL);
9923 
9924 	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9925 		/*
9926 		 * The process is swapped out, so just set the new size
9927 		 * code. When it swaps back in, we'll allocate a new one
9928 		 * of the new chosen size.
9929 		 */
9930 		curtsb->tsb_szc = szc;
9931 		return (TSB_SUCCESS);
9932 	}
9933 	SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
9934 
9935 	tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
9936 
9937 	/*
9938 	 * All initialization is done inside of sfmmu_tsbinfo_alloc().
9939 	 * If we fail to allocate a TSB, exit.
9940 	 *
9941 	 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
9942 	 * then try 4M slab after the initial alloc fails.
9943 * 9944 * If tsb swapin with tsb size > 4M, then try 4M after the 9945 * initial alloc fails. 9946 */ 9947 sfmmu_hat_exit(hatlockp); 9948 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, 9949 tte_sz_mask, flags, sfmmup) && 9950 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) || 9951 (!(flags & TSB_SWAPIN) && 9952 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) || 9953 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE, 9954 tte_sz_mask, flags, sfmmup))) { 9955 (void) sfmmu_hat_enter(sfmmup); 9956 if (!(flags & TSB_SWAPIN)) 9957 SFMMU_STAT(sf_tsb_resize_failures); 9958 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9959 return (TSB_ALLOCFAIL); 9960 } 9961 (void) sfmmu_hat_enter(sfmmup); 9962 9963 /* 9964 * Re-check to make sure somebody else didn't muck with us while we 9965 * didn't hold the HAT lock. If the process swapped out, fine, just 9966 * exit; this can happen if we try to shrink the TSB from the context 9967 * of another process (such as on an ISM unmap), though it is rare. 9968 */ 9969 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9970 SFMMU_STAT(sf_tsb_resize_failures); 9971 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9972 sfmmu_hat_exit(hatlockp); 9973 sfmmu_tsbinfo_free(new_tsbinfo); 9974 (void) sfmmu_hat_enter(sfmmup); 9975 return (TSB_LOSTRACE); 9976 } 9977 9978 #ifdef DEBUG 9979 /* Reverify that the tsb_info still exists.. for debugging only */ 9980 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9981 curtsb != old_tsbinfo && curtsb != NULL; 9982 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9983 ; 9984 ASSERT(curtsb != NULL); 9985 #endif /* DEBUG */ 9986 9987 /* 9988 * Quiesce any CPUs running this process on their next TLB miss 9989 * so they atomically see the new tsb_info. We temporarily set the 9990 * context to invalid context so new threads that come on processor 9991 * after we do the xcall to cpusran will also serialize behind the 9992 * HAT lock on TLB miss and will see the new TSB. Since this short 9993 * race with a new thread coming on processor is relatively rare, 9994 * this synchronization mechanism should be cheaper than always 9995 * pausing all CPUs for the duration of the setup, which is what 9996 * the old implementation did. This is particularly true if we are 9997 * copying a huge chunk of memory around during that window. 9998 * 9999 * The memory barriers are to make sure things stay consistent 10000 * with resume() since it does not hold the HAT lock while 10001 * walking the list of tsb_info structures. 10002 */ 10003 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 10004 /* The TSB is either growing or shrinking. */ 10005 sfmmu_invalidate_ctx(sfmmup); 10006 } else { 10007 /* 10008 * It is illegal to swap in TSBs from a process other 10009 * than a process being swapped in. This in turn 10010 * implies we do not have a valid MMU context here 10011 * since a process needs one to resolve translation 10012 * misses.
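 *
 * (In other words, the assertion below simply checks that the hat
 * being swapped in is the one backing the current thread's own
 * address space.)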
10013 */ 10014 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 10015 } 10016 10017 #ifdef DEBUG 10018 ASSERT(max_mmu_ctxdoms > 0); 10019 10020 /* 10021 * Process should have INVALID_CONTEXT on all MMUs 10022 */ 10023 for (i = 0; i < max_mmu_ctxdoms; i++) { 10024 10025 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 10026 } 10027 #endif 10028 10029 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 10030 membar_stst(); /* strict ordering required */ 10031 if (prevtsb) 10032 prevtsb->tsb_next = new_tsbinfo; 10033 else 10034 sfmmup->sfmmu_tsb = new_tsbinfo; 10035 membar_enter(); /* make sure new TSB globally visible */ 10036 10037 /* 10038 * We need to migrate TSB entries from the old TSB to the new TSB 10039 * if tsb_remap_ttes is set and the TSB is growing. 10040 */ 10041 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 10042 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 10043 10044 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10045 10046 /* 10047 * Drop the HAT lock to free our old tsb_info. 10048 */ 10049 sfmmu_hat_exit(hatlockp); 10050 10051 if ((flags & TSB_GROW) == TSB_GROW) { 10052 SFMMU_STAT(sf_tsb_grow); 10053 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 10054 SFMMU_STAT(sf_tsb_shrink); 10055 } 10056 10057 sfmmu_tsbinfo_free(old_tsbinfo); 10058 10059 (void) sfmmu_hat_enter(sfmmup); 10060 return (TSB_SUCCESS); 10061 } 10062 10063 /* 10064 * This function will re-program hat pgsz array, and invalidate the 10065 * process' context, forcing the process to switch to another 10066 * context on the next TLB miss, and therefore start using the 10067 * TLB that is reprogrammed for the new page sizes. 10068 */ 10069 void 10070 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 10071 { 10072 int i; 10073 hatlock_t *hatlockp = NULL; 10074 10075 hatlockp = sfmmu_hat_enter(sfmmup); 10076 /* USIII+-IV+ optimization, requires hat lock */ 10077 if (tmp_pgsz) { 10078 for (i = 0; i < mmu_page_sizes; i++) 10079 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10080 } 10081 SFMMU_STAT(sf_tlb_reprog_pgsz); 10082 10083 sfmmu_invalidate_ctx(sfmmup); 10084 10085 sfmmu_hat_exit(hatlockp); 10086 } 10087 10088 /* 10089 * The scd_rttecnt field in the SCD must be updated to take account of the 10090 * regions which it contains. 10091 */ 10092 static void 10093 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10094 { 10095 uint_t rid; 10096 uint_t i, j; 10097 ulong_t w; 10098 sf_region_t *rgnp; 10099 10100 ASSERT(srdp != NULL); 10101 10102 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10103 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10104 continue; 10105 } 10106 10107 j = 0; 10108 while (w) { 10109 if (!(w & 0x1)) { 10110 j++; 10111 w >>= 1; 10112 continue; 10113 } 10114 rid = (i << BT_ULSHIFT) | j; 10115 j++; 10116 w >>= 1; 10117 10118 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10119 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10120 rgnp = srdp->srd_hmergnp[rid]; 10121 ASSERT(rgnp->rgn_refcnt > 0); 10122 ASSERT(rgnp->rgn_id == rid); 10123 10124 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10125 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10126 10127 /* 10128 * Maintain the tsb0 inflation cnt for the regions 10129 * in the SCD. 
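 *
 * Illustrative arithmetic (example figures only): a 256MB region
 * whose rgn_pgszc is TTE4M adds 256MB >> TTE_PAGE_SHIFT(TTE4M) = 64
 * entries to scd_rttecnt[TTE4M] above, and
 * 256MB >> (TTE_PAGE_SHIFT(TTE8K) + 2) = 8192 to sfmmu_tsb0_4minflcnt
 * here.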
10130 */ 10131 if (rgnp->rgn_pgszc >= TTE4M) { 10132 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10133 rgnp->rgn_size >> 10134 (TTE_PAGE_SHIFT(TTE8K) + 2); 10135 } 10136 } 10137 } 10138 } 10139 10140 /* 10141 * This function assumes that there are either four or six supported page 10142 * sizes and at most two programmable TLBs, so we need to decide which 10143 * page sizes are most important and then tell the MMU layer so it 10144 * can adjust the TLB page sizes accordingly (if supported). 10145 * 10146 * If these assumptions change, this function will need to be 10147 * updated to support whatever the new limits are. 10148 * 10149 * The growing flag is nonzero if we are growing the address space, 10150 * and zero if it is shrinking. This allows us to decide whether 10151 * to grow or shrink our TSB, depending upon available memory 10152 * conditions. 10153 */ 10154 static void 10155 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10156 { 10157 uint64_t ttecnt[MMU_PAGE_SIZES]; 10158 uint64_t tte8k_cnt, tte4m_cnt; 10159 uint8_t i; 10160 int sectsb_thresh; 10161 10162 /* 10163 * Kernel threads, processes with small address spaces not using 10164 * large pages, and dummy ISM HATs need not apply. 10165 */ 10166 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 10167 return; 10168 10169 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10170 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10171 return; 10172 10173 for (i = 0; i < mmu_page_sizes; i++) { 10174 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10175 sfmmup->sfmmu_ismttecnt[i]; 10176 } 10177 10178 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10179 if (&mmu_check_page_sizes) 10180 mmu_check_page_sizes(sfmmup, ttecnt); 10181 10182 /* 10183 * Calculate the number of 8k ttes to represent the span of these 10184 * pages. 10185 */ 10186 tte8k_cnt = ttecnt[TTE8K] + 10187 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10188 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10189 if (mmu_page_sizes == max_mmu_page_sizes) { 10190 tte4m_cnt = ttecnt[TTE4M] + 10191 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10192 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10193 } else { 10194 tte4m_cnt = ttecnt[TTE4M]; 10195 } 10196 10197 /* 10198 * Inflate tte8k_cnt to allow for region large page allocation failure. 10199 */ 10200 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10201 10202 /* 10203 * Inflate TSB sizes by a factor of 2 if this process 10204 * uses 4M text pages to minimize extra conflict misses 10205 * in the first TSB since without counting text pages 10206 * 8K TSB may become too small. 10207 * 10208 * Also double the size of the second TSB to minimize 10209 * extra conflict misses due to competition between 4M text pages 10210 * and data pages. 10211 * 10212 * We need to adjust the second TSB allocation threshold by the 10213 * inflation factor, since there is no point in creating a second 10214 * TSB when we know all the mappings can fit in the I/D TLBs. 10215 */ 10216 sectsb_thresh = tsb_sectsb_threshold; 10217 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10218 tte8k_cnt <<= 1; 10219 tte4m_cnt <<= 1; 10220 sectsb_thresh <<= 1; 10221 } 10222 10223 /* 10224 * Check to see if our TSB is the right size; we may need to 10225 * grow or shrink it. If the process is small, our work is 10226 * finished at this point. 
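 *
 * To illustrate the span arithmetic above with example numbers: each
 * 64K tte counts as 8 eight-K ttes and each 512K tte as 64, so ttecnt
 * values of 1000 x 8K, 10 x 64K and 2 x 512K give
 * tte8k_cnt = 1000 + 80 + 128 = 1208, before the tsb0_4minflcnt
 * inflation and the 4M-text doubling are applied.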
10227 */ 10228 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10229 return; 10230 } 10231 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10232 } 10233 10234 static void 10235 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10236 uint64_t tte4m_cnt, int sectsb_thresh) 10237 { 10238 int tsb_bits; 10239 uint_t tsb_szc; 10240 struct tsb_info *tsbinfop; 10241 hatlock_t *hatlockp = NULL; 10242 10243 hatlockp = sfmmu_hat_enter(sfmmup); 10244 ASSERT(hatlockp != NULL); 10245 tsbinfop = sfmmup->sfmmu_tsb; 10246 ASSERT(tsbinfop != NULL); 10247 10248 /* 10249 * If we're growing, select the size based on RSS. If we're 10250 * shrinking, leave some room so we don't have to turn around and 10251 * grow again immediately. 10252 */ 10253 if (growing) 10254 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10255 else 10256 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10257 10258 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10259 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10260 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10261 hatlockp, TSB_SHRINK); 10262 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10263 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10264 hatlockp, TSB_GROW); 10265 } 10266 tsbinfop = sfmmup->sfmmu_tsb; 10267 10268 /* 10269 * With the TLB and first TSB out of the way, we need to see if 10270 * we need a second TSB for 4M pages. If we managed to reprogram 10271 * the TLB page sizes above, the process will start using this new 10272 * TSB right away; otherwise, it will start using it on the next 10273 * context switch. Either way, it's no big deal so there's no 10274 * synchronization with the trap handlers here unless we grow the 10275 * TSB (in which case it's required to prevent using the old one 10276 * after it's freed). Note: second tsb is required for 32M/256M 10277 * page sizes. 10278 */ 10279 if (tte4m_cnt > sectsb_thresh) { 10280 /* 10281 * If we're growing, select the size based on RSS. If we're 10282 * shrinking, leave some room so we don't have to turn 10283 * around and grow again immediately. 10284 */ 10285 if (growing) 10286 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10287 else 10288 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10289 if (tsbinfop->tsb_next == NULL) { 10290 struct tsb_info *newtsb; 10291 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10292 0 : TSB_ALLOC; 10293 10294 sfmmu_hat_exit(hatlockp); 10295 10296 /* 10297 * Try to allocate a TSB for 4[32|256]M pages. If we 10298 * can't get the size we want, retry w/a minimum sized 10299 * TSB. If that still didn't work, give up; we can 10300 * still run without one. 10301 */ 10302 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10303 TSB4M|TSB32M|TSB256M:TSB4M; 10304 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10305 allocflags, sfmmup)) && 10306 (tsb_szc <= TSB_4M_SZCODE || 10307 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10308 tsb_bits, allocflags, sfmmup)) && 10309 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10310 tsb_bits, allocflags, sfmmup)) { 10311 return; 10312 } 10313 10314 hatlockp = sfmmu_hat_enter(sfmmup); 10315 10316 sfmmu_invalidate_ctx(sfmmup); 10317 10318 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10319 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10320 SFMMU_STAT(sf_tsb_sectsb_create); 10321 sfmmu_hat_exit(hatlockp); 10322 return; 10323 } else { 10324 /* 10325 * It's annoying, but possible for us 10326 * to get here.. 
we dropped the HAT lock 10327 * because of locking order in the kmem 10328 * allocator, and while we were off getting 10329 * our memory, some other thread decided to 10330 * do us a favor and won the race to get a 10331 * second TSB for this process. Sigh. 10332 */ 10333 sfmmu_hat_exit(hatlockp); 10334 sfmmu_tsbinfo_free(newtsb); 10335 return; 10336 } 10337 } 10338 10339 /* 10340 * We have a second TSB, see if it's big enough. 10341 */ 10342 tsbinfop = tsbinfop->tsb_next; 10343 10344 /* 10345 * Check to see if our second TSB is the right size; 10346 * we may need to grow or shrink it. 10347 * To prevent thrashing (e.g. growing the TSB on a 10348 * subsequent map operation), only try to shrink if 10349 * the TSB reach exceeds twice the virtual address 10350 * space size. 10351 */ 10352 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10353 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10354 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10355 tsb_szc, hatlockp, TSB_SHRINK); 10356 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10357 TSB_OK_GROW()) { 10358 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10359 tsb_szc, hatlockp, TSB_GROW); 10360 } 10361 } 10362 10363 sfmmu_hat_exit(hatlockp); 10364 } 10365 10366 /* 10367 * Free up a sfmmu 10368 * Since the sfmmu is currently embedded in the hat struct we simply zero 10369 * out our fields and free up the ism map blk list if any. 10370 */ 10371 static void 10372 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10373 { 10374 ism_blk_t *blkp, *nx_blkp; 10375 #ifdef DEBUG 10376 ism_map_t *map; 10377 int i; 10378 #endif 10379 10380 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10381 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10382 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10383 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10384 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10385 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10386 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10387 10388 sfmmup->sfmmu_free = 0; 10389 sfmmup->sfmmu_ismhat = 0; 10390 10391 blkp = sfmmup->sfmmu_iblk; 10392 sfmmup->sfmmu_iblk = NULL; 10393 10394 while (blkp) { 10395 #ifdef DEBUG 10396 map = blkp->iblk_maps; 10397 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10398 ASSERT(map[i].imap_seg == 0); 10399 ASSERT(map[i].imap_ismhat == NULL); 10400 ASSERT(map[i].imap_ment == NULL); 10401 } 10402 #endif 10403 nx_blkp = blkp->iblk_next; 10404 blkp->iblk_next = NULL; 10405 blkp->iblk_nextpa = (uint64_t)-1; 10406 kmem_cache_free(ism_blk_cache, blkp); 10407 blkp = nx_blkp; 10408 } 10409 } 10410 10411 /* 10412 * Locking primitves accessed by HATLOCK macros 10413 */ 10414 10415 #define SFMMU_SPL_MTX (0x0) 10416 #define SFMMU_ML_MTX (0x1) 10417 10418 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10419 SPL_HASH(pg) : MLIST_HASH(pg)) 10420 10421 kmutex_t * 10422 sfmmu_page_enter(struct page *pp) 10423 { 10424 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10425 } 10426 10427 void 10428 sfmmu_page_exit(kmutex_t *spl) 10429 { 10430 mutex_exit(spl); 10431 } 10432 10433 int 10434 sfmmu_page_spl_held(struct page *pp) 10435 { 10436 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10437 } 10438 10439 kmutex_t * 10440 sfmmu_mlist_enter(struct page *pp) 10441 { 10442 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10443 } 10444 10445 void 10446 sfmmu_mlist_exit(kmutex_t *mml) 10447 { 10448 mutex_exit(mml); 10449 } 10450 10451 int 10452 sfmmu_mlist_held(struct page *pp) 10453 { 10454 10455 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10456 } 10457 10458 /* 10459 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). 
For 10460 * sfmmu_mlist_enter() case mml_table lock array is used and for 10461 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10462 * 10463 * The lock is taken on a root page so that it protects an operation on all 10464 * constituent pages of a large page pp belongs to. 10465 * 10466 * The routine takes a lock from the appropriate array. The lock is determined 10467 * by hashing the root page. After taking the lock this routine checks if the 10468 * root page has the same size code that was used to determine the root (i.e 10469 * that root hasn't changed). If root page has the expected p_szc field we 10470 * have the right lock and it's returned to the caller. If root's p_szc 10471 * decreased we release the lock and retry from the beginning. This case can 10472 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10473 * value and taking the lock. The number of retries due to p_szc decrease is 10474 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10475 * determined by hashing pp itself. 10476 * 10477 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10478 * possible that p_szc can increase. To increase p_szc a thread has to lock 10479 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10480 * callers that don't hold a page locked recheck if hmeblk through which pp 10481 * was found still maps this pp. If it doesn't map it anymore returned lock 10482 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10483 * p_szc increase after taking the lock it returns this lock without further 10484 * retries because in this case the caller doesn't care about which lock was 10485 * taken. The caller will drop it right away. 10486 * 10487 * After the routine returns it's guaranteed that hat_page_demote() can't 10488 * change p_szc field of any of constituent pages of a large page pp belongs 10489 * to as long as pp was either locked at least SHARED prior to this call or 10490 * the caller finds that hment that pointed to this pp still references this 10491 * pp (this also assumes that the caller holds hme hash bucket lock so that 10492 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10493 * hat_pageunload()). 10494 */ 10495 static kmutex_t * 10496 sfmmu_mlspl_enter(struct page *pp, int type) 10497 { 10498 kmutex_t *mtx; 10499 uint_t prev_rszc = UINT_MAX; 10500 page_t *rootpp; 10501 uint_t szc; 10502 uint_t rszc; 10503 uint_t pszc = pp->p_szc; 10504 10505 ASSERT(pp != NULL); 10506 10507 again: 10508 if (pszc == 0) { 10509 mtx = SFMMU_MLSPL_MTX(type, pp); 10510 mutex_enter(mtx); 10511 return (mtx); 10512 } 10513 10514 /* The lock lives in the root page */ 10515 rootpp = PP_GROUPLEADER(pp, pszc); 10516 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10517 mutex_enter(mtx); 10518 10519 /* 10520 * Return mml in the following 3 cases: 10521 * 10522 * 1) If pp itself is root since if its p_szc decreased before we took 10523 * the lock pp is still the root of smaller szc page. And if its p_szc 10524 * increased it doesn't matter what lock we return (see comment in 10525 * front of this routine). 10526 * 10527 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10528 * large page we have the right lock since any previous potential 10529 * hat_page_demote() is done demoting from greater than current root's 10530 * p_szc because hat_page_demote() changes root's p_szc last. 
No 10531 * further hat_page_demote() can start or be in progress since it 10532 * would need the same lock we currently hold. 10533 * 10534 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10535 * matter what lock we return (see comment in front of this routine). 10536 */ 10537 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10538 rszc >= prev_rszc) { 10539 return (mtx); 10540 } 10541 10542 /* 10543 * hat_page_demote() could have decreased root's p_szc. 10544 * In this case pp's p_szc must also be smaller than pszc. 10545 * Retry. 10546 */ 10547 if (rszc < pszc) { 10548 szc = pp->p_szc; 10549 if (szc < pszc) { 10550 mutex_exit(mtx); 10551 pszc = szc; 10552 goto again; 10553 } 10554 /* 10555 * pp's p_szc increased after it was decreased. 10556 * page cannot be mapped. Return current lock. The caller 10557 * will drop it right away. 10558 */ 10559 return (mtx); 10560 } 10561 10562 /* 10563 * root's p_szc is greater than pp's p_szc. 10564 * hat_page_demote() is not done with all pages 10565 * yet. Wait for it to complete. 10566 */ 10567 mutex_exit(mtx); 10568 rootpp = PP_GROUPLEADER(rootpp, rszc); 10569 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10570 mutex_enter(mtx); 10571 mutex_exit(mtx); 10572 prev_rszc = rszc; 10573 goto again; 10574 } 10575 10576 static int 10577 sfmmu_mlspl_held(struct page *pp, int type) 10578 { 10579 kmutex_t *mtx; 10580 10581 ASSERT(pp != NULL); 10582 /* The lock lives in the root page */ 10583 pp = PP_PAGEROOT(pp); 10584 ASSERT(pp != NULL); 10585 10586 mtx = SFMMU_MLSPL_MTX(type, pp); 10587 return (MUTEX_HELD(mtx)); 10588 } 10589 10590 static uint_t 10591 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10592 { 10593 struct hme_blk *hblkp; 10594 10595 if (freehblkp != NULL) { 10596 mutex_enter(&freehblkp_lock); 10597 if (freehblkp != NULL) { 10598 /* 10599 * If the current thread is owning hblk_reserve OR 10600 * critical request from sfmmu_hblk_steal() 10601 * let it succeed even if freehblkcnt is really low. 10602 */ 10603 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10604 SFMMU_STAT(sf_get_free_throttle); 10605 mutex_exit(&freehblkp_lock); 10606 return (0); 10607 } 10608 freehblkcnt--; 10609 *hmeblkpp = freehblkp; 10610 hblkp = *hmeblkpp; 10611 freehblkp = hblkp->hblk_next; 10612 mutex_exit(&freehblkp_lock); 10613 hblkp->hblk_next = NULL; 10614 SFMMU_STAT(sf_get_free_success); 10615 return (1); 10616 } 10617 mutex_exit(&freehblkp_lock); 10618 } 10619 SFMMU_STAT(sf_get_free_fail); 10620 return (0); 10621 } 10622 10623 static uint_t 10624 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10625 { 10626 struct hme_blk *hblkp; 10627 10628 /* 10629 * If the current thread is mapping into kernel space, 10630 * let it succede even if freehblkcnt is max 10631 * so that it will avoid freeing it to kmem. 10632 * This will prevent stack overflow due to 10633 * possible recursion since kmem_cache_free() 10634 * might require creation of a slab which 10635 * in turn needs an hmeblk to map that slab; 10636 * let's break this vicious chain at the first 10637 * opportunity. 10638 */ 10639 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10640 mutex_enter(&freehblkp_lock); 10641 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10642 SFMMU_STAT(sf_put_free_success); 10643 freehblkcnt++; 10644 hmeblkp->hblk_next = freehblkp; 10645 freehblkp = hmeblkp; 10646 mutex_exit(&freehblkp_lock); 10647 return (1); 10648 } 10649 mutex_exit(&freehblkp_lock); 10650 } 10651 10652 /* 10653 * Bring down freehblkcnt to HBLK_RESERVE_CNT. 
We are here 10654 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10655 * we are not in the process of mapping into kernel space. 10656 */ 10657 ASSERT(!critical); 10658 while (freehblkcnt > HBLK_RESERVE_CNT) { 10659 mutex_enter(&freehblkp_lock); 10660 if (freehblkcnt > HBLK_RESERVE_CNT) { 10661 freehblkcnt--; 10662 hblkp = freehblkp; 10663 freehblkp = hblkp->hblk_next; 10664 mutex_exit(&freehblkp_lock); 10665 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10666 kmem_cache_free(sfmmu8_cache, hblkp); 10667 continue; 10668 } 10669 mutex_exit(&freehblkp_lock); 10670 } 10671 SFMMU_STAT(sf_put_free_fail); 10672 return (0); 10673 } 10674 10675 static void 10676 sfmmu_hblk_swap(struct hme_blk *new) 10677 { 10678 struct hme_blk *old, *hblkp, *prev; 10679 uint64_t hblkpa, prevpa, newpa; 10680 caddr_t base, vaddr, endaddr; 10681 struct hmehash_bucket *hmebp; 10682 struct sf_hment *osfhme, *nsfhme; 10683 page_t *pp; 10684 kmutex_t *pml; 10685 tte_t tte; 10686 10687 #ifdef DEBUG 10688 hmeblk_tag hblktag; 10689 struct hme_blk *found; 10690 #endif 10691 old = HBLK_RESERVE; 10692 ASSERT(!old->hblk_shared); 10693 10694 /* 10695 * save pa before bcopy clobbers it 10696 */ 10697 newpa = new->hblk_nextpa; 10698 10699 base = (caddr_t)get_hblk_base(old); 10700 endaddr = base + get_hblk_span(old); 10701 10702 /* 10703 * acquire hash bucket lock. 10704 */ 10705 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10706 SFMMU_INVALID_SHMERID); 10707 10708 /* 10709 * copy contents from old to new 10710 */ 10711 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10712 10713 /* 10714 * add new to hash chain 10715 */ 10716 sfmmu_hblk_hash_add(hmebp, new, newpa); 10717 10718 /* 10719 * search hash chain for hblk_reserve; this needs to be performed 10720 * after adding new, otherwise prevpa and prev won't correspond 10721 * to the hblk which is prior to old in hash chain when we call 10722 * sfmmu_hblk_hash_rm to remove old later. 10723 */ 10724 for (prevpa = 0, prev = NULL, 10725 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 10726 hblkp != NULL && hblkp != old; 10727 prevpa = hblkpa, prev = hblkp, 10728 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next) 10729 ; 10730 10731 if (hblkp != old) 10732 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10733 10734 /* 10735 * p_mapping list is still pointing to hments in hblk_reserve; 10736 * fix up p_mapping list so that they point to hments in new. 10737 * 10738 * Since all these mappings are created by hblk_reserve_thread 10739 * on the way and it's using at least one of the buffers from each of 10740 * the newly minted slabs, there is no danger of any of these 10741 * mappings getting unloaded by another thread. 10742 * 10743 * tsbmiss could only modify ref/mod bits of hments in old/new. 10744 * Since all of these hments hold mappings established by segkmem 10745 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10746 * have no meaning for the mappings in hblk_reserve. hments in 10747 * old and new are identical except for ref/mod bits. 
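 *
 * Consequently the loop below only needs to re-home each valid hment:
 * for every mapped vaddr it takes the mlist lock, does HME_ADD() on
 * the hment in new and HME_SUB() on the corresponding hment in old;
 * the tte contents themselves were already duplicated by the bcopy()
 * above.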
10748 */ 10749 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10750 10751 HBLKTOHME(osfhme, old, vaddr); 10752 sfmmu_copytte(&osfhme->hme_tte, &tte); 10753 10754 if (TTE_IS_VALID(&tte)) { 10755 if ((pp = osfhme->hme_page) == NULL) 10756 panic("sfmmu_hblk_swap: page not mapped"); 10757 10758 pml = sfmmu_mlist_enter(pp); 10759 10760 if (pp != osfhme->hme_page) 10761 panic("sfmmu_hblk_swap: mapping changed"); 10762 10763 HBLKTOHME(nsfhme, new, vaddr); 10764 10765 HME_ADD(nsfhme, pp); 10766 HME_SUB(osfhme, pp); 10767 10768 sfmmu_mlist_exit(pml); 10769 } 10770 } 10771 10772 /* 10773 * remove old from hash chain 10774 */ 10775 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 10776 10777 #ifdef DEBUG 10778 10779 hblktag.htag_id = ksfmmup; 10780 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10781 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10782 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10783 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10784 10785 if (found != new) 10786 panic("sfmmu_hblk_swap: new hblk not found"); 10787 #endif 10788 10789 SFMMU_HASH_UNLOCK(hmebp); 10790 10791 /* 10792 * Reset hblk_reserve 10793 */ 10794 bzero((void *)old, HME8BLK_SZ); 10795 old->hblk_nextpa = va_to_pa((caddr_t)old); 10796 } 10797 10798 /* 10799 * Grab the mlist mutex for both pages passed in. 10800 * 10801 * low and high will be returned as pointers to the mutexes for these pages. 10802 * low refers to the mutex residing in the lower bin of the mlist hash, while 10803 * high refers to the mutex residing in the higher bin of the mlist hash. This 10804 * is due to the locking order restrictions on the same thread grabbing 10805 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10806 * 10807 * If both pages hash to the same mutex, only grab that single mutex, and 10808 * high will be returned as NULL 10809 * If the pages hash to different bins in the hash, grab the lower addressed 10810 * lock first and then the higher addressed lock in order to follow the locking 10811 * rules involved with the same thread grabbing multiple mlist mutexes. 10812 * low and high will both have non-NULL values. 10813 */ 10814 static void 10815 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10816 kmutex_t **low, kmutex_t **high) 10817 { 10818 kmutex_t *mml_targ, *mml_repl; 10819 10820 /* 10821 * no need to do the dance around szc as in sfmmu_mlist_enter() 10822 * because this routine is only called by hat_page_relocate() and all 10823 * targ and repl pages are already locked EXCL so szc can't change. 
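 *
 * The pointer comparison below implements the ordering contract
 * described above: whenever targ and repl hash to different mutexes,
 * the lower-addressed mutex is returned in *low and acquired first.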
10824 */ 10825 10826 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10827 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10828 10829 if (mml_targ == mml_repl) { 10830 *low = mml_targ; 10831 *high = NULL; 10832 } else { 10833 if (mml_targ < mml_repl) { 10834 *low = mml_targ; 10835 *high = mml_repl; 10836 } else { 10837 *low = mml_repl; 10838 *high = mml_targ; 10839 } 10840 } 10841 10842 mutex_enter(*low); 10843 if (*high) 10844 mutex_enter(*high); 10845 } 10846 10847 static void 10848 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 10849 { 10850 if (high) 10851 mutex_exit(high); 10852 mutex_exit(low); 10853 } 10854 10855 static hatlock_t * 10856 sfmmu_hat_enter(sfmmu_t *sfmmup) 10857 { 10858 hatlock_t *hatlockp; 10859 10860 if (sfmmup != ksfmmup) { 10861 hatlockp = TSB_HASH(sfmmup); 10862 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 10863 return (hatlockp); 10864 } 10865 return (NULL); 10866 } 10867 10868 static hatlock_t * 10869 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 10870 { 10871 hatlock_t *hatlockp; 10872 10873 if (sfmmup != ksfmmup) { 10874 hatlockp = TSB_HASH(sfmmup); 10875 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 10876 return (NULL); 10877 return (hatlockp); 10878 } 10879 return (NULL); 10880 } 10881 10882 static void 10883 sfmmu_hat_exit(hatlock_t *hatlockp) 10884 { 10885 if (hatlockp != NULL) 10886 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 10887 } 10888 10889 static void 10890 sfmmu_hat_lock_all(void) 10891 { 10892 int i; 10893 for (i = 0; i < SFMMU_NUM_LOCK; i++) 10894 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 10895 } 10896 10897 static void 10898 sfmmu_hat_unlock_all(void) 10899 { 10900 int i; 10901 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 10902 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 10903 } 10904 10905 int 10906 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 10907 { 10908 ASSERT(sfmmup != ksfmmup); 10909 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 10910 } 10911 10912 /* 10913 * Locking primitives to provide consistency between ISM unmap 10914 * and other operations. Since ISM unmap can take a long time, we 10915 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 10916 * contention on the hatlock buckets while ISM segments are being 10917 * unmapped. The tradeoff is that the flags don't prevent priority 10918 * inversion from occurring, so we must request kernel priority in 10919 * case we have to sleep to keep from getting buried while holding 10920 * the HAT_ISMBUSY flag set, which in turn could block other kernel 10921 * threads from running (for example, in sfmmu_uvatopfn()). 
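 *
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);		set HAT_ISMBUSY, may cv_wait
 *	... long-running ISM unmap work, serialized against
 *	    sfmmu_tsbmiss_exception() ...
 *	sfmmu_ismhat_exit(sfmmup, 0);		clear the flag, wake waiters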
10922 */ 10923 static void 10924 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 10925 { 10926 hatlock_t *hatlockp; 10927 10928 THREAD_KPRI_REQUEST(); 10929 if (!hatlock_held) 10930 hatlockp = sfmmu_hat_enter(sfmmup); 10931 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 10932 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10933 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 10934 if (!hatlock_held) 10935 sfmmu_hat_exit(hatlockp); 10936 } 10937 10938 static void 10939 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 10940 { 10941 hatlock_t *hatlockp; 10942 10943 if (!hatlock_held) 10944 hatlockp = sfmmu_hat_enter(sfmmup); 10945 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 10946 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 10947 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10948 if (!hatlock_held) 10949 sfmmu_hat_exit(hatlockp); 10950 THREAD_KPRI_RELEASE(); 10951 } 10952 10953 /* 10954 * 10955 * Algorithm: 10956 * 10957 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 10958 * hblks. 10959 * 10960 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 10961 * 10962 * (a) try to return an hblk from reserve pool of free hblks; 10963 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 10964 * and return hblk_reserve. 10965 * 10966 * (3) call kmem_cache_alloc() to allocate hblk; 10967 * 10968 * (a) if hblk_reserve_lock is held by the current thread, 10969 * atomically replace hblk_reserve by the hblk that is 10970 * returned by kmem_cache_alloc; release hblk_reserve_lock 10971 * and call kmem_cache_alloc() again. 10972 * (b) if reserve pool is not full, add the hblk that is 10973 * returned by kmem_cache_alloc to reserve pool and 10974 * call kmem_cache_alloc again. 10975 * 10976 */ 10977 static struct hme_blk * 10978 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 10979 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 10980 uint_t flags, uint_t rid) 10981 { 10982 struct hme_blk *hmeblkp = NULL; 10983 struct hme_blk *newhblkp; 10984 struct hme_blk *shw_hblkp = NULL; 10985 struct kmem_cache *sfmmu_cache = NULL; 10986 uint64_t hblkpa; 10987 ulong_t index; 10988 uint_t owner; /* set to 1 if using hblk_reserve */ 10989 uint_t forcefree; 10990 int sleep; 10991 sf_srd_t *srdp; 10992 sf_region_t *rgnp; 10993 10994 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10995 ASSERT(hblktag.htag_rid == rid); 10996 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 10997 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 10998 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 10999 11000 /* 11001 * If segkmem is not created yet, allocate from static hmeblks 11002 * created at the end of startup_modules(). See the block comment 11003 * in startup_modules() describing how we estimate the number of 11004 * static hmeblks that will be needed during re-map. 11005 */ 11006 if (!hblk_alloc_dynamic) { 11007 11008 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11009 11010 if (size == TTE8K) { 11011 index = nucleus_hblk8.index; 11012 if (index >= nucleus_hblk8.len) { 11013 /* 11014 * If we panic here, see startup_modules() to 11015 * make sure that we are calculating the 11016 * number of hblk8's that we need correctly. 11017 */ 11018 prom_panic("no nucleus hblk8 to allocate"); 11019 } 11020 hmeblkp = 11021 (struct hme_blk *)&nucleus_hblk8.list[index]; 11022 nucleus_hblk8.index++; 11023 SFMMU_STAT(sf_hblk8_nalloc); 11024 } else { 11025 index = nucleus_hblk1.index; 11026 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 11027 /* 11028 * If we panic here, see startup_modules(). 
11029 * Most likely you need to update the 11030 * calculation of the number of hblk1 elements 11031 * that the kernel needs to boot. 11032 */ 11033 prom_panic("no nucleus hblk1 to allocate"); 11034 } 11035 hmeblkp = 11036 (struct hme_blk *)&nucleus_hblk1.list[index]; 11037 nucleus_hblk1.index++; 11038 SFMMU_STAT(sf_hblk1_nalloc); 11039 } 11040 11041 goto hblk_init; 11042 } 11043 11044 SFMMU_HASH_UNLOCK(hmebp); 11045 11046 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 11047 if (mmu_page_sizes == max_mmu_page_sizes) { 11048 if (size < TTE256M) 11049 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11050 size, flags); 11051 } else { 11052 if (size < TTE4M) 11053 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11054 size, flags); 11055 } 11056 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11057 /* 11058 * Shared hmes use per region bitmaps in rgn_hmeflag 11059 * rather than shadow hmeblks to keep track of the 11060 * mapping sizes which have been allocated for the region. 11061 * Here we cleanup old invalid hmeblks with this rid, 11062 * which may be left around by pageunload(). 11063 */ 11064 int ttesz; 11065 caddr_t va; 11066 caddr_t eva = vaddr + TTEBYTES(size); 11067 11068 ASSERT(sfmmup != KHATID); 11069 11070 srdp = sfmmup->sfmmu_srdp; 11071 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11072 rgnp = srdp->srd_hmergnp[rid]; 11073 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11074 ASSERT(rgnp->rgn_refcnt != 0); 11075 ASSERT(size <= rgnp->rgn_pgszc); 11076 11077 ttesz = HBLK_MIN_TTESZ; 11078 do { 11079 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11080 continue; 11081 } 11082 11083 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11084 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11085 } else if (ttesz < size) { 11086 for (va = vaddr; va < eva; 11087 va += TTEBYTES(ttesz)) { 11088 sfmmu_cleanup_rhblk(srdp, va, rid, 11089 ttesz); 11090 } 11091 } 11092 } while (++ttesz <= rgnp->rgn_pgszc); 11093 } 11094 11095 fill_hblk: 11096 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11097 11098 if (owner && size == TTE8K) { 11099 11100 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11101 /* 11102 * We are really in a tight spot. We already own 11103 * hblk_reserve and we need another hblk. In anticipation 11104 * of this kind of scenario, we specifically set aside 11105 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11106 * by owner of hblk_reserve. 11107 */ 11108 SFMMU_STAT(sf_hblk_recurse_cnt); 11109 11110 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11111 panic("sfmmu_hblk_alloc: reserve list is empty"); 11112 11113 goto hblk_verify; 11114 } 11115 11116 ASSERT(!owner); 11117 11118 if ((flags & HAT_NO_KALLOC) == 0) { 11119 11120 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11121 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11122 11123 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11124 hmeblkp = sfmmu_hblk_steal(size); 11125 } else { 11126 /* 11127 * if we are the owner of hblk_reserve, 11128 * swap hblk_reserve with hmeblkp and 11129 * start a fresh life. Hope things go 11130 * better this time. 
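 *
 * (Concretely, the sfmmu_hblk_swap() call below copies hblk_reserve's
 * contents into the freshly allocated hmeblkp, splices hmeblkp into
 * the hash chain and the p_mapping lists in its place, and finally
 * zeroes hblk_reserve so it can be handed out again.)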
11131 */ 11132 if (hblk_reserve_thread == curthread) { 11133 ASSERT(sfmmu_cache == sfmmu8_cache); 11134 sfmmu_hblk_swap(hmeblkp); 11135 hblk_reserve_thread = NULL; 11136 mutex_exit(&hblk_reserve_lock); 11137 goto fill_hblk; 11138 } 11139 /* 11140 * let's donate this hblk to our reserve list if 11141 * we are not mapping kernel range 11142 */ 11143 if (size == TTE8K && sfmmup != KHATID) 11144 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11145 goto fill_hblk; 11146 } 11147 } else { 11148 /* 11149 * We are here to map the slab in sfmmu8_cache; let's 11150 * check if we could tap our reserve list; if successful, 11151 * this will avoid the pain of going thru sfmmu_hblk_swap 11152 */ 11153 SFMMU_STAT(sf_hblk_slab_cnt); 11154 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11155 /* 11156 * let's start hblk_reserve dance 11157 */ 11158 SFMMU_STAT(sf_hblk_reserve_cnt); 11159 owner = 1; 11160 mutex_enter(&hblk_reserve_lock); 11161 hmeblkp = HBLK_RESERVE; 11162 hblk_reserve_thread = curthread; 11163 } 11164 } 11165 11166 hblk_verify: 11167 ASSERT(hmeblkp != NULL); 11168 set_hblk_sz(hmeblkp, size); 11169 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11170 SFMMU_HASH_LOCK(hmebp); 11171 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11172 if (newhblkp != NULL) { 11173 SFMMU_HASH_UNLOCK(hmebp); 11174 if (hmeblkp != HBLK_RESERVE) { 11175 /* 11176 * This is really tricky! 11177 * 11178 * vmem_alloc(vmem_seg_arena) 11179 * vmem_alloc(vmem_internal_arena) 11180 * segkmem_alloc(heap_arena) 11181 * vmem_alloc(heap_arena) 11182 * page_create() 11183 * hat_memload() 11184 * kmem_cache_free() 11185 * kmem_cache_alloc() 11186 * kmem_slab_create() 11187 * vmem_alloc(kmem_internal_arena) 11188 * segkmem_alloc(heap_arena) 11189 * vmem_alloc(heap_arena) 11190 * page_create() 11191 * hat_memload() 11192 * kmem_cache_free() 11193 * ... 11194 * 11195 * Thus, hat_memload() could call kmem_cache_free 11196 * for enough number of times that we could easily 11197 * hit the bottom of the stack or run out of reserve 11198 * list of vmem_seg structs. So, we must donate 11199 * this hblk to reserve list if it's allocated 11200 * from sfmmu8_cache *and* mapping kernel range. 11201 * We don't need to worry about freeing hmeblk1's 11202 * to kmem since they don't map any kmem slabs. 11203 * 11204 * Note: When segkmem supports largepages, we must 11205 * free hmeblk1's to reserve list as well. 11206 */ 11207 forcefree = (sfmmup == KHATID) ? 1 : 0; 11208 if (size == TTE8K && 11209 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11210 goto re_verify; 11211 } 11212 ASSERT(sfmmup != KHATID); 11213 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11214 } else { 11215 /* 11216 * Hey! we don't need hblk_reserve any more. 11217 */ 11218 ASSERT(owner); 11219 hblk_reserve_thread = NULL; 11220 mutex_exit(&hblk_reserve_lock); 11221 owner = 0; 11222 } 11223 re_verify: 11224 /* 11225 * let's check if the goodies are still present 11226 */ 11227 SFMMU_HASH_LOCK(hmebp); 11228 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11229 if (newhblkp != NULL) { 11230 /* 11231 * return newhblkp if it's not hblk_reserve; 11232 * if newhblkp is hblk_reserve, return it 11233 * _only if_ we are the owner of hblk_reserve. 
11234 */ 11235 if (newhblkp != HBLK_RESERVE || owner) { 11236 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11237 newhblkp->hblk_shared); 11238 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11239 !newhblkp->hblk_shared); 11240 return (newhblkp); 11241 } else { 11242 /* 11243 * we just hit hblk_reserve in the hash and 11244 * we are not the owner of that; 11245 * 11246 * block until hblk_reserve_thread completes 11247 * swapping hblk_reserve and try the dance 11248 * once again. 11249 */ 11250 SFMMU_HASH_UNLOCK(hmebp); 11251 mutex_enter(&hblk_reserve_lock); 11252 mutex_exit(&hblk_reserve_lock); 11253 SFMMU_STAT(sf_hblk_reserve_hit); 11254 goto fill_hblk; 11255 } 11256 } else { 11257 /* 11258 * it's no more! try the dance once again. 11259 */ 11260 SFMMU_HASH_UNLOCK(hmebp); 11261 goto fill_hblk; 11262 } 11263 } 11264 11265 hblk_init: 11266 if (SFMMU_IS_SHMERID_VALID(rid)) { 11267 uint16_t tteflag = 0x1 << 11268 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size); 11269 11270 if (!(rgnp->rgn_hmeflags & tteflag)) { 11271 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11272 } 11273 hmeblkp->hblk_shared = 1; 11274 } else { 11275 hmeblkp->hblk_shared = 0; 11276 } 11277 set_hblk_sz(hmeblkp, size); 11278 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11279 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11280 hmeblkp->hblk_tag = hblktag; 11281 hmeblkp->hblk_shadow = shw_hblkp; 11282 hblkpa = hmeblkp->hblk_nextpa; 11283 hmeblkp->hblk_nextpa = 0; 11284 11285 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11286 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11287 ASSERT(hmeblkp->hblk_hmecnt == 0); 11288 ASSERT(hmeblkp->hblk_vcnt == 0); 11289 ASSERT(hmeblkp->hblk_lckcnt == 0); 11290 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11291 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11292 return (hmeblkp); 11293 } 11294 11295 /* 11296 * This function performs any cleanup required on the hme_blk 11297 * and returns it to the free list. 11298 */ 11299 /* ARGSUSED */ 11300 static void 11301 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11302 uint64_t hblkpa, struct hme_blk **listp) 11303 { 11304 int shw_size, vshift; 11305 struct hme_blk *shw_hblkp; 11306 uint_t shw_mask, newshw_mask; 11307 caddr_t vaddr; 11308 int size; 11309 uint_t critical; 11310 11311 ASSERT(hmeblkp); 11312 ASSERT(!hmeblkp->hblk_hmecnt); 11313 ASSERT(!hmeblkp->hblk_vcnt); 11314 ASSERT(!hmeblkp->hblk_lckcnt); 11315 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11316 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11317 11318 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 11319 11320 size = get_hblk_ttesz(hmeblkp); 11321 shw_hblkp = hmeblkp->hblk_shadow; 11322 if (shw_hblkp) { 11323 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 11324 ASSERT(!hmeblkp->hblk_shared); 11325 if (mmu_page_sizes == max_mmu_page_sizes) { 11326 ASSERT(size < TTE256M); 11327 } else { 11328 ASSERT(size < TTE4M); 11329 } 11330 11331 shw_size = get_hblk_ttesz(shw_hblkp); 11332 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11333 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11334 ASSERT(vshift < 8); 11335 /* 11336 * Atomically clear shadow mask bit 11337 */ 11338 do { 11339 shw_mask = shw_hblkp->hblk_shw_mask; 11340 ASSERT(shw_mask & (1 << vshift)); 11341 newshw_mask = shw_mask & ~(1 << vshift); 11342 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 11343 shw_mask, newshw_mask); 11344 } while (newshw_mask != shw_mask); 11345 hmeblkp->hblk_shadow = NULL; 11346 } 11347 hmeblkp->hblk_next = NULL; 11348 hmeblkp->hblk_nextpa = hblkpa; 11349 hmeblkp->hblk_shw_bit = 0; 11350 11351 if (hmeblkp->hblk_shared) { 11352 sf_srd_t *srdp; 11353 sf_region_t *rgnp; 11354 uint_t rid; 11355 11356 srdp = hblktosrd(hmeblkp); 11357 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11358 rid = hmeblkp->hblk_tag.htag_rid; 11359 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11360 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11361 rgnp = srdp->srd_hmergnp[rid]; 11362 ASSERT(rgnp != NULL); 11363 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11364 hmeblkp->hblk_shared = 0; 11365 } 11366 11367 if (hmeblkp->hblk_nuc_bit == 0) { 11368 11369 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 11370 return; 11371 11372 hmeblkp->hblk_next = *listp; 11373 *listp = hmeblkp; 11374 } 11375 } 11376 11377 static void 11378 sfmmu_hblks_list_purge(struct hme_blk **listp) 11379 { 11380 struct hme_blk *hmeblkp; 11381 11382 while ((hmeblkp = *listp) != NULL) { 11383 *listp = hmeblkp->hblk_next; 11384 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11385 } 11386 } 11387 11388 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11389 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11390 11391 static uint_t sfmmu_hblk_steal_twice; 11392 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11393 11394 /* 11395 * Steal a hmeblk from user or kernel hme hash lists. 11396 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11397 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11398 * tap into critical reserve of freehblkp. 11399 * Note: We remain looping in this routine until we find one. 11400 */ 11401 static struct hme_blk * 11402 sfmmu_hblk_steal(int size) 11403 { 11404 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11405 struct hmehash_bucket *hmebp; 11406 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11407 uint64_t hblkpa, prevpa; 11408 int i; 11409 uint_t loop_cnt = 0, critical; 11410 11411 for (;;) { 11412 if (size == TTE8K) { 11413 critical = 11414 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11415 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11416 return (hmeblkp); 11417 } 11418 11419 hmebp = (uhmehash_steal_hand == NULL) ? 
uhme_hash : 11420 uhmehash_steal_hand; 11421 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11422 11423 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11424 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11425 SFMMU_HASH_LOCK(hmebp); 11426 hmeblkp = hmebp->hmeblkp; 11427 hblkpa = hmebp->hmeh_nextpa; 11428 prevpa = 0; 11429 pr_hblk = NULL; 11430 while (hmeblkp) { 11431 /* 11432 * check if it is a hmeblk that is not locked 11433 * and not shared. skip shadow hmeblks with 11434 * shadow_mask set i.e valid count non zero. 11435 */ 11436 if ((get_hblk_ttesz(hmeblkp) == size) && 11437 (hmeblkp->hblk_shw_bit == 0 || 11438 hmeblkp->hblk_vcnt == 0) && 11439 (hmeblkp->hblk_lckcnt == 0)) { 11440 /* 11441 * there is a high probability that we 11442 * will find a free one. search some 11443 * buckets for a free hmeblk initially 11444 * before unloading a valid hmeblk. 11445 */ 11446 if ((hmeblkp->hblk_vcnt == 0 && 11447 hmeblkp->hblk_hmecnt == 0) || (i >= 11448 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11449 if (sfmmu_steal_this_hblk(hmebp, 11450 hmeblkp, hblkpa, prevpa, 11451 pr_hblk)) { 11452 /* 11453 * Hblk is unloaded 11454 * successfully 11455 */ 11456 break; 11457 } 11458 } 11459 } 11460 pr_hblk = hmeblkp; 11461 prevpa = hblkpa; 11462 hblkpa = hmeblkp->hblk_nextpa; 11463 hmeblkp = hmeblkp->hblk_next; 11464 } 11465 11466 SFMMU_HASH_UNLOCK(hmebp); 11467 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11468 hmebp = uhme_hash; 11469 } 11470 uhmehash_steal_hand = hmebp; 11471 11472 if (hmeblkp != NULL) 11473 break; 11474 11475 /* 11476 * in the worst case, look for a free one in the kernel 11477 * hash table. 11478 */ 11479 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11480 SFMMU_HASH_LOCK(hmebp); 11481 hmeblkp = hmebp->hmeblkp; 11482 hblkpa = hmebp->hmeh_nextpa; 11483 prevpa = 0; 11484 pr_hblk = NULL; 11485 while (hmeblkp) { 11486 /* 11487 * check if it is free hmeblk 11488 */ 11489 if ((get_hblk_ttesz(hmeblkp) == size) && 11490 (hmeblkp->hblk_lckcnt == 0) && 11491 (hmeblkp->hblk_vcnt == 0) && 11492 (hmeblkp->hblk_hmecnt == 0)) { 11493 if (sfmmu_steal_this_hblk(hmebp, 11494 hmeblkp, hblkpa, prevpa, pr_hblk)) { 11495 break; 11496 } else { 11497 /* 11498 * Cannot fail since we have 11499 * hash lock. 11500 */ 11501 panic("fail to steal?"); 11502 } 11503 } 11504 11505 pr_hblk = hmeblkp; 11506 prevpa = hblkpa; 11507 hblkpa = hmeblkp->hblk_nextpa; 11508 hmeblkp = hmeblkp->hblk_next; 11509 } 11510 11511 SFMMU_HASH_UNLOCK(hmebp); 11512 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11513 hmebp = khme_hash; 11514 } 11515 11516 if (hmeblkp != NULL) 11517 break; 11518 sfmmu_hblk_steal_twice++; 11519 } 11520 return (hmeblkp); 11521 } 11522 11523 /* 11524 * This routine does real work to prepare a hblk to be "stolen" by 11525 * unloading the mappings, updating shadow counts .... 11526 * It returns 1 if the block is ready to be reused (stolen), or 0 11527 * means the block cannot be stolen yet- pageunload is still working 11528 * on this hblk. 
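 *
 * Illustrative caller pattern (mirroring the bucket scan in
 * sfmmu_hblk_steal() above):
 *
 *	if (sfmmu_steal_this_hblk(hmebp, hmeblkp, hblkpa, prevpa, pr_hblk))
 *		break;			reuse the unloaded hmeblk
 *	(otherwise leave it in place and keep scanning the bucket)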
11529 */ 11530 static int 11531 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11532 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 11533 { 11534 int shw_size, vshift; 11535 struct hme_blk *shw_hblkp; 11536 caddr_t vaddr; 11537 uint_t shw_mask, newshw_mask; 11538 11539 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11540 11541 /* 11542 * check if the hmeblk is free, unload if necessary 11543 */ 11544 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11545 sfmmu_t *sfmmup; 11546 demap_range_t dmr; 11547 11548 sfmmup = hblktosfmmu(hmeblkp); 11549 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11550 return (0); 11551 } 11552 DEMAP_RANGE_INIT(sfmmup, &dmr); 11553 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11554 (caddr_t)get_hblk_base(hmeblkp), 11555 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11556 DEMAP_RANGE_FLUSH(&dmr); 11557 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11558 /* 11559 * Pageunload is working on the same hblk. 11560 */ 11561 return (0); 11562 } 11563 11564 sfmmu_hblk_steal_unload_count++; 11565 } 11566 11567 ASSERT(hmeblkp->hblk_lckcnt == 0); 11568 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11569 11570 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 11571 hmeblkp->hblk_nextpa = hblkpa; 11572 11573 shw_hblkp = hmeblkp->hblk_shadow; 11574 if (shw_hblkp) { 11575 ASSERT(!hmeblkp->hblk_shared); 11576 shw_size = get_hblk_ttesz(shw_hblkp); 11577 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11578 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11579 ASSERT(vshift < 8); 11580 /* 11581 * Atomically clear shadow mask bit 11582 */ 11583 do { 11584 shw_mask = shw_hblkp->hblk_shw_mask; 11585 ASSERT(shw_mask & (1 << vshift)); 11586 newshw_mask = shw_mask & ~(1 << vshift); 11587 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 11588 shw_mask, newshw_mask); 11589 } while (newshw_mask != shw_mask); 11590 hmeblkp->hblk_shadow = NULL; 11591 } 11592 11593 /* 11594 * remove shadow bit if we are stealing an unused shadow hmeblk. 11595 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11596 * we are indeed allocating a shadow hmeblk. 11597 */ 11598 hmeblkp->hblk_shw_bit = 0; 11599 11600 if (hmeblkp->hblk_shared) { 11601 sf_srd_t *srdp; 11602 sf_region_t *rgnp; 11603 uint_t rid; 11604 11605 srdp = hblktosrd(hmeblkp); 11606 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11607 rid = hmeblkp->hblk_tag.htag_rid; 11608 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11609 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11610 rgnp = srdp->srd_hmergnp[rid]; 11611 ASSERT(rgnp != NULL); 11612 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11613 hmeblkp->hblk_shared = 0; 11614 } 11615 11616 sfmmu_hblk_steal_count++; 11617 SFMMU_STAT(sf_steal_count); 11618 11619 return (1); 11620 } 11621 11622 struct hme_blk * 11623 sfmmu_hmetohblk(struct sf_hment *sfhme) 11624 { 11625 struct hme_blk *hmeblkp; 11626 struct sf_hment *sfhme0; 11627 struct hme_blk *hblk_dummy = 0; 11628 11629 /* 11630 * No dummy sf_hments, please. 11631 */ 11632 ASSERT(sfhme->hme_tte.ll != 0); 11633 11634 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11635 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11636 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11637 11638 return (hmeblkp); 11639 } 11640 11641 /* 11642 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11643 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11644 * KM_SLEEP allocation. 11645 * 11646 * Return 0 on success, -1 otherwise. 
11647 */ 11648 static void 11649 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11650 { 11651 struct tsb_info *tsbinfop, *next; 11652 tsb_replace_rc_t rc; 11653 boolean_t gotfirst = B_FALSE; 11654 11655 ASSERT(sfmmup != ksfmmup); 11656 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11657 11658 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11659 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11660 } 11661 11662 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11663 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11664 } else { 11665 return; 11666 } 11667 11668 ASSERT(sfmmup->sfmmu_tsb != NULL); 11669 11670 /* 11671 * Loop over all tsbinfo's replacing them with ones that actually have 11672 * a TSB. If any of the replacements ever fail, bail out of the loop. 11673 */ 11674 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11675 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11676 next = tsbinfop->tsb_next; 11677 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11678 hatlockp, TSB_SWAPIN); 11679 if (rc != TSB_SUCCESS) { 11680 break; 11681 } 11682 gotfirst = B_TRUE; 11683 } 11684 11685 switch (rc) { 11686 case TSB_SUCCESS: 11687 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11688 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11689 return; 11690 case TSB_LOSTRACE: 11691 break; 11692 case TSB_ALLOCFAIL: 11693 break; 11694 default: 11695 panic("sfmmu_replace_tsb returned unrecognized failure code " 11696 "%d", rc); 11697 } 11698 11699 /* 11700 * In this case, we failed to get one of our TSBs. If we failed to 11701 * get the first TSB, get one of minimum size (8KB). Walk the list 11702 * and throw away the tsbinfos, starting where the allocation failed; 11703 * we can get by with just one TSB as long as we don't leave the 11704 * SWAPPED tsbinfo structures lying around. 11705 */ 11706 tsbinfop = sfmmup->sfmmu_tsb; 11707 next = tsbinfop->tsb_next; 11708 tsbinfop->tsb_next = NULL; 11709 11710 sfmmu_hat_exit(hatlockp); 11711 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 11712 next = tsbinfop->tsb_next; 11713 sfmmu_tsbinfo_free(tsbinfop); 11714 } 11715 hatlockp = sfmmu_hat_enter(sfmmup); 11716 11717 /* 11718 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 11719 * pages. 11720 */ 11721 if (!gotfirst) { 11722 tsbinfop = sfmmup->sfmmu_tsb; 11723 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 11724 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 11725 ASSERT(rc == TSB_SUCCESS); 11726 } 11727 11728 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11729 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11730 } 11731 11732 static int 11733 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw) 11734 { 11735 ulong_t bix = 0; 11736 uint_t rid; 11737 sf_region_t *rgnp; 11738 11739 ASSERT(srdp != NULL); 11740 ASSERT(srdp->srd_refcnt != 0); 11741 11742 w <<= BT_ULSHIFT; 11743 while (bmw) { 11744 if (!(bmw & 0x1)) { 11745 bix++; 11746 bmw >>= 1; 11747 continue; 11748 } 11749 rid = w | bix; 11750 rgnp = srdp->srd_hmergnp[rid]; 11751 ASSERT(rgnp->rgn_refcnt > 0); 11752 ASSERT(rgnp->rgn_id == rid); 11753 if (addr < rgnp->rgn_saddr || 11754 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) { 11755 bix++; 11756 bmw >>= 1; 11757 } else { 11758 return (1); 11759 } 11760 } 11761 return (0); 11762 } 11763 11764 /* 11765 * Handle exceptions for low level tsb_handler. 11766 * 11767 * There are many scenarios that could land us here: 11768 * 11769 * If the context is invalid we land here. 
The context can be invalid
11770 * for 3 reasons: 1) we couldn't allocate a new context and now need to
11771 * perform a wrap-around operation in order to allocate a new context.
11772 * 2) The context was invalidated to change pagesize programming. 3) ISM or
11773 * TSB configuration is changing for this process and we are forced in
11774 * here to do a synchronization operation. If the context is valid we can
11775 * be here from the window trap handler. In this case just call trap() to
11776 * handle the fault.
11777 *
11778 * Note that the process will run in INVALID_CONTEXT before
11779 * faulting into here and subsequently loading the MMU registers
11780 * (including the TSB base register) associated with this process.
11781 * For this reason, the trap handlers must all test for
11782 * INVALID_CONTEXT before attempting to access any registers other
11783 * than the context registers.
11784 */
11785 void
11786 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11787 {
11788 sfmmu_t *sfmmup, *shsfmmup;
11789 uint_t ctxtype;
11790 klwp_id_t lwp;
11791 char lwp_save_state;
11792 hatlock_t *hatlockp, *shatlockp;
11793 struct tsb_info *tsbinfop;
11794 struct tsbmiss *tsbmp;
11795 sf_scd_t *scdp;
11796
11797 SFMMU_STAT(sf_tsb_exceptions);
11798 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11799 sfmmup = astosfmmu(curthread->t_procp->p_as);
11800 /*
11801 * note that in sun4u, the tagaccess register contains ctxnum
11802 * while sun4v passes ctxtype in the tagaccess register.
11803 */
11804 ctxtype = tagaccess & TAGACC_CTX_MASK;
11805
11806 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11807 ASSERT(sfmmup->sfmmu_ismhat == 0);
11808 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11809 ctxtype == INVALID_CONTEXT);
11810
11811 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11812 /*
11813 * We may land here because the shme bitmap and pagesize
11814 * flags are updated lazily in the tsbmiss area on other cpus.
11815 * If we detect here that the tsbmiss area is out of sync with
11816 * the sfmmu, update it and retry the trapped instruction.
11817 * Otherwise call trap().
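 * (As one illustrative scenario: another cpu may have set a new pagesize
 * bit in sfmmu_tteflags, or added a region to the hme region bitmap, after
 * this cpu last refreshed its tsbmiss area.  The low-level handler then
 * cannot find a translation that does in fact exist and traps in here;
 * refreshing the cached copies below and retrying the instruction is all
 * that is needed in that case.)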
11818 */ 11819 int ret = 0; 11820 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11821 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11822 11823 /* 11824 * Must set lwp state to LWP_SYS before 11825 * trying to acquire any adaptive lock 11826 */ 11827 lwp = ttolwp(curthread); 11828 ASSERT(lwp); 11829 lwp_save_state = lwp->lwp_state; 11830 lwp->lwp_state = LWP_SYS; 11831 11832 hatlockp = sfmmu_hat_enter(sfmmup); 11833 kpreempt_disable(); 11834 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11835 ASSERT(sfmmup == tsbmp->usfmmup); 11836 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11837 ~tteflag_mask) || 11838 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11839 ~tteflag_mask)) { 11840 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11841 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11842 ret = 1; 11843 } 11844 if (sfmmup->sfmmu_srdp != NULL) { 11845 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11846 ulong_t *tm = tsbmp->shmermap; 11847 ulong_t i; 11848 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11849 ulong_t d = tm[i] ^ sm[i]; 11850 if (d) { 11851 if (d & sm[i]) { 11852 if (!ret && sfmmu_is_rgnva( 11853 sfmmup->sfmmu_srdp, 11854 addr, i, d & sm[i])) { 11855 ret = 1; 11856 } 11857 } 11858 tm[i] = sm[i]; 11859 } 11860 } 11861 } 11862 kpreempt_enable(); 11863 sfmmu_hat_exit(hatlockp); 11864 lwp->lwp_state = lwp_save_state; 11865 if (ret) { 11866 return; 11867 } 11868 } else if (ctxtype == INVALID_CONTEXT) { 11869 /* 11870 * First, make sure we come out of here with a valid ctx, 11871 * since if we don't get one we'll simply loop on the 11872 * faulting instruction. 11873 * 11874 * If the ISM mappings are changing, the TSB is relocated, 11875 * the process is swapped, the process is joining SCD or 11876 * leaving SCD or shared regions we serialize behind the 11877 * controlling thread with hat lock, sfmmu_flags and 11878 * sfmmu_tsb_cv condition variable. 11879 */ 11880 11881 /* 11882 * Must set lwp state to LWP_SYS before 11883 * trying to acquire any adaptive lock 11884 */ 11885 lwp = ttolwp(curthread); 11886 ASSERT(lwp); 11887 lwp_save_state = lwp->lwp_state; 11888 lwp->lwp_state = LWP_SYS; 11889 11890 hatlockp = sfmmu_hat_enter(sfmmup); 11891 retry: 11892 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 11893 shsfmmup = scdp->scd_sfmmup; 11894 ASSERT(shsfmmup != NULL); 11895 11896 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 11897 tsbinfop = tsbinfop->tsb_next) { 11898 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11899 /* drop the private hat lock */ 11900 sfmmu_hat_exit(hatlockp); 11901 /* acquire the shared hat lock */ 11902 shatlockp = sfmmu_hat_enter(shsfmmup); 11903 /* 11904 * recheck to see if anything changed 11905 * after we drop the private hat lock. 11906 */ 11907 if (sfmmup->sfmmu_scdp == scdp && 11908 shsfmmup == scdp->scd_sfmmup) { 11909 sfmmu_tsb_chk_reloc(shsfmmup, 11910 shatlockp); 11911 } 11912 sfmmu_hat_exit(shatlockp); 11913 hatlockp = sfmmu_hat_enter(sfmmup); 11914 goto retry; 11915 } 11916 } 11917 } 11918 11919 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 11920 tsbinfop = tsbinfop->tsb_next) { 11921 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11922 cv_wait(&sfmmup->sfmmu_tsb_cv, 11923 HATLOCK_MUTEXP(hatlockp)); 11924 goto retry; 11925 } 11926 } 11927 11928 /* 11929 * Wait for ISM maps to be updated. 11930 */ 11931 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 11932 cv_wait(&sfmmup->sfmmu_tsb_cv, 11933 HATLOCK_MUTEXP(hatlockp)); 11934 goto retry; 11935 } 11936 11937 /* Is this process joining an SCD? 
*/
11938 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11939 /*
11940 * Flush private TSB and setup shared TSB.
11941 * sfmmu_finish_join_scd() does not drop the
11942 * hat lock.
11943 */
11944 sfmmu_finish_join_scd(sfmmup);
11945 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11946 }
11947
11948 /*
11949 * If we're swapping in, get TSB(s).  Note that we must do
11950 * this before we get a ctx or load the MMU state.  Once
11951 * we swap in we have to recheck to make sure the TSB(s) and
11952 * ISM mappings didn't change while we slept.
11953 */
11954 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11955 sfmmu_tsb_swapin(sfmmup, hatlockp);
11956 goto retry;
11957 }
11958
11959 sfmmu_get_ctx(sfmmup);
11960
11961 sfmmu_hat_exit(hatlockp);
11962 /*
11963 * Must restore lwp_state if not calling
11964 * trap() for further processing.  Restore
11965 * it anyway.
11966 */
11967 lwp->lwp_state = lwp_save_state;
11968 return;
11969 }
11970 trap(rp, (caddr_t)tagaccess, traptype, 0);
11971 }
11972
11973 static void
11974 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11975 {
11976 struct tsb_info *tp;
11977
11978 ASSERT(sfmmu_hat_lock_held(sfmmup));
11979
11980 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11981 if (tp->tsb_flags & TSB_RELOC_FLAG) {
11982 cv_wait(&sfmmup->sfmmu_tsb_cv,
11983 HATLOCK_MUTEXP(hatlockp));
11984 break;
11985 }
11986 }
11987 }
11988
11989 /*
11990 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
11991 * TTE_SUSPENDED bit is set in the tte.  We block on acquiring a page lock
11992 * rather than spinning, to avoid send-mondo timeouts with
11993 * interrupts enabled.  When the lock is acquired it is immediately
11994 * released and we return back to sfmmu_vatopfn just after
11995 * the GET_TTE call.
11996 */
11997 void
11998 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11999 {
12000 struct page **pp;
12001
12002 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12003 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12004 }
12005
12006 /*
12007 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
12008 * TTE_SUSPENDED bit is set in the tte.  We do this so that we can handle
12009 * cross traps which cannot be handled while spinning in the
12010 * trap handlers.  Simply enter and exit the kpr_suspendlock spin
12011 * mutex, which is held by the holder of the suspend bit, and then
12012 * retry the trapped instruction after unwinding.
12013 */
12014 /*ARGSUSED*/
12015 void
12016 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
12017 {
12018 ASSERT(curthread != kreloc_thread);
12019 mutex_enter(&kpr_suspendlock);
12020 mutex_exit(&kpr_suspendlock);
12021 }
12022
12023 /*
12024 * This routine could be optimized to reduce the number of xcalls by flushing
12025 * the entire TLB if the region reference count is above some threshold, but
12026 * the tradeoff will depend on the size of the TLB.  So for now flush the
12027 * specific page, one context at a time.
12028 *
12029 * If uselocks is 0 then it's called after all cpus were captured and all the
12030 * hat locks were taken.  In this case don't take the region lock; instead rely
12031 * on the order of the region list update operations in hat_join_region(),
12032 * hat_leave_region() and hat_dup_region().  The ordering in those routines
12033 * guarantees that the list is always forward walkable and reaches active
12034 * sfmmus regardless of where xc_attention() captures a cpu.
12035 */ 12036 cpuset_t 12037 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp, 12038 struct hme_blk *hmeblkp, int uselocks) 12039 { 12040 sfmmu_t *sfmmup; 12041 cpuset_t cpuset; 12042 cpuset_t rcpuset; 12043 hatlock_t *hatlockp; 12044 uint_t rid = rgnp->rgn_id; 12045 sf_rgn_link_t *rlink; 12046 sf_scd_t *scdp; 12047 12048 ASSERT(hmeblkp->hblk_shared); 12049 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 12050 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 12051 12052 CPUSET_ZERO(rcpuset); 12053 if (uselocks) { 12054 mutex_enter(&rgnp->rgn_mutex); 12055 } 12056 sfmmup = rgnp->rgn_sfmmu_head; 12057 while (sfmmup != NULL) { 12058 if (uselocks) { 12059 hatlockp = sfmmu_hat_enter(sfmmup); 12060 } 12061 12062 /* 12063 * When an SCD is created the SCD hat is linked on the sfmmu 12064 * region lists for each hme region which is part of the 12065 * SCD. If we find an SCD hat, when walking these lists, 12066 * then we flush the shared TSBs, if we find a private hat, 12067 * which is part of an SCD, but where the region 12068 * is not part of the SCD then we flush the private TSBs. 12069 */ 12070 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12071 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 12072 scdp = sfmmup->sfmmu_scdp; 12073 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 12074 if (uselocks) { 12075 sfmmu_hat_exit(hatlockp); 12076 } 12077 goto next; 12078 } 12079 } 12080 12081 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12082 12083 kpreempt_disable(); 12084 cpuset = sfmmup->sfmmu_cpusran; 12085 CPUSET_AND(cpuset, cpu_ready_set); 12086 CPUSET_DEL(cpuset, CPU->cpu_id); 12087 SFMMU_XCALL_STATS(sfmmup); 12088 xt_some(cpuset, vtag_flushpage_tl1, 12089 (uint64_t)addr, (uint64_t)sfmmup); 12090 vtag_flushpage(addr, (uint64_t)sfmmup); 12091 if (uselocks) { 12092 sfmmu_hat_exit(hatlockp); 12093 } 12094 kpreempt_enable(); 12095 CPUSET_OR(rcpuset, cpuset); 12096 12097 next: 12098 /* LINTED: constant in conditional context */ 12099 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 12100 ASSERT(rlink != NULL); 12101 sfmmup = rlink->next; 12102 } 12103 if (uselocks) { 12104 mutex_exit(&rgnp->rgn_mutex); 12105 } 12106 return (rcpuset); 12107 } 12108 12109 /* 12110 * This routine takes an sfmmu pointer and the va for an adddress in an 12111 * ISM region as input and returns the corresponding region id in ism_rid. 12112 * The return value of 1 indicates that a region has been found and ism_rid 12113 * is valid, otherwise 0 is returned. 12114 */ 12115 static int 12116 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid) 12117 { 12118 ism_blk_t *ism_blkp; 12119 int i; 12120 ism_map_t *ism_map; 12121 #ifdef DEBUG 12122 struct hat *ism_hatid; 12123 #endif 12124 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12125 12126 ism_blkp = sfmmup->sfmmu_iblk; 12127 while (ism_blkp != NULL) { 12128 ism_map = ism_blkp->iblk_maps; 12129 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 12130 if ((va >= ism_start(ism_map[i])) && 12131 (va < ism_end(ism_map[i]))) { 12132 12133 *ism_rid = ism_map[i].imap_rid; 12134 #ifdef DEBUG 12135 ism_hatid = ism_map[i].imap_ismhat; 12136 ASSERT(ism_hatid == ism_sfmmup); 12137 ASSERT(ism_hatid->sfmmu_ismhat); 12138 #endif 12139 return (1); 12140 } 12141 } 12142 ism_blkp = ism_blkp->iblk_next; 12143 } 12144 return (0); 12145 } 12146 12147 /* 12148 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 12149 * This routine may be called with all cpu's captured. 
Therefore, the 12150 * caller is responsible for holding all locks and disabling kernel 12151 * preemption. 12152 */ 12153 /* ARGSUSED */ 12154 static void 12155 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 12156 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 12157 { 12158 cpuset_t cpuset; 12159 caddr_t va; 12160 ism_ment_t *ment; 12161 sfmmu_t *sfmmup; 12162 #ifdef VAC 12163 int vcolor; 12164 #endif 12165 12166 sf_scd_t *scdp; 12167 uint_t ism_rid; 12168 12169 ASSERT(!hmeblkp->hblk_shared); 12170 /* 12171 * Walk the ism_hat's mapping list and flush the page 12172 * from every hat sharing this ism_hat. This routine 12173 * may be called while all cpu's have been captured. 12174 * Therefore we can't attempt to grab any locks. For now 12175 * this means we will protect the ism mapping list under 12176 * a single lock which will be grabbed by the caller. 12177 * If hat_share/unshare scalibility becomes a performance 12178 * problem then we may need to re-think ism mapping list locking. 12179 */ 12180 ASSERT(ism_sfmmup->sfmmu_ismhat); 12181 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 12182 addr = addr - ISMID_STARTADDR; 12183 12184 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 12185 12186 sfmmup = ment->iment_hat; 12187 12188 va = ment->iment_base_va; 12189 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 12190 12191 /* 12192 * When an SCD is created the SCD hat is linked on the ism 12193 * mapping lists for each ISM segment which is part of the 12194 * SCD. If we find an SCD hat, when walking these lists, 12195 * then we flush the shared TSBs, if we find a private hat, 12196 * which is part of an SCD, but where the region 12197 * corresponding to this va is not part of the SCD then we 12198 * flush the private TSBs. 12199 */ 12200 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12201 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12202 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12203 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12204 &ism_rid)) { 12205 cmn_err(CE_PANIC, 12206 "can't find matching ISM rid!"); 12207 } 12208 12209 scdp = sfmmup->sfmmu_scdp; 12210 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12211 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12212 ism_rid)) { 12213 continue; 12214 } 12215 } 12216 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12217 12218 cpuset = sfmmup->sfmmu_cpusran; 12219 CPUSET_AND(cpuset, cpu_ready_set); 12220 CPUSET_DEL(cpuset, CPU->cpu_id); 12221 SFMMU_XCALL_STATS(sfmmup); 12222 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12223 (uint64_t)sfmmup); 12224 vtag_flushpage(va, (uint64_t)sfmmup); 12225 12226 #ifdef VAC 12227 /* 12228 * Flush D$ 12229 * When flushing D$ we must flush all 12230 * cpu's. See sfmmu_cache_flush(). 12231 */ 12232 if (cache_flush_flag == CACHE_FLUSH) { 12233 cpuset = cpu_ready_set; 12234 CPUSET_DEL(cpuset, CPU->cpu_id); 12235 12236 SFMMU_XCALL_STATS(sfmmup); 12237 vcolor = addr_to_vcolor(va); 12238 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12239 vac_flushpage(pfnum, vcolor); 12240 } 12241 #endif /* VAC */ 12242 } 12243 } 12244 12245 /* 12246 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12247 * a particular virtual address and ctx. If noflush is set we do not 12248 * flush the TLB/TSB. This function may or may not be called with the 12249 * HAT lock held. 
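 * (As a descriptive note on the arguments below: hat_lock_held tells us
 * whether the caller already holds the hat lock; tlb_noflush skips the
 * TLB/TSB flush entirely; and, on VAC machines, cache_flush_flag ==
 * CACHE_FLUSH additionally flushes the D$, on every ready cpu when
 * FLUSH_ALL_CPUS is set in cpu_flag.)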
12250 */ 12251 static void 12252 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12253 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12254 int hat_lock_held) 12255 { 12256 #ifdef VAC 12257 int vcolor; 12258 #endif 12259 cpuset_t cpuset; 12260 hatlock_t *hatlockp; 12261 12262 ASSERT(!hmeblkp->hblk_shared); 12263 12264 #if defined(lint) && !defined(VAC) 12265 pfnum = pfnum; 12266 cpu_flag = cpu_flag; 12267 cache_flush_flag = cache_flush_flag; 12268 #endif 12269 12270 /* 12271 * There is no longer a need to protect against ctx being 12272 * stolen here since we don't store the ctx in the TSB anymore. 12273 */ 12274 #ifdef VAC 12275 vcolor = addr_to_vcolor(addr); 12276 #endif 12277 12278 /* 12279 * We must hold the hat lock during the flush of TLB, 12280 * to avoid a race with sfmmu_invalidate_ctx(), where 12281 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12282 * causing TLB demap routine to skip flush on that MMU. 12283 * If the context on a MMU has already been set to 12284 * INVALID_CONTEXT, we just get an extra flush on 12285 * that MMU. 12286 */ 12287 if (!hat_lock_held && !tlb_noflush) 12288 hatlockp = sfmmu_hat_enter(sfmmup); 12289 12290 kpreempt_disable(); 12291 if (!tlb_noflush) { 12292 /* 12293 * Flush the TSB and TLB. 12294 */ 12295 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12296 12297 cpuset = sfmmup->sfmmu_cpusran; 12298 CPUSET_AND(cpuset, cpu_ready_set); 12299 CPUSET_DEL(cpuset, CPU->cpu_id); 12300 12301 SFMMU_XCALL_STATS(sfmmup); 12302 12303 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12304 (uint64_t)sfmmup); 12305 12306 vtag_flushpage(addr, (uint64_t)sfmmup); 12307 } 12308 12309 if (!hat_lock_held && !tlb_noflush) 12310 sfmmu_hat_exit(hatlockp); 12311 12312 #ifdef VAC 12313 /* 12314 * Flush the D$ 12315 * 12316 * Even if the ctx is stolen, we need to flush the 12317 * cache. Our ctx stealer only flushes the TLBs. 12318 */ 12319 if (cache_flush_flag == CACHE_FLUSH) { 12320 if (cpu_flag & FLUSH_ALL_CPUS) { 12321 cpuset = cpu_ready_set; 12322 } else { 12323 cpuset = sfmmup->sfmmu_cpusran; 12324 CPUSET_AND(cpuset, cpu_ready_set); 12325 } 12326 CPUSET_DEL(cpuset, CPU->cpu_id); 12327 SFMMU_XCALL_STATS(sfmmup); 12328 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12329 vac_flushpage(pfnum, vcolor); 12330 } 12331 #endif /* VAC */ 12332 kpreempt_enable(); 12333 } 12334 12335 /* 12336 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12337 * address and ctx. If noflush is set we do not currently do anything. 12338 * This function may or may not be called with the HAT lock held. 12339 */ 12340 static void 12341 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12342 int tlb_noflush, int hat_lock_held) 12343 { 12344 cpuset_t cpuset; 12345 hatlock_t *hatlockp; 12346 12347 ASSERT(!hmeblkp->hblk_shared); 12348 12349 /* 12350 * If the process is exiting we have nothing to do. 12351 */ 12352 if (tlb_noflush) 12353 return; 12354 12355 /* 12356 * Flush TSB. 
12357 */ 12358 if (!hat_lock_held) 12359 hatlockp = sfmmu_hat_enter(sfmmup); 12360 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12361 12362 kpreempt_disable(); 12363 12364 cpuset = sfmmup->sfmmu_cpusran; 12365 CPUSET_AND(cpuset, cpu_ready_set); 12366 CPUSET_DEL(cpuset, CPU->cpu_id); 12367 12368 SFMMU_XCALL_STATS(sfmmup); 12369 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12370 12371 vtag_flushpage(addr, (uint64_t)sfmmup); 12372 12373 if (!hat_lock_held) 12374 sfmmu_hat_exit(hatlockp); 12375 12376 kpreempt_enable(); 12377 12378 } 12379 12380 /* 12381 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12382 * call handler that can flush a range of pages to save on xcalls. 12383 */ 12384 static int sfmmu_xcall_save; 12385 12386 /* 12387 * this routine is never used for demaping addresses backed by SRD hmeblks. 12388 */ 12389 static void 12390 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12391 { 12392 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12393 hatlock_t *hatlockp; 12394 cpuset_t cpuset; 12395 uint64_t sfmmu_pgcnt; 12396 pgcnt_t pgcnt = 0; 12397 int pgunload = 0; 12398 int dirtypg = 0; 12399 caddr_t addr = dmrp->dmr_addr; 12400 caddr_t eaddr; 12401 uint64_t bitvec = dmrp->dmr_bitvec; 12402 12403 ASSERT(bitvec & 1); 12404 12405 /* 12406 * Flush TSB and calculate number of pages to flush. 12407 */ 12408 while (bitvec != 0) { 12409 dirtypg = 0; 12410 /* 12411 * Find the first page to flush and then count how many 12412 * pages there are after it that also need to be flushed. 12413 * This way the number of TSB flushes is minimized. 12414 */ 12415 while ((bitvec & 1) == 0) { 12416 pgcnt++; 12417 addr += MMU_PAGESIZE; 12418 bitvec >>= 1; 12419 } 12420 while (bitvec & 1) { 12421 dirtypg++; 12422 bitvec >>= 1; 12423 } 12424 eaddr = addr + ptob(dirtypg); 12425 hatlockp = sfmmu_hat_enter(sfmmup); 12426 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12427 sfmmu_hat_exit(hatlockp); 12428 pgunload += dirtypg; 12429 addr = eaddr; 12430 pgcnt += dirtypg; 12431 } 12432 12433 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12434 if (sfmmup->sfmmu_free == 0) { 12435 addr = dmrp->dmr_addr; 12436 bitvec = dmrp->dmr_bitvec; 12437 12438 /* 12439 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12440 * as it will be used to pack argument for xt_some 12441 */ 12442 ASSERT((pgcnt > 0) && 12443 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12444 12445 /* 12446 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 12447 * the low 6 bits of sfmmup. This is doable since pgcnt 12448 * always >= 1. 12449 */ 12450 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12451 sfmmu_pgcnt = (uint64_t)sfmmup | 12452 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12453 12454 /* 12455 * We must hold the hat lock during the flush of TLB, 12456 * to avoid a race with sfmmu_invalidate_ctx(), where 12457 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12458 * causing TLB demap routine to skip flush on that MMU. 12459 * If the context on a MMU has already been set to 12460 * INVALID_CONTEXT, we just get an extra flush on 12461 * that MMU. 
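 * (To illustrate the sfmmu_pgcnt encoding built above: with, say,
 * pgcnt == 16, the low SFMMU_PGCNT_SHIFT bits of the xcall argument carry
 * (16 - 1) = 0xf and the upper bits carry the sfmmup pointer, which lets
 * the TL1 handler recover pgcnt as (arg & SFMMU_PGCNT_MASK) + 1 and the
 * hat as (arg & ~SFMMU_PGCNT_MASK).)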
12462 */ 12463 hatlockp = sfmmu_hat_enter(sfmmup); 12464 kpreempt_disable(); 12465 12466 cpuset = sfmmup->sfmmu_cpusran; 12467 CPUSET_AND(cpuset, cpu_ready_set); 12468 CPUSET_DEL(cpuset, CPU->cpu_id); 12469 12470 SFMMU_XCALL_STATS(sfmmup); 12471 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12472 sfmmu_pgcnt); 12473 12474 for (; bitvec != 0; bitvec >>= 1) { 12475 if (bitvec & 1) 12476 vtag_flushpage(addr, (uint64_t)sfmmup); 12477 addr += MMU_PAGESIZE; 12478 } 12479 kpreempt_enable(); 12480 sfmmu_hat_exit(hatlockp); 12481 12482 sfmmu_xcall_save += (pgunload-1); 12483 } 12484 dmrp->dmr_bitvec = 0; 12485 } 12486 12487 /* 12488 * In cases where we need to synchronize with TLB/TSB miss trap 12489 * handlers, _and_ need to flush the TLB, it's a lot easier to 12490 * throw away the context from the process than to do a 12491 * special song and dance to keep things consistent for the 12492 * handlers. 12493 * 12494 * Since the process suddenly ends up without a context and our caller 12495 * holds the hat lock, threads that fault after this function is called 12496 * will pile up on the lock. We can then do whatever we need to 12497 * atomically from the context of the caller. The first blocked thread 12498 * to resume executing will get the process a new context, and the 12499 * process will resume executing. 12500 * 12501 * One added advantage of this approach is that on MMUs that 12502 * support a "flush all" operation, we will delay the flush until 12503 * cnum wrap-around, and then flush the TLB one time. This 12504 * is rather rare, so it's a lot less expensive than making 8000 12505 * x-calls to flush the TLB 8000 times. 12506 * 12507 * A per-process (PP) lock is used to synchronize ctx allocations in 12508 * resume() and ctx invalidations here. 12509 */ 12510 static void 12511 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 12512 { 12513 cpuset_t cpuset; 12514 int cnum, currcnum; 12515 mmu_ctx_t *mmu_ctxp; 12516 int i; 12517 uint_t pstate_save; 12518 12519 SFMMU_STAT(sf_ctx_inv); 12520 12521 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12522 ASSERT(sfmmup != ksfmmup); 12523 12524 kpreempt_disable(); 12525 12526 mmu_ctxp = CPU_MMU_CTXP(CPU); 12527 ASSERT(mmu_ctxp); 12528 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 12529 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 12530 12531 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 12532 12533 pstate_save = sfmmu_disable_intrs(); 12534 12535 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 12536 /* set HAT cnum invalid across all context domains. */ 12537 for (i = 0; i < max_mmu_ctxdoms; i++) { 12538 12539 cnum = sfmmup->sfmmu_ctxs[i].cnum; 12540 if (cnum == INVALID_CONTEXT) { 12541 continue; 12542 } 12543 12544 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 12545 } 12546 membar_enter(); /* make sure globally visible to all CPUs */ 12547 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 12548 12549 sfmmu_enable_intrs(pstate_save); 12550 12551 cpuset = sfmmup->sfmmu_cpusran; 12552 CPUSET_DEL(cpuset, CPU->cpu_id); 12553 CPUSET_AND(cpuset, cpu_ready_set); 12554 if (!CPUSET_ISNULL(cpuset)) { 12555 SFMMU_XCALL_STATS(sfmmup); 12556 xt_some(cpuset, sfmmu_raise_tsb_exception, 12557 (uint64_t)sfmmup, INVALID_CONTEXT); 12558 xt_sync(cpuset); 12559 SFMMU_STAT(sf_tsb_raise_exception); 12560 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 12561 } 12562 12563 /* 12564 * If the hat to-be-invalidated is the same as the current 12565 * process on local CPU we need to invalidate 12566 * this CPU context as well. 
*/
12568 if ((sfmmu_getctx_sec() == currcnum) &&
12569 (currcnum != INVALID_CONTEXT)) {
12570 /* sets shared context to INVALID too */
12571 sfmmu_setctx_sec(INVALID_CONTEXT);
12572 sfmmu_clear_utsbinfo();
12573 }
12574
12575 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12576
12577 kpreempt_enable();
12578
12579 /*
12580 * we hold the hat lock, so nobody should allocate a context
12581 * for us yet
12582 */
12583 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12584 }
12585
12586 #ifdef VAC
12587 /*
12588 * We need to flush the cache on all cpus.  It is possible that
12589 * a process referenced a page as cacheable but has since exited
12590 * and cleared the mapping list.  We still need to flush it, but we have
12591 * no state, so flushing on all cpus is the only alternative.
12592 */
12593 void
12594 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12595 {
12596 cpuset_t cpuset;
12597
12598 kpreempt_disable();
12599 cpuset = cpu_ready_set;
12600 CPUSET_DEL(cpuset, CPU->cpu_id);
12601 SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
12602 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12603 xt_sync(cpuset);
12604 vac_flushpage(pfnum, vcolor);
12605 kpreempt_enable();
12606 }
12607
12608 void
12609 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12610 {
12611 cpuset_t cpuset;
12612
12613 ASSERT(vcolor >= 0);
12614
12615 kpreempt_disable();
12616 cpuset = cpu_ready_set;
12617 CPUSET_DEL(cpuset, CPU->cpu_id);
12618 SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
12619 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12620 xt_sync(cpuset);
12621 vac_flushcolor(vcolor, pfnum);
12622 kpreempt_enable();
12623 }
12624 #endif	/* VAC */
12625
12626 /*
12627 * We need to prevent processes from accessing the TSB using a cached physical
12628 * address.  It's alright if they try to access the TSB via virtual address
12629 * since they will just fault on that virtual address once the mapping has
12630 * been suspended.
12631 */
12632 #pragma weak sendmondo_in_recover
12633
12634 /* ARGSUSED */
12635 static int
12636 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12637 {
12638 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12639 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12640 hatlock_t *hatlockp;
12641 sf_scd_t *scdp;
12642
12643 if (flags != HAT_PRESUSPEND)
12644 return (0);
12645
12646 /*
12647 * If the tsb is a shared TSB, i.e. TSB_SHAREDCTX is set, sfmmup must
12648 * be a shared hat and we set the SCD's tsbinfo flag.
12649 * If the tsb is not shared, sfmmup is a private hat and we set
12650 * its private tsbinfo flag.
12651 */
12652 hatlockp = sfmmu_hat_enter(sfmmup);
12653 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12654
12655 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12656 sfmmu_tsb_inv_ctx(sfmmup);
12657 sfmmu_hat_exit(hatlockp);
12658 } else {
12659 /* release lock on the shared hat */
12660 sfmmu_hat_exit(hatlockp);
12661 /* sfmmup is a shared hat */
12662 ASSERT(sfmmup->sfmmu_scdhat);
12663 scdp = sfmmup->sfmmu_scdp;
12664 ASSERT(scdp != NULL);
12665 /* get private hat from the scd list */
12666 mutex_enter(&scdp->scd_mutex);
12667 sfmmup = scdp->scd_sf_list;
12668 while (sfmmup != NULL) {
12669 hatlockp = sfmmu_hat_enter(sfmmup);
12670 /*
12671 * We do not call sfmmu_tsb_inv_ctx here because
12672 * the sendmondo_in_recover check is only needed for
12673 * sun4u.
12674 */ 12675 sfmmu_invalidate_ctx(sfmmup); 12676 sfmmu_hat_exit(hatlockp); 12677 sfmmup = sfmmup->sfmmu_scd_link.next; 12678 12679 } 12680 mutex_exit(&scdp->scd_mutex); 12681 } 12682 return (0); 12683 } 12684 12685 static void 12686 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12687 { 12688 extern uint32_t sendmondo_in_recover; 12689 12690 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12691 12692 /* 12693 * For Cheetah+ Erratum 25: 12694 * Wait for any active recovery to finish. We can't risk 12695 * relocating the TSB of the thread running mondo_recover_proc() 12696 * since, if we did that, we would deadlock. The scenario we are 12697 * trying to avoid is as follows: 12698 * 12699 * THIS CPU RECOVER CPU 12700 * -------- ----------- 12701 * Begins recovery, walking through TSB 12702 * hat_pagesuspend() TSB TTE 12703 * TLB miss on TSB TTE, spins at TL1 12704 * xt_sync() 12705 * send_mondo_timeout() 12706 * mondo_recover_proc() 12707 * ((deadlocked)) 12708 * 12709 * The second half of the workaround is that mondo_recover_proc() 12710 * checks to see if the tsb_info has the RELOC flag set, and if it 12711 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12712 * and hence avoiding the TLB miss that could result in a deadlock. 12713 */ 12714 if (&sendmondo_in_recover) { 12715 membar_enter(); /* make sure RELOC flag visible */ 12716 while (sendmondo_in_recover) { 12717 drv_usecwait(1); 12718 membar_consumer(); 12719 } 12720 } 12721 12722 sfmmu_invalidate_ctx(sfmmup); 12723 } 12724 12725 /* ARGSUSED */ 12726 static int 12727 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12728 void *tsbinfo, pfn_t newpfn) 12729 { 12730 hatlock_t *hatlockp; 12731 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12732 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12733 12734 if (flags != HAT_POSTUNSUSPEND) 12735 return (0); 12736 12737 hatlockp = sfmmu_hat_enter(sfmmup); 12738 12739 SFMMU_STAT(sf_tsb_reloc); 12740 12741 /* 12742 * The process may have swapped out while we were relocating one 12743 * of its TSBs. If so, don't bother doing the setup since the 12744 * process can't be using the memory anymore. 12745 */ 12746 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12747 ASSERT(va == tsbinfop->tsb_va); 12748 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12749 12750 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12751 sfmmu_inv_tsb(tsbinfop->tsb_va, 12752 TSB_BYTES(tsbinfop->tsb_szc)); 12753 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12754 } 12755 } 12756 12757 membar_exit(); 12758 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12759 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12760 12761 sfmmu_hat_exit(hatlockp); 12762 12763 return (0); 12764 } 12765 12766 /* 12767 * Allocate and initialize a tsb_info structure. Note that we may or may not 12768 * allocate a TSB here, depending on the flags passed in. 12769 */ 12770 static int 12771 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12772 uint_t flags, sfmmu_t *sfmmup) 12773 { 12774 int err; 12775 12776 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12777 sfmmu_tsbinfo_cache, KM_SLEEP); 12778 12779 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12780 tsb_szc, flags, sfmmup)) != 0) { 12781 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12782 SFMMU_STAT(sf_tsb_allocfail); 12783 *tsbinfopp = NULL; 12784 return (err); 12785 } 12786 SFMMU_STAT(sf_tsb_alloc); 12787 12788 /* 12789 * Bump the TSB size counters for this TSB size. 
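 * (This relies on sfmmu_tsbsize_stat being laid out as consecutive int
 * counters, one per TSB size code, which is why the statement below can
 * treat it as an int array indexed by tsb_szc.)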
12790 */ 12791 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12792 return (0); 12793 } 12794 12795 static void 12796 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12797 { 12798 caddr_t tsbva = tsbinfo->tsb_va; 12799 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12800 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12801 vmem_t *vmp = tsbinfo->tsb_vmp; 12802 12803 /* 12804 * If we allocated this TSB from relocatable kernel memory, then we 12805 * need to uninstall the callback handler. 12806 */ 12807 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12808 uintptr_t slab_mask; 12809 caddr_t slab_vaddr; 12810 page_t **ppl; 12811 int ret; 12812 12813 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12814 if (tsb_size > MMU_PAGESIZE4M) 12815 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12816 else 12817 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12818 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12819 12820 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12821 ASSERT(ret == 0); 12822 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12823 0, NULL); 12824 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12825 } 12826 12827 if (kmem_cachep != NULL) { 12828 kmem_cache_free(kmem_cachep, tsbva); 12829 } else { 12830 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12831 } 12832 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12833 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12834 } 12835 12836 static void 12837 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12838 { 12839 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12840 sfmmu_tsb_free(tsbinfo); 12841 } 12842 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12843 12844 } 12845 12846 /* 12847 * Setup all the references to physical memory for this tsbinfo. 12848 * The underlying page(s) must be locked. 12849 */ 12850 static void 12851 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12852 { 12853 ASSERT(pfn != PFN_INVALID); 12854 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12855 12856 #ifndef sun4v 12857 if (tsbinfo->tsb_szc == 0) { 12858 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12859 PROT_WRITE|PROT_READ, TTE8K); 12860 } else { 12861 /* 12862 * Round down PA and use a large mapping; the handlers will 12863 * compute the TSB pointer at the correct offset into the 12864 * big virtual page. NOTE: this assumes all TSBs larger 12865 * than 8K must come from physically contiguous slabs of 12866 * size tsb_slab_size. 12867 */ 12868 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12869 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12870 } 12871 tsbinfo->tsb_pa = ptob(pfn); 12872 12873 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12874 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12875 12876 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 12877 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 12878 #else /* sun4v */ 12879 tsbinfo->tsb_pa = ptob(pfn); 12880 #endif /* sun4v */ 12881 } 12882 12883 12884 /* 12885 * Returns zero on success, ENOMEM if over the high water mark, 12886 * or EAGAIN if the caller needs to retry with a smaller TSB 12887 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 12888 * 12889 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 12890 * is specified and the TSB requested is PAGESIZE, though it 12891 * may sleep waiting for memory if sufficient memory is not 12892 * available. 
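 * As a purely illustrative sketch of this contract (there is no such
 * caller spelled out here), a grow path could retry on EAGAIN with
 * successively smaller size codes:
 *
 *	while ((err = sfmmu_init_tsbinfo(tsbinfo, tteszmask, szc, flags,
 *	    sfmmup)) == EAGAIN && szc > TSB_MIN_SZCODE)
 *		szc--;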
12893 */ 12894 static int 12895 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 12896 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 12897 { 12898 caddr_t vaddr = NULL; 12899 caddr_t slab_vaddr; 12900 uintptr_t slab_mask; 12901 int tsbbytes = TSB_BYTES(tsbcode); 12902 int lowmem = 0; 12903 struct kmem_cache *kmem_cachep = NULL; 12904 vmem_t *vmp = NULL; 12905 lgrp_id_t lgrpid = LGRP_NONE; 12906 pfn_t pfn; 12907 uint_t cbflags = HAC_SLEEP; 12908 page_t **pplist; 12909 int ret; 12910 12911 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 12912 if (tsbbytes > MMU_PAGESIZE4M) 12913 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12914 else 12915 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12916 12917 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 12918 flags |= TSB_ALLOC; 12919 12920 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 12921 12922 tsbinfo->tsb_sfmmu = sfmmup; 12923 12924 /* 12925 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 12926 * return. 12927 */ 12928 if ((flags & TSB_ALLOC) == 0) { 12929 tsbinfo->tsb_szc = tsbcode; 12930 tsbinfo->tsb_ttesz_mask = tteszmask; 12931 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 12932 tsbinfo->tsb_pa = -1; 12933 tsbinfo->tsb_tte.ll = 0; 12934 tsbinfo->tsb_next = NULL; 12935 tsbinfo->tsb_flags = TSB_SWAPPED; 12936 tsbinfo->tsb_cache = NULL; 12937 tsbinfo->tsb_vmp = NULL; 12938 return (0); 12939 } 12940 12941 #ifdef DEBUG 12942 /* 12943 * For debugging: 12944 * Randomly force allocation failures every tsb_alloc_mtbf 12945 * tries if TSB_FORCEALLOC is not specified. This will 12946 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 12947 * it is even, to allow testing of both failure paths... 12948 */ 12949 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 12950 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 12951 tsb_alloc_count = 0; 12952 tsb_alloc_fail_mtbf++; 12953 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 12954 } 12955 #endif /* DEBUG */ 12956 12957 /* 12958 * Enforce high water mark if we are not doing a forced allocation 12959 * and are not shrinking a process' TSB. 12960 */ 12961 if ((flags & TSB_SHRINK) == 0 && 12962 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 12963 if ((flags & TSB_FORCEALLOC) == 0) 12964 return (ENOMEM); 12965 lowmem = 1; 12966 } 12967 12968 /* 12969 * Allocate from the correct location based upon the size of the TSB 12970 * compared to the base page size, and what memory conditions dictate. 12971 * Note we always do nonblocking allocations from the TSB arena since 12972 * we don't want memory fragmentation to cause processes to block 12973 * indefinitely waiting for memory; until the kernel algorithms that 12974 * coalesce large pages are improved this is our best option. 
12975 * 12976 * Algorithm: 12977 * If allocating a "large" TSB (>8K), allocate from the 12978 * appropriate kmem_tsb_default_arena vmem arena 12979 * else if low on memory or the TSB_FORCEALLOC flag is set or 12980 * tsb_forceheap is set 12981 * Allocate from kernel heap via sfmmu_tsb8k_cache with 12982 * KM_SLEEP (never fails) 12983 * else 12984 * Allocate from appropriate sfmmu_tsb_cache with 12985 * KM_NOSLEEP 12986 * endif 12987 */ 12988 if (tsb_lgrp_affinity) 12989 lgrpid = lgrp_home_id(curthread); 12990 if (lgrpid == LGRP_NONE) 12991 lgrpid = 0; /* use lgrp of boot CPU */ 12992 12993 if (tsbbytes > MMU_PAGESIZE) { 12994 if (tsbbytes > MMU_PAGESIZE4M) { 12995 vmp = kmem_bigtsb_default_arena[lgrpid]; 12996 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12997 0, 0, NULL, NULL, VM_NOSLEEP); 12998 } else { 12999 vmp = kmem_tsb_default_arena[lgrpid]; 13000 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 13001 0, 0, NULL, NULL, VM_NOSLEEP); 13002 } 13003 #ifdef DEBUG 13004 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 13005 #else /* !DEBUG */ 13006 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 13007 #endif /* DEBUG */ 13008 kmem_cachep = sfmmu_tsb8k_cache; 13009 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 13010 ASSERT(vaddr != NULL); 13011 } else { 13012 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 13013 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 13014 } 13015 13016 tsbinfo->tsb_cache = kmem_cachep; 13017 tsbinfo->tsb_vmp = vmp; 13018 13019 if (vaddr == NULL) { 13020 return (EAGAIN); 13021 } 13022 13023 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 13024 kmem_cachep = tsbinfo->tsb_cache; 13025 13026 /* 13027 * If we are allocating from outside the cage, then we need to 13028 * register a relocation callback handler. Note that for now 13029 * since pseudo mappings always hang off of the slab's root page, 13030 * we need only lock the first 8K of the TSB slab. This is a bit 13031 * hacky but it is good for performance. 13032 */ 13033 if (kmem_cachep != sfmmu_tsb8k_cache) { 13034 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 13035 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 13036 ASSERT(ret == 0); 13037 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 13038 cbflags, (void *)tsbinfo, &pfn, NULL); 13039 13040 /* 13041 * Need to free up resources if we could not successfully 13042 * add the callback function and return an error condition. 13043 */ 13044 if (ret != 0) { 13045 if (kmem_cachep) { 13046 kmem_cache_free(kmem_cachep, vaddr); 13047 } else { 13048 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 13049 } 13050 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 13051 S_WRITE); 13052 return (EAGAIN); 13053 } 13054 } else { 13055 /* 13056 * Since allocation of 8K TSBs from heap is rare and occurs 13057 * during memory pressure we allocate them from permanent 13058 * memory rather than using callbacks to get the PFN. 
13059 */ 13060 pfn = hat_getpfnum(kas.a_hat, vaddr); 13061 } 13062 13063 tsbinfo->tsb_va = vaddr; 13064 tsbinfo->tsb_szc = tsbcode; 13065 tsbinfo->tsb_ttesz_mask = tteszmask; 13066 tsbinfo->tsb_next = NULL; 13067 tsbinfo->tsb_flags = 0; 13068 13069 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 13070 13071 sfmmu_inv_tsb(vaddr, tsbbytes); 13072 13073 if (kmem_cachep != sfmmu_tsb8k_cache) { 13074 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 13075 } 13076 13077 return (0); 13078 } 13079 13080 /* 13081 * Initialize per cpu tsb and per cpu tsbmiss_area 13082 */ 13083 void 13084 sfmmu_init_tsbs(void) 13085 { 13086 int i; 13087 struct tsbmiss *tsbmissp; 13088 struct kpmtsbm *kpmtsbmp; 13089 #ifndef sun4v 13090 extern int dcache_line_mask; 13091 #endif /* sun4v */ 13092 extern uint_t vac_colors; 13093 13094 /* 13095 * Init. tsb miss area. 13096 */ 13097 tsbmissp = tsbmiss_area; 13098 13099 for (i = 0; i < NCPU; tsbmissp++, i++) { 13100 /* 13101 * initialize the tsbmiss area. 13102 * Do this for all possible CPUs as some may be added 13103 * while the system is running. There is no cost to this. 13104 */ 13105 tsbmissp->ksfmmup = ksfmmup; 13106 #ifndef sun4v 13107 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13108 #endif /* sun4v */ 13109 tsbmissp->khashstart = 13110 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13111 tsbmissp->uhashstart = 13112 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13113 tsbmissp->khashsz = khmehash_num; 13114 tsbmissp->uhashsz = uhmehash_num; 13115 } 13116 13117 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13118 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13119 13120 if (kpm_enable == 0) 13121 return; 13122 13123 /* -- Begin KPM specific init -- */ 13124 13125 if (kpm_smallpages) { 13126 /* 13127 * If we're using base pagesize pages for seg_kpm 13128 * mappings, we use the kernel TSB since we can't afford 13129 * to allocate a second huge TSB for these mappings. 13130 */ 13131 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13132 kpm_tsbsz = ktsb_szcode; 13133 kpmsm_tsbbase = kpm_tsbbase; 13134 kpmsm_tsbsz = kpm_tsbsz; 13135 } else { 13136 /* 13137 * In VAC conflict case, just put the entries in the 13138 * kernel 8K indexed TSB for now so we can find them. 13139 * This could really be changed in the future if we feel 13140 * the need... 13141 */ 13142 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13143 kpmsm_tsbsz = ktsb_szcode; 13144 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13145 kpm_tsbsz = ktsb4m_szcode; 13146 } 13147 13148 kpmtsbmp = kpmtsbm_area; 13149 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13150 /* 13151 * Initialize the kpmtsbm area. 13152 * Do this for all possible CPUs as some may be added 13153 * while the system is running. There is no cost to this. 13154 */ 13155 kpmtsbmp->vbase = kpm_vbase; 13156 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13157 kpmtsbmp->sz_shift = kpm_size_shift; 13158 kpmtsbmp->kpmp_shift = kpmp_shift; 13159 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13160 if (kpm_smallpages == 0) { 13161 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13162 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13163 } else { 13164 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13165 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13166 } 13167 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13168 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13169 #ifdef DEBUG 13170 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 13171 #endif /* DEBUG */ 13172 if (ktsb_phys) 13173 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13174 } 13175 13176 /* -- End KPM specific init -- */ 13177 } 13178 13179 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13180 struct tsb_info ktsb_info[2]; 13181 13182 /* 13183 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13184 */ 13185 void 13186 sfmmu_init_ktsbinfo() 13187 { 13188 ASSERT(ksfmmup != NULL); 13189 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13190 /* 13191 * Allocate tsbinfos for kernel and copy in data 13192 * to make debug easier and sun4v setup easier. 13193 */ 13194 ktsb_info[0].tsb_sfmmu = ksfmmup; 13195 ktsb_info[0].tsb_szc = ktsb_szcode; 13196 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13197 ktsb_info[0].tsb_va = ktsb_base; 13198 ktsb_info[0].tsb_pa = ktsb_pbase; 13199 ktsb_info[0].tsb_flags = 0; 13200 ktsb_info[0].tsb_tte.ll = 0; 13201 ktsb_info[0].tsb_cache = NULL; 13202 13203 ktsb_info[1].tsb_sfmmu = ksfmmup; 13204 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13205 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13206 ktsb_info[1].tsb_va = ktsb4m_base; 13207 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13208 ktsb_info[1].tsb_flags = 0; 13209 ktsb_info[1].tsb_tte.ll = 0; 13210 ktsb_info[1].tsb_cache = NULL; 13211 13212 /* Link them into ksfmmup. */ 13213 ktsb_info[0].tsb_next = &ktsb_info[1]; 13214 ktsb_info[1].tsb_next = NULL; 13215 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13216 13217 sfmmu_setup_tsbinfo(ksfmmup); 13218 } 13219 13220 /* 13221 * Cache the last value returned from va_to_pa(). If the VA specified 13222 * in the current call to cached_va_to_pa() maps to the same Page (as the 13223 * previous call to cached_va_to_pa()), then compute the PA using 13224 * cached info, else call va_to_pa(). 13225 * 13226 * Note: this function is neither MT-safe nor consistent in the presence 13227 * of multiple, interleaved threads. This function was created to enable 13228 * an optimization used during boot (at a point when there's only one thread 13229 * executing on the "boot CPU", and before startup_vm() has been called). 13230 */ 13231 static uint64_t 13232 cached_va_to_pa(void *vaddr) 13233 { 13234 static uint64_t prev_vaddr_base = 0; 13235 static uint64_t prev_pfn = 0; 13236 13237 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13238 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13239 } else { 13240 uint64_t pa = va_to_pa(vaddr); 13241 13242 if (pa != ((uint64_t)-1)) { 13243 /* 13244 * Computed physical address is valid. Cache its 13245 * related info for the next cached_va_to_pa() call. 13246 */ 13247 prev_pfn = pa & MMU_PAGEMASK; 13248 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13249 } 13250 13251 return (pa); 13252 } 13253 } 13254 13255 /* 13256 * Carve up our nucleus hblk region. We may allocate more hblks than 13257 * asked due to rounding errors but we are guaranteed to have at least 13258 * enough space to allocate the requested number of hblk8's and hblk1's. 
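 * (Roughly why: the hblk8 loop below only runs while
 * i <= size - nhblk1 * hme1blk_sz - hme8blk_sz, so once it finishes at
 * least nhblk1 * hme1blk_sz bytes of the region are still left for the
 * hblk1 loop that follows.)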
13259 */
13260 void
13261 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13262 {
13263 struct hme_blk *hmeblkp;
13264 size_t hme8blk_sz, hme1blk_sz;
13265 size_t i;
13266 size_t hblk8_bound;
13267 ulong_t j = 0, k = 0;
13268
13269 ASSERT(addr != NULL && size != 0);
13270
13271 /* Need to use proper structure alignment */
13272 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13273 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13274
13275 nucleus_hblk8.list = (void *)addr;
13276 nucleus_hblk8.index = 0;
13277
13278 /*
13279 * Use as much memory as possible for hblk8's since we
13280 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13281 * We need to hold back enough space for the hblk1's which
13282 * we'll allocate next.
13283 */
13284 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13285 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13286 hmeblkp = (struct hme_blk *)addr;
13287 addr += hme8blk_sz;
13288 hmeblkp->hblk_nuc_bit = 1;
13289 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13290 }
13291 nucleus_hblk8.len = j;
13292 ASSERT(j >= nhblk8);
13293 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13294
13295 nucleus_hblk1.list = (void *)addr;
13296 nucleus_hblk1.index = 0;
13297 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13298 hmeblkp = (struct hme_blk *)addr;
13299 addr += hme1blk_sz;
13300 hmeblkp->hblk_nuc_bit = 1;
13301 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13302 }
13303 ASSERT(k >= nhblk1);
13304 nucleus_hblk1.len = k;
13305 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13306 }
13307
13308 /*
13309 * This function is currently not supported on this platform.  For what
13310 * it's supposed to do, see hat.c and hat_srmmu.c.
13311 */
13312 /* ARGSUSED */
13313 faultcode_t
13314 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13315 uint_t flags)
13316 {
13317 ASSERT(hat->sfmmu_xhat_provider == NULL);
13318 return (FC_NOSUPPORT);
13319 }
13320
13321 /*
13322 * Searches the mapping list of the page for a mapping of the same size.  If
13323 * none is found, the corresponding bit is cleared in the p_index field.  When
13324 * large pages are more prevalent in the system, we can maintain the mapping
13325 * list in order and we don't have to traverse the list each time.  Just check
13326 * the next and prev entries, and if both are of different size, we clear the
13327 * bit.
13328 */
13329 static void
13330 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13331 {
13332 struct sf_hment *sfhmep;
13333 struct hme_blk *hmeblkp;
13334 int index;
13335 pgcnt_t npgs;
13336
13337 ASSERT(ttesz > TTE8K);
13338
13339 ASSERT(sfmmu_mlist_held(pp));
13340
13341 ASSERT(PP_ISMAPPED_LARGE(pp));
13342
13343 /*
13344 * Traverse the mapping list looking for another mapping of the same
13345 * size, since we only want to clear the index field if all mappings of
13346 * that size are gone.
13347 */
13348
13349 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13350 if (IS_PAHME(sfhmep))
13351 continue;
13352 hmeblkp = sfmmu_hmetohblk(sfhmep);
13353 if (hmeblkp->hblk_xhat_bit)
13354 continue;
13355 if (hme_size(sfhmep) == ttesz) {
13356 /*
13357 * Another mapping of the same size; don't clear the index.
13358 */
13359 return;
13360 }
13361 }
13362
13363 /*
13364 * Clear the p_index bit for large page.
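 * For example, for a 4M mapping (with an 8K base page size that is
 * TTEPAGES(TTE4M) == 512 constituent pages) the loop below clears the bit
 * selected by PAGESZ_TO_INDEX(TTE4M) in each constituent page's p_index.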
13364 */ 13365 index = PAGESZ_TO_INDEX(ttesz); 13366 npgs = TTEPAGES(ttesz); 13367 while (npgs-- > 0) { 13368 ASSERT(pp->p_index & index); 13369 pp->p_index &= ~index; 13370 pp = PP_PAGENEXT(pp); 13371 } 13372 } 13373 13374 /* 13375 * return supported features 13376 */ 13377 /* ARGSUSED */ 13378 int 13379 hat_supported(enum hat_features feature, void *arg) 13380 { 13381 switch (feature) { 13382 case HAT_SHARED_PT: 13383 case HAT_DYNAMIC_ISM_UNMAP: 13384 case HAT_VMODSORT: 13385 return (1); 13386 case HAT_SHARED_REGIONS: 13387 if (shctx_on) 13388 return (1); 13389 else 13390 return (0); 13391 default: 13392 return (0); 13393 } 13394 } 13395 13396 void 13397 hat_enter(struct hat *hat) 13398 { 13399 hatlock_t *hatlockp; 13400 13401 if (hat != ksfmmup) { 13402 hatlockp = TSB_HASH(hat); 13403 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13404 } 13405 } 13406 13407 void 13408 hat_exit(struct hat *hat) 13409 { 13410 hatlock_t *hatlockp; 13411 13412 if (hat != ksfmmup) { 13413 hatlockp = TSB_HASH(hat); 13414 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13415 } 13416 } 13417 13418 /*ARGSUSED*/ 13419 void 13420 hat_reserve(struct as *as, caddr_t addr, size_t len) 13421 { 13422 } 13423 13424 static void 13425 hat_kstat_init(void) 13426 { 13427 kstat_t *ksp; 13428 13429 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13430 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13431 KSTAT_FLAG_VIRTUAL); 13432 if (ksp) { 13433 ksp->ks_data = (void *) &sfmmu_global_stat; 13434 kstat_install(ksp); 13435 } 13436 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13437 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13438 KSTAT_FLAG_VIRTUAL); 13439 if (ksp) { 13440 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13441 kstat_install(ksp); 13442 } 13443 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13444 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13445 KSTAT_FLAG_WRITABLE); 13446 if (ksp) { 13447 ksp->ks_update = sfmmu_kstat_percpu_update; 13448 kstat_install(ksp); 13449 } 13450 } 13451 13452 /* ARGSUSED */ 13453 static int 13454 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13455 { 13456 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13457 struct tsbmiss *tsbm = tsbmiss_area; 13458 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13459 int i; 13460 13461 ASSERT(cpu_kstat); 13462 if (rw == KSTAT_READ) { 13463 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13464 cpu_kstat->sf_itlb_misses = 0; 13465 cpu_kstat->sf_dtlb_misses = 0; 13466 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13467 tsbm->uprot_traps; 13468 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13469 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13470 cpu_kstat->sf_tsb_hits = 0; 13471 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13472 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13473 } 13474 } else { 13475 /* KSTAT_WRITE is used to clear stats */ 13476 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13477 tsbm->utsb_misses = 0; 13478 tsbm->ktsb_misses = 0; 13479 tsbm->uprot_traps = 0; 13480 tsbm->kprot_traps = 0; 13481 kpmtsbm->kpm_dtlb_misses = 0; 13482 kpmtsbm->kpm_tsb_misses = 0; 13483 } 13484 } 13485 return (0); 13486 } 13487 13488 #ifdef DEBUG 13489 13490 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13491 13492 /* 13493 * A tte checker. *orig_old is the value we read before cas. 13494 * *cur is the value returned by cas. 13495 * *new is the desired value when we do the cas. 13496 * 13497 * *hmeblkp is currently unused. 
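 * In essence it panics whenever a valid translation would change its pfn
 * across the cas (an unexpected remap), or when the valid/invalid
 * combination of the three ttes is one the callers never expect to see.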
13498 */ 13499 13500 /* ARGSUSED */ 13501 void 13502 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13503 { 13504 pfn_t i, j, k; 13505 int cpuid = CPU->cpu_id; 13506 13507 gorig[cpuid] = orig_old; 13508 gcur[cpuid] = cur; 13509 gnew[cpuid] = new; 13510 13511 #ifdef lint 13512 hmeblkp = hmeblkp; 13513 #endif 13514 13515 if (TTE_IS_VALID(orig_old)) { 13516 if (TTE_IS_VALID(cur)) { 13517 i = TTE_TO_TTEPFN(orig_old); 13518 j = TTE_TO_TTEPFN(cur); 13519 k = TTE_TO_TTEPFN(new); 13520 if (i != j) { 13521 /* remap error? */ 13522 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13523 } 13524 13525 if (i != k) { 13526 /* remap error? */ 13527 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13528 } 13529 } else { 13530 if (TTE_IS_VALID(new)) { 13531 panic("chk_tte: invalid cur? "); 13532 } 13533 13534 i = TTE_TO_TTEPFN(orig_old); 13535 k = TTE_TO_TTEPFN(new); 13536 if (i != k) { 13537 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13538 } 13539 } 13540 } else { 13541 if (TTE_IS_VALID(cur)) { 13542 j = TTE_TO_TTEPFN(cur); 13543 if (TTE_IS_VALID(new)) { 13544 k = TTE_TO_TTEPFN(new); 13545 if (j != k) { 13546 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13547 j, k); 13548 } 13549 } else { 13550 panic("chk_tte: why here?"); 13551 } 13552 } else { 13553 if (!TTE_IS_VALID(new)) { 13554 panic("chk_tte: why here2 ?"); 13555 } 13556 } 13557 } 13558 } 13559 13560 #endif /* DEBUG */ 13561 13562 extern void prefetch_tsbe_read(struct tsbe *); 13563 extern void prefetch_tsbe_write(struct tsbe *); 13564 13565 13566 /* 13567 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13568 * us optimal performance on Cheetah+. You can only have 8 outstanding 13569 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13570 * prefetch to make the most utilization of the prefetch capability. 13571 */ 13572 #define TSBE_PREFETCH_STRIDE (7) 13573 13574 void 13575 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13576 { 13577 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13578 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13579 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13580 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13581 struct tsbe *old; 13582 struct tsbe *new; 13583 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13584 uint64_t va; 13585 int new_offset; 13586 int i; 13587 int vpshift; 13588 int last_prefetch; 13589 13590 if (old_bytes == new_bytes) { 13591 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13592 } else { 13593 13594 /* 13595 * A TSBE is 16 bytes which means there are four TSBE's per 13596 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13597 */ 13598 old = (struct tsbe *)old_tsbinfo->tsb_va; 13599 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13600 for (i = 0; i < old_entries; i++, old++) { 13601 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13602 prefetch_tsbe_read(old); 13603 if (!old->tte_tag.tag_invalid) { 13604 /* 13605 * We have a valid TTE to remap. Check the 13606 * size. We won't remap 64K or 512K TTEs 13607 * because they span more than one TSB entry 13608 * and are indexed using an 8K virt. page. 13609 * Ditto for 32M and 256M TTEs. 
13610 */ 13611 if (TTE_CSZ(&old->tte_data) == TTE64K || 13612 TTE_CSZ(&old->tte_data) == TTE512K) 13613 continue; 13614 if (mmu_page_sizes == max_mmu_page_sizes) { 13615 if (TTE_CSZ(&old->tte_data) == TTE32M || 13616 TTE_CSZ(&old->tte_data) == TTE256M) 13617 continue; 13618 } 13619 13620 /* clear the lower 22 bits of the va */ 13621 va = *(uint64_t *)old << 22; 13622 /* turn va into a virtual pfn */ 13623 va >>= 22 - TSB_START_SIZE; 13624 /* 13625 * or in bits from the offset in the tsb 13626 * to get the real virtual pfn. These 13627 * correspond to bits [21:13] in the va 13628 */ 13629 vpshift = 13630 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13631 0x1ff; 13632 va |= (i << vpshift); 13633 va >>= vpshift; 13634 new_offset = va & (new_entries - 1); 13635 new = new_base + new_offset; 13636 prefetch_tsbe_write(new); 13637 *new = *old; 13638 } 13639 } 13640 } 13641 } 13642 13643 /* 13644 * unused in sfmmu 13645 */ 13646 void 13647 hat_dump(void) 13648 { 13649 } 13650 13651 /* 13652 * Called when a thread is exiting and we have switched to the kernel address 13653 * space. Perform the same VM initialization resume() uses when switching 13654 * processes. 13655 * 13656 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13657 * we call it anyway in case the semantics change in the future. 13658 */ 13659 /*ARGSUSED*/ 13660 void 13661 hat_thread_exit(kthread_t *thd) 13662 { 13663 uint_t pgsz_cnum; 13664 uint_t pstate_save; 13665 13666 ASSERT(thd->t_procp->p_as == &kas); 13667 13668 pgsz_cnum = KCONTEXT; 13669 #ifdef sun4u 13670 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13671 #endif 13672 13673 /* 13674 * Note that sfmmu_load_mmustate() is currently a no-op for 13675 * kernel threads. We need to disable interrupts here, 13676 * simply because otherwise sfmmu_load_mmustate() would panic 13677 * if the caller does not disable interrupts. 13678 */ 13679 pstate_save = sfmmu_disable_intrs(); 13680 13681 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13682 sfmmu_setctx_sec(pgsz_cnum); 13683 sfmmu_load_mmustate(ksfmmup); 13684 sfmmu_enable_intrs(pstate_save); 13685 } 13686 13687 13688 /* 13689 * SRD support 13690 */ 13691 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13692 (((uintptr_t)(vp)) >> 11)) & \ 13693 srd_hashmask) 13694 13695 /* 13696 * Attach the process to the srd struct associated with the exec vnode 13697 * from which the process is started. 
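 *
 * Nothing is done when shared contexts are disabled (shctx_on == 0).
 * Otherwise the process takes a hold on the exec vnode (dropped again in
 * sfmmu_leave_srd()) and looks it up in the srd hash.  If no srd exists
 * yet, one is allocated and the lookup is repeated under the bucket
 * lock, so two processes exec'ing the same file race safely and end up
 * sharing a single srd.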
13698 */ 13699 void 13700 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13701 { 13702 uint_t hash = SRD_HASH_FUNCTION(evp); 13703 sf_srd_t *srdp; 13704 sf_srd_t *newsrdp; 13705 13706 ASSERT(sfmmup != ksfmmup); 13707 ASSERT(sfmmup->sfmmu_srdp == NULL); 13708 13709 if (!shctx_on) { 13710 return; 13711 } 13712 13713 VN_HOLD(evp); 13714 13715 if (srd_buckets[hash].srdb_srdp != NULL) { 13716 mutex_enter(&srd_buckets[hash].srdb_lock); 13717 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13718 srdp = srdp->srd_hash) { 13719 if (srdp->srd_evp == evp) { 13720 ASSERT(srdp->srd_refcnt >= 0); 13721 sfmmup->sfmmu_srdp = srdp; 13722 atomic_add_32( 13723 (volatile uint_t *)&srdp->srd_refcnt, 1); 13724 mutex_exit(&srd_buckets[hash].srdb_lock); 13725 return; 13726 } 13727 } 13728 mutex_exit(&srd_buckets[hash].srdb_lock); 13729 } 13730 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13731 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13732 13733 newsrdp->srd_evp = evp; 13734 newsrdp->srd_refcnt = 1; 13735 newsrdp->srd_hmergnfree = NULL; 13736 newsrdp->srd_ismrgnfree = NULL; 13737 13738 mutex_enter(&srd_buckets[hash].srdb_lock); 13739 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13740 srdp = srdp->srd_hash) { 13741 if (srdp->srd_evp == evp) { 13742 ASSERT(srdp->srd_refcnt >= 0); 13743 sfmmup->sfmmu_srdp = srdp; 13744 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 13745 mutex_exit(&srd_buckets[hash].srdb_lock); 13746 kmem_cache_free(srd_cache, newsrdp); 13747 return; 13748 } 13749 } 13750 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13751 srd_buckets[hash].srdb_srdp = newsrdp; 13752 sfmmup->sfmmu_srdp = newsrdp; 13753 13754 mutex_exit(&srd_buckets[hash].srdb_lock); 13755 13756 } 13757 13758 static void 13759 sfmmu_leave_srd(sfmmu_t *sfmmup) 13760 { 13761 vnode_t *evp; 13762 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13763 uint_t hash; 13764 sf_srd_t **prev_srdpp; 13765 sf_region_t *rgnp; 13766 sf_region_t *nrgnp; 13767 #ifdef DEBUG 13768 int rgns = 0; 13769 #endif 13770 int i; 13771 13772 ASSERT(sfmmup != ksfmmup); 13773 ASSERT(srdp != NULL); 13774 ASSERT(srdp->srd_refcnt > 0); 13775 ASSERT(sfmmup->sfmmu_scdp == NULL); 13776 ASSERT(sfmmup->sfmmu_free == 1); 13777 13778 sfmmup->sfmmu_srdp = NULL; 13779 evp = srdp->srd_evp; 13780 ASSERT(evp != NULL); 13781 if (atomic_add_32_nv( 13782 (volatile uint_t *)&srdp->srd_refcnt, -1)) { 13783 VN_RELE(evp); 13784 return; 13785 } 13786 13787 hash = SRD_HASH_FUNCTION(evp); 13788 mutex_enter(&srd_buckets[hash].srdb_lock); 13789 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13790 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13791 if (srdp->srd_evp == evp) { 13792 break; 13793 } 13794 } 13795 if (srdp == NULL || srdp->srd_refcnt) { 13796 mutex_exit(&srd_buckets[hash].srdb_lock); 13797 VN_RELE(evp); 13798 return; 13799 } 13800 *prev_srdpp = srdp->srd_hash; 13801 mutex_exit(&srd_buckets[hash].srdb_lock); 13802 13803 ASSERT(srdp->srd_refcnt == 0); 13804 VN_RELE(evp); 13805 13806 #ifdef DEBUG 13807 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13808 ASSERT(srdp->srd_rgnhash[i] == NULL); 13809 } 13810 #endif /* DEBUG */ 13811 13812 /* free each hme regions in the srd */ 13813 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13814 nrgnp = rgnp->rgn_next; 13815 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13816 ASSERT(rgnp->rgn_refcnt == 0); 13817 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13818 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13819 ASSERT(rgnp->rgn_hmeflags == 0); 13820 
ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13821 #ifdef DEBUG 13822 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13823 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13824 } 13825 rgns++; 13826 #endif /* DEBUG */ 13827 kmem_cache_free(region_cache, rgnp); 13828 } 13829 ASSERT(rgns == srdp->srd_next_hmerid); 13830 13831 #ifdef DEBUG 13832 rgns = 0; 13833 #endif 13834 /* free each ism rgns in the srd */ 13835 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13836 nrgnp = rgnp->rgn_next; 13837 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13838 ASSERT(rgnp->rgn_refcnt == 0); 13839 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13840 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13841 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13842 #ifdef DEBUG 13843 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13844 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13845 } 13846 rgns++; 13847 #endif /* DEBUG */ 13848 kmem_cache_free(region_cache, rgnp); 13849 } 13850 ASSERT(rgns == srdp->srd_next_ismrid); 13851 ASSERT(srdp->srd_ismbusyrgns == 0); 13852 ASSERT(srdp->srd_hmebusyrgns == 0); 13853 13854 srdp->srd_next_ismrid = 0; 13855 srdp->srd_next_hmerid = 0; 13856 13857 bzero((void *)srdp->srd_ismrgnp, 13858 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13859 bzero((void *)srdp->srd_hmergnp, 13860 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13861 13862 ASSERT(srdp->srd_scdp == NULL); 13863 kmem_cache_free(srd_cache, srdp); 13864 } 13865 13866 /* ARGSUSED */ 13867 static int 13868 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13869 { 13870 sf_srd_t *srdp = (sf_srd_t *)buf; 13871 bzero(buf, sizeof (*srdp)); 13872 13873 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13874 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13875 return (0); 13876 } 13877 13878 /* ARGSUSED */ 13879 static void 13880 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 13881 { 13882 sf_srd_t *srdp = (sf_srd_t *)buf; 13883 13884 mutex_destroy(&srdp->srd_mutex); 13885 mutex_destroy(&srdp->srd_scd_mutex); 13886 } 13887 13888 /* 13889 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 13890 * at the same time for the same process and address range. This is ensured by 13891 * the fact that address space is locked as writer when a process joins the 13892 * regions. Therefore there's no need to hold an srd lock during the entire 13893 * execution of hat_join_region()/hat_leave_region(). 13894 */ 13895 13896 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 13897 (((uintptr_t)(obj)) >> 11)) & \ 13898 srd_rgn_hashmask) 13899 /* 13900 * This routine implements the shared context functionality required when 13901 * attaching a segment to an address space. It must be called from 13902 * hat_share() for D(ISM) segments and from segvn_create() for segments 13903 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 13904 * which is saved in the private segment data for hme segments and 13905 * the ism_map structure for ism segments. 
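 *
 * HAT_INVALID_REGION_COOKIE is returned whenever the routine declines to
 * set up a shared region (no srd, zero length, a writable or non-text
 * hme request, or no free region ids), so callers must be prepared to
 * fall back to ordinary private mappings.  An illustrative sketch of a
 * text-segment caller (the names below are illustrative only):
 *
 *	cookie = hat_join_region(as->a_hat, seg->s_base, seg->s_size,
 *	    (void *)vp, offset, prot, szc, my_rgn_unload_cb,
 *	    HAT_REGION_TEXT);
 *	if (cookie != HAT_INVALID_REGION_COOKIE)
 *		svd->rcookie = cookie;
 *
 * where svd->rcookie stands in for wherever the segment driver keeps the
 * cookie until hat_leave_region() time.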
13906 */ 13907 hat_region_cookie_t 13908 hat_join_region(struct hat *sfmmup, 13909 caddr_t r_saddr, 13910 size_t r_size, 13911 void *r_obj, 13912 u_offset_t r_objoff, 13913 uchar_t r_perm, 13914 uchar_t r_pgszc, 13915 hat_rgn_cb_func_t r_cb_function, 13916 uint_t flags) 13917 { 13918 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13919 uint_t rhash; 13920 uint_t rid; 13921 hatlock_t *hatlockp; 13922 sf_region_t *rgnp; 13923 sf_region_t *new_rgnp = NULL; 13924 int i; 13925 uint16_t *nextidp; 13926 sf_region_t **freelistp; 13927 int maxids; 13928 sf_region_t **rarrp; 13929 uint16_t *busyrgnsp; 13930 ulong_t rttecnt; 13931 uchar_t tteflag; 13932 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 13933 int text = (r_type == HAT_REGION_TEXT); 13934 13935 if (srdp == NULL || r_size == 0) { 13936 return (HAT_INVALID_REGION_COOKIE); 13937 } 13938 13939 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 13940 ASSERT(sfmmup != ksfmmup); 13941 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 13942 ASSERT(srdp->srd_refcnt > 0); 13943 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 13944 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 13945 ASSERT(r_pgszc < mmu_page_sizes); 13946 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 13947 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 13948 panic("hat_join_region: region addr or size is not aligned\n"); 13949 } 13950 13951 13952 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 13953 SFMMU_REGION_HME; 13954 /* 13955 * Currently only support shared hmes for the read only main text 13956 * region. 13957 */ 13958 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 13959 (r_perm & PROT_WRITE))) { 13960 return (HAT_INVALID_REGION_COOKIE); 13961 } 13962 13963 rhash = RGN_HASH_FUNCTION(r_obj); 13964 13965 if (r_type == SFMMU_REGION_ISM) { 13966 nextidp = &srdp->srd_next_ismrid; 13967 freelistp = &srdp->srd_ismrgnfree; 13968 maxids = SFMMU_MAX_ISM_REGIONS; 13969 rarrp = srdp->srd_ismrgnp; 13970 busyrgnsp = &srdp->srd_ismbusyrgns; 13971 } else { 13972 nextidp = &srdp->srd_next_hmerid; 13973 freelistp = &srdp->srd_hmergnfree; 13974 maxids = SFMMU_MAX_HME_REGIONS; 13975 rarrp = srdp->srd_hmergnp; 13976 busyrgnsp = &srdp->srd_hmebusyrgns; 13977 } 13978 13979 mutex_enter(&srdp->srd_mutex); 13980 13981 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 13982 rgnp = rgnp->rgn_hash) { 13983 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 13984 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 13985 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 13986 break; 13987 } 13988 } 13989 13990 rfound: 13991 if (rgnp != NULL) { 13992 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 13993 ASSERT(rgnp->rgn_cb_function == r_cb_function); 13994 ASSERT(rgnp->rgn_refcnt >= 0); 13995 rid = rgnp->rgn_id; 13996 ASSERT(rid < maxids); 13997 ASSERT(rarrp[rid] == rgnp); 13998 ASSERT(rid < *nextidp); 13999 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14000 mutex_exit(&srdp->srd_mutex); 14001 if (new_rgnp != NULL) { 14002 kmem_cache_free(region_cache, new_rgnp); 14003 } 14004 if (r_type == SFMMU_REGION_HME) { 14005 int myjoin = 14006 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 14007 14008 sfmmu_link_to_hmeregion(sfmmup, rgnp); 14009 /* 14010 * bitmap should be updated after linking sfmmu on 14011 * region list so that pageunload() doesn't skip 14012 * TSB/TLB flush. As soon as bitmap is updated another 14013 * thread in this process can already start accessing 14014 * this region. 
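 * The rid itself is added to sfmmu_hmeregion_map (and to the per-cpu
 * tsbmiss shmermap) further down, after the ttecnt and tsb0 inflation
 * accounting has been done under the hat lock.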
14015 */ 14016 /* 14017 * Normally ttecnt accounting is done as part of 14018 * pagefault handling. But a process may not take any 14019 * pagefaults on shared hmeblks created by some other 14020 * process. To compensate for this assume that the 14021 * entire region will end up faulted in using 14022 * the region's pagesize. 14023 * 14024 */ 14025 if (r_pgszc > TTE8K) { 14026 tteflag = 1 << r_pgszc; 14027 if (disable_large_pages & tteflag) { 14028 tteflag = 0; 14029 } 14030 } else { 14031 tteflag = 0; 14032 } 14033 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 14034 hatlockp = sfmmu_hat_enter(sfmmup); 14035 sfmmup->sfmmu_rtteflags |= tteflag; 14036 sfmmu_hat_exit(hatlockp); 14037 } 14038 hatlockp = sfmmu_hat_enter(sfmmup); 14039 14040 /* 14041 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 14042 * region to allow for large page allocation failure. 14043 */ 14044 if (r_pgszc >= TTE4M) { 14045 sfmmup->sfmmu_tsb0_4minflcnt += 14046 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14047 } 14048 14049 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14050 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14051 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14052 rttecnt); 14053 14054 if (text && r_pgszc >= TTE4M && 14055 (tteflag || ((disable_large_pages >> TTE4M) & 14056 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 14057 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 14058 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 14059 } 14060 14061 sfmmu_hat_exit(hatlockp); 14062 /* 14063 * On Panther we need to make sure TLB is programmed 14064 * to accept 32M/256M pages. Call 14065 * sfmmu_check_page_sizes() now to make sure TLB is 14066 * setup before making hmeregions visible to other 14067 * threads. 14068 */ 14069 sfmmu_check_page_sizes(sfmmup, 1); 14070 hatlockp = sfmmu_hat_enter(sfmmup); 14071 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14072 14073 /* 14074 * if context is invalid tsb miss exception code will 14075 * call sfmmu_check_page_sizes() and update tsbmiss 14076 * area later. 14077 */ 14078 kpreempt_disable(); 14079 if (myjoin && 14080 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 14081 != INVALID_CONTEXT)) { 14082 struct tsbmiss *tsbmp; 14083 14084 tsbmp = &tsbmiss_area[CPU->cpu_id]; 14085 ASSERT(sfmmup == tsbmp->usfmmup); 14086 BT_SET(tsbmp->shmermap, rid); 14087 if (r_pgszc > TTE64K) { 14088 tsbmp->uhat_rtteflags |= tteflag; 14089 } 14090 14091 } 14092 kpreempt_enable(); 14093 14094 sfmmu_hat_exit(hatlockp); 14095 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 14096 HAT_INVALID_REGION_COOKIE); 14097 } else { 14098 hatlockp = sfmmu_hat_enter(sfmmup); 14099 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 14100 sfmmu_hat_exit(hatlockp); 14101 } 14102 ASSERT(rid < maxids); 14103 14104 if (r_type == SFMMU_REGION_ISM) { 14105 sfmmu_find_scd(sfmmup); 14106 } 14107 return ((hat_region_cookie_t)((uint64_t)rid)); 14108 } 14109 14110 ASSERT(new_rgnp == NULL); 14111 14112 if (*busyrgnsp >= maxids) { 14113 mutex_exit(&srdp->srd_mutex); 14114 return (HAT_INVALID_REGION_COOKIE); 14115 } 14116 14117 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14118 if (*freelistp != NULL) { 14119 rgnp = *freelistp; 14120 *freelistp = rgnp->rgn_next; 14121 ASSERT(rgnp->rgn_id < *nextidp); 14122 ASSERT(rgnp->rgn_id < maxids); 14123 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14124 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14125 == r_type); 14126 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14127 ASSERT(rgnp->rgn_hmeflags == 0); 14128 } else { 14129 /* 14130 * release local locks before memory allocation. 
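 * If another thread managed to insert a matching region while the mutex
 * was dropped, the rescan under the lock finds it and jumps to "rfound",
 * where the region allocated here is simply freed again.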
14131 */ 14132 mutex_exit(&srdp->srd_mutex); 14133 14134 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14135 14136 mutex_enter(&srdp->srd_mutex); 14137 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14138 rgnp = rgnp->rgn_hash) { 14139 if (rgnp->rgn_saddr == r_saddr && 14140 rgnp->rgn_size == r_size && 14141 rgnp->rgn_obj == r_obj && 14142 rgnp->rgn_objoff == r_objoff && 14143 rgnp->rgn_perm == r_perm && 14144 rgnp->rgn_pgszc == r_pgszc) { 14145 break; 14146 } 14147 } 14148 if (rgnp != NULL) { 14149 goto rfound; 14150 } 14151 14152 if (*nextidp >= maxids) { 14153 mutex_exit(&srdp->srd_mutex); 14154 goto fail; 14155 } 14156 rgnp = new_rgnp; 14157 new_rgnp = NULL; 14158 rgnp->rgn_id = (*nextidp)++; 14159 ASSERT(rgnp->rgn_id < maxids); 14160 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14161 rarrp[rgnp->rgn_id] = rgnp; 14162 } 14163 14164 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14165 ASSERT(rgnp->rgn_hmeflags == 0); 14166 #ifdef DEBUG 14167 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14168 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14169 } 14170 #endif 14171 rgnp->rgn_saddr = r_saddr; 14172 rgnp->rgn_size = r_size; 14173 rgnp->rgn_obj = r_obj; 14174 rgnp->rgn_objoff = r_objoff; 14175 rgnp->rgn_perm = r_perm; 14176 rgnp->rgn_pgszc = r_pgszc; 14177 rgnp->rgn_flags = r_type; 14178 rgnp->rgn_refcnt = 0; 14179 rgnp->rgn_cb_function = r_cb_function; 14180 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14181 srdp->srd_rgnhash[rhash] = rgnp; 14182 (*busyrgnsp)++; 14183 ASSERT(*busyrgnsp <= maxids); 14184 goto rfound; 14185 14186 fail: 14187 ASSERT(new_rgnp != NULL); 14188 kmem_cache_free(region_cache, new_rgnp); 14189 return (HAT_INVALID_REGION_COOKIE); 14190 } 14191 14192 /* 14193 * This function implements the shared context functionality required 14194 * when detaching a segment from an address space. It must be called 14195 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14196 * for segments with a valid region_cookie. 14197 * It will also be called from all seg_vn routines which change a 14198 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14199 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14200 * from segvn_fault(). 14201 */ 14202 void 14203 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14204 { 14205 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14206 sf_scd_t *scdp; 14207 uint_t rhash; 14208 uint_t rid = (uint_t)((uint64_t)rcookie); 14209 hatlock_t *hatlockp = NULL; 14210 sf_region_t *rgnp; 14211 sf_region_t **prev_rgnpp; 14212 sf_region_t *cur_rgnp; 14213 void *r_obj; 14214 int i; 14215 caddr_t r_saddr; 14216 caddr_t r_eaddr; 14217 size_t r_size; 14218 uchar_t r_pgszc; 14219 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14220 14221 ASSERT(sfmmup != ksfmmup); 14222 ASSERT(srdp != NULL); 14223 ASSERT(srdp->srd_refcnt > 0); 14224 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14225 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14226 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14227 14228 r_type = (r_type == HAT_REGION_ISM) ? 
SFMMU_REGION_ISM : 14229 SFMMU_REGION_HME; 14230 14231 if (r_type == SFMMU_REGION_ISM) { 14232 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14233 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14234 rgnp = srdp->srd_ismrgnp[rid]; 14235 } else { 14236 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14237 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14238 rgnp = srdp->srd_hmergnp[rid]; 14239 } 14240 ASSERT(rgnp != NULL); 14241 ASSERT(rgnp->rgn_id == rid); 14242 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14243 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14244 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14245 14246 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 14247 if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) { 14248 xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr, 14249 rgnp->rgn_size, 0, NULL); 14250 } 14251 14252 if (sfmmup->sfmmu_free) { 14253 ulong_t rttecnt; 14254 r_pgszc = rgnp->rgn_pgszc; 14255 r_size = rgnp->rgn_size; 14256 14257 ASSERT(sfmmup->sfmmu_scdp == NULL); 14258 if (r_type == SFMMU_REGION_ISM) { 14259 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14260 } else { 14261 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14262 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14263 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14264 14265 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14266 -rttecnt); 14267 14268 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14269 } 14270 } else if (r_type == SFMMU_REGION_ISM) { 14271 hatlockp = sfmmu_hat_enter(sfmmup); 14272 ASSERT(rid < srdp->srd_next_ismrid); 14273 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14274 scdp = sfmmup->sfmmu_scdp; 14275 if (scdp != NULL && 14276 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14277 sfmmu_leave_scd(sfmmup, r_type); 14278 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14279 } 14280 sfmmu_hat_exit(hatlockp); 14281 } else { 14282 ulong_t rttecnt; 14283 r_pgszc = rgnp->rgn_pgszc; 14284 r_saddr = rgnp->rgn_saddr; 14285 r_size = rgnp->rgn_size; 14286 r_eaddr = r_saddr + r_size; 14287 14288 ASSERT(r_type == SFMMU_REGION_HME); 14289 hatlockp = sfmmu_hat_enter(sfmmup); 14290 ASSERT(rid < srdp->srd_next_hmerid); 14291 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14292 14293 /* 14294 * If region is part of an SCD call sfmmu_leave_scd(). 14295 * Otherwise if process is not exiting and has valid context 14296 * just drop the context on the floor to lose stale TLB 14297 * entries and force the update of tsb miss area to reflect 14298 * the new region map. After that clean our TSB entries. 14299 */ 14300 scdp = sfmmup->sfmmu_scdp; 14301 if (scdp != NULL && 14302 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14303 sfmmu_leave_scd(sfmmup, r_type); 14304 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14305 } 14306 sfmmu_invalidate_ctx(sfmmup); 14307 14308 i = TTE8K; 14309 while (i < mmu_page_sizes) { 14310 if (rgnp->rgn_ttecnt[i] != 0) { 14311 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14312 r_eaddr, i); 14313 if (i < TTE4M) { 14314 i = TTE4M; 14315 continue; 14316 } else { 14317 break; 14318 } 14319 } 14320 i++; 14321 } 14322 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. 
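 * (This matches the r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2) that was added
 * to sfmmu_tsb0_4minflcnt when the region was joined or duplicated.)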
*/ 14323 if (r_pgszc >= TTE4M) { 14324 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14325 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14326 rttecnt); 14327 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14328 } 14329 14330 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14331 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14332 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14333 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14334 14335 sfmmu_hat_exit(hatlockp); 14336 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14337 /* sfmmup left the scd, grow private tsb */ 14338 sfmmu_check_page_sizes(sfmmup, 1); 14339 } else { 14340 sfmmu_check_page_sizes(sfmmup, 0); 14341 } 14342 } 14343 14344 if (r_type == SFMMU_REGION_HME) { 14345 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14346 } 14347 14348 r_obj = rgnp->rgn_obj; 14349 if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) { 14350 return; 14351 } 14352 14353 /* 14354 * looks like nobody uses this region anymore. Free it. 14355 */ 14356 rhash = RGN_HASH_FUNCTION(r_obj); 14357 mutex_enter(&srdp->srd_mutex); 14358 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14359 (cur_rgnp = *prev_rgnpp) != NULL; 14360 prev_rgnpp = &cur_rgnp->rgn_hash) { 14361 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14362 break; 14363 } 14364 } 14365 14366 if (cur_rgnp == NULL) { 14367 mutex_exit(&srdp->srd_mutex); 14368 return; 14369 } 14370 14371 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14372 *prev_rgnpp = rgnp->rgn_hash; 14373 if (r_type == SFMMU_REGION_ISM) { 14374 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14375 ASSERT(rid < srdp->srd_next_ismrid); 14376 rgnp->rgn_next = srdp->srd_ismrgnfree; 14377 srdp->srd_ismrgnfree = rgnp; 14378 ASSERT(srdp->srd_ismbusyrgns > 0); 14379 srdp->srd_ismbusyrgns--; 14380 mutex_exit(&srdp->srd_mutex); 14381 return; 14382 } 14383 mutex_exit(&srdp->srd_mutex); 14384 14385 /* 14386 * Destroy region's hmeblks. 14387 */ 14388 sfmmu_unload_hmeregion(srdp, rgnp); 14389 14390 rgnp->rgn_hmeflags = 0; 14391 14392 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14393 ASSERT(rgnp->rgn_id == rid); 14394 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14395 rgnp->rgn_ttecnt[i] = 0; 14396 } 14397 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14398 mutex_enter(&srdp->srd_mutex); 14399 ASSERT(rid < srdp->srd_next_hmerid); 14400 rgnp->rgn_next = srdp->srd_hmergnfree; 14401 srdp->srd_hmergnfree = rgnp; 14402 ASSERT(srdp->srd_hmebusyrgns > 0); 14403 srdp->srd_hmebusyrgns--; 14404 mutex_exit(&srdp->srd_mutex); 14405 } 14406 14407 /* 14408 * For now only called for hmeblk regions and not for ISM regions. 
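 * It is used when an address space is duplicated so that the new hat
 * inherits the parent's hme regions: the region reference count is
 * bumped, the new hat is linked at the head of the region's sfmmu list,
 * and the ttecnt / tsb0 inflation accounting is replayed for it.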
14409 */ 14410 void 14411 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14412 { 14413 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14414 uint_t rid = (uint_t)((uint64_t)rcookie); 14415 sf_region_t *rgnp; 14416 sf_rgn_link_t *rlink; 14417 sf_rgn_link_t *hrlink; 14418 ulong_t rttecnt; 14419 14420 ASSERT(sfmmup != ksfmmup); 14421 ASSERT(srdp != NULL); 14422 ASSERT(srdp->srd_refcnt > 0); 14423 14424 ASSERT(rid < srdp->srd_next_hmerid); 14425 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14426 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14427 14428 rgnp = srdp->srd_hmergnp[rid]; 14429 ASSERT(rgnp->rgn_refcnt > 0); 14430 ASSERT(rgnp->rgn_id == rid); 14431 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14432 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14433 14434 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14435 14436 /* LINTED: constant in conditional context */ 14437 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14438 ASSERT(rlink != NULL); 14439 mutex_enter(&rgnp->rgn_mutex); 14440 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14441 /* LINTED: constant in conditional context */ 14442 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14443 ASSERT(hrlink != NULL); 14444 ASSERT(hrlink->prev == NULL); 14445 rlink->next = rgnp->rgn_sfmmu_head; 14446 rlink->prev = NULL; 14447 hrlink->prev = sfmmup; 14448 /* 14449 * make sure rlink's next field is correct 14450 * before making this link visible. 14451 */ 14452 membar_stst(); 14453 rgnp->rgn_sfmmu_head = sfmmup; 14454 mutex_exit(&rgnp->rgn_mutex); 14455 14456 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14457 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14458 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14459 /* update tsb0 inflation count */ 14460 if (rgnp->rgn_pgszc >= TTE4M) { 14461 sfmmup->sfmmu_tsb0_4minflcnt += 14462 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14463 } 14464 /* 14465 * Update regionid bitmask without hat lock since no other thread 14466 * can update this region bitmask right now. 
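 * (The hat being set up here is a newly duplicated one that is not yet
 * visible to other threads.)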
14467 */ 14468 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14469 } 14470 14471 /* ARGSUSED */ 14472 static int 14473 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14474 { 14475 sf_region_t *rgnp = (sf_region_t *)buf; 14476 bzero(buf, sizeof (*rgnp)); 14477 14478 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14479 14480 return (0); 14481 } 14482 14483 /* ARGSUSED */ 14484 static void 14485 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14486 { 14487 sf_region_t *rgnp = (sf_region_t *)buf; 14488 mutex_destroy(&rgnp->rgn_mutex); 14489 } 14490 14491 static int 14492 sfrgnmap_isnull(sf_region_map_t *map) 14493 { 14494 int i; 14495 14496 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14497 if (map->bitmap[i] != 0) { 14498 return (0); 14499 } 14500 } 14501 return (1); 14502 } 14503 14504 static int 14505 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14506 { 14507 int i; 14508 14509 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14510 if (map->bitmap[i] != 0) { 14511 return (0); 14512 } 14513 } 14514 return (1); 14515 } 14516 14517 #ifdef DEBUG 14518 static void 14519 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14520 { 14521 sfmmu_t *sp; 14522 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14523 14524 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14525 ASSERT(srdp == sp->sfmmu_srdp); 14526 if (sp == sfmmup) { 14527 if (onlist) { 14528 return; 14529 } else { 14530 panic("shctx: sfmmu 0x%p found on scd" 14531 "list 0x%p", (void *)sfmmup, 14532 (void *)*headp); 14533 } 14534 } 14535 } 14536 if (onlist) { 14537 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14538 (void *)sfmmup, (void *)*headp); 14539 } else { 14540 return; 14541 } 14542 } 14543 #else /* DEBUG */ 14544 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14545 #endif /* DEBUG */ 14546 14547 /* 14548 * Removes an sfmmu from the SCD sfmmu list. 14549 */ 14550 static void 14551 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14552 { 14553 ASSERT(sfmmup->sfmmu_srdp != NULL); 14554 check_scd_sfmmu_list(headp, sfmmup, 1); 14555 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14556 ASSERT(*headp != sfmmup); 14557 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14558 sfmmup->sfmmu_scd_link.next; 14559 } else { 14560 ASSERT(*headp == sfmmup); 14561 *headp = sfmmup->sfmmu_scd_link.next; 14562 } 14563 if (sfmmup->sfmmu_scd_link.next != NULL) { 14564 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14565 sfmmup->sfmmu_scd_link.prev; 14566 } 14567 } 14568 14569 14570 /* 14571 * Adds an sfmmu to the start of the queue. 14572 */ 14573 static void 14574 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14575 { 14576 check_scd_sfmmu_list(headp, sfmmup, 0); 14577 sfmmup->sfmmu_scd_link.prev = NULL; 14578 sfmmup->sfmmu_scd_link.next = *headp; 14579 if (*headp != NULL) 14580 (*headp)->sfmmu_scd_link.prev = sfmmup; 14581 *headp = sfmmup; 14582 } 14583 14584 /* 14585 * Remove an scd from the start of the queue. 14586 */ 14587 static void 14588 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14589 { 14590 if (scdp->scd_prev != NULL) { 14591 ASSERT(*headp != scdp); 14592 scdp->scd_prev->scd_next = scdp->scd_next; 14593 } else { 14594 ASSERT(*headp == scdp); 14595 *headp = scdp->scd_next; 14596 } 14597 14598 if (scdp->scd_next != NULL) { 14599 scdp->scd_next->scd_prev = scdp->scd_prev; 14600 } 14601 } 14602 14603 /* 14604 * Add an scd to the start of the queue. 
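 * The scd list head is srdp->srd_scdp and is protected by srd_scd_mutex.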
14605 */ 14606 static void 14607 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14608 { 14609 scdp->scd_prev = NULL; 14610 scdp->scd_next = *headp; 14611 if (*headp != NULL) { 14612 (*headp)->scd_prev = scdp; 14613 } 14614 *headp = scdp; 14615 } 14616 14617 static int 14618 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14619 { 14620 uint_t rid; 14621 uint_t i; 14622 uint_t j; 14623 ulong_t w; 14624 sf_region_t *rgnp; 14625 ulong_t tte8k_cnt = 0; 14626 ulong_t tte4m_cnt = 0; 14627 uint_t tsb_szc; 14628 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14629 sfmmu_t *ism_hatid; 14630 struct tsb_info *newtsb; 14631 int szc; 14632 14633 ASSERT(srdp != NULL); 14634 14635 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14636 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14637 continue; 14638 } 14639 j = 0; 14640 while (w) { 14641 if (!(w & 0x1)) { 14642 j++; 14643 w >>= 1; 14644 continue; 14645 } 14646 rid = (i << BT_ULSHIFT) | j; 14647 j++; 14648 w >>= 1; 14649 14650 if (rid < SFMMU_MAX_HME_REGIONS) { 14651 rgnp = srdp->srd_hmergnp[rid]; 14652 ASSERT(rgnp->rgn_id == rid); 14653 ASSERT(rgnp->rgn_refcnt > 0); 14654 14655 if (rgnp->rgn_pgszc < TTE4M) { 14656 tte8k_cnt += rgnp->rgn_size >> 14657 TTE_PAGE_SHIFT(TTE8K); 14658 } else { 14659 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14660 tte4m_cnt += rgnp->rgn_size >> 14661 TTE_PAGE_SHIFT(TTE4M); 14662 /* 14663 * Inflate SCD tsb0 by preallocating 14664 * 1/4 8k ttecnt for 4M regions to 14665 * allow for lgpg alloc failure. 14666 */ 14667 tte8k_cnt += rgnp->rgn_size >> 14668 (TTE_PAGE_SHIFT(TTE8K) + 2); 14669 } 14670 } else { 14671 rid -= SFMMU_MAX_HME_REGIONS; 14672 rgnp = srdp->srd_ismrgnp[rid]; 14673 ASSERT(rgnp->rgn_id == rid); 14674 ASSERT(rgnp->rgn_refcnt > 0); 14675 14676 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14677 ASSERT(ism_hatid->sfmmu_ismhat); 14678 14679 for (szc = 0; szc < TTE4M; szc++) { 14680 tte8k_cnt += 14681 ism_hatid->sfmmu_ttecnt[szc] << 14682 TTE_BSZS_SHIFT(szc); 14683 } 14684 14685 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14686 if (rgnp->rgn_pgszc >= TTE4M) { 14687 tte4m_cnt += rgnp->rgn_size >> 14688 TTE_PAGE_SHIFT(TTE4M); 14689 } 14690 } 14691 } 14692 } 14693 14694 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14695 14696 /* Allocate both the SCD TSBs here. */ 14697 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14698 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14699 (tsb_szc <= TSB_4M_SZCODE || 14700 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14701 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14702 TSB_ALLOC, scsfmmup))) { 14703 14704 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14705 return (TSB_ALLOCFAIL); 14706 } else { 14707 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14708 14709 if (tte4m_cnt) { 14710 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14711 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14712 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14713 (tsb_szc <= TSB_4M_SZCODE || 14714 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14715 TSB4M|TSB32M|TSB256M, 14716 TSB_ALLOC, scsfmmup))) { 14717 /* 14718 * If we fail to allocate the 2nd shared tsb, 14719 * just free the 1st tsb, return failure. 
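 * (The first SCD TSB covers the 8K/64K/512K sizes and is sized from
 * tte8k_cnt, which already includes the 1/4 inflation for 4M regions;
 * this second TSB covers 4M/32M/256M and is only needed when tte4m_cnt
 * is nonzero.)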
14720 */ 14721 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14722 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14723 return (TSB_ALLOCFAIL); 14724 } else { 14725 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14726 newtsb->tsb_flags |= TSB_SHAREDCTX; 14727 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14728 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14729 } 14730 } 14731 SFMMU_STAT(sf_scd_1sttsb_alloc); 14732 } 14733 return (TSB_SUCCESS); 14734 } 14735 14736 static void 14737 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14738 { 14739 while (scd_sfmmu->sfmmu_tsb != NULL) { 14740 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14741 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14742 scd_sfmmu->sfmmu_tsb = next; 14743 } 14744 } 14745 14746 /* 14747 * Link the sfmmu onto the hme region list. 14748 */ 14749 void 14750 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14751 { 14752 uint_t rid; 14753 sf_rgn_link_t *rlink; 14754 sfmmu_t *head; 14755 sf_rgn_link_t *hrlink; 14756 14757 rid = rgnp->rgn_id; 14758 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14759 14760 /* LINTED: constant in conditional context */ 14761 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14762 ASSERT(rlink != NULL); 14763 mutex_enter(&rgnp->rgn_mutex); 14764 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14765 rlink->next = NULL; 14766 rlink->prev = NULL; 14767 /* 14768 * make sure rlink's next field is NULL 14769 * before making this link visible. 14770 */ 14771 membar_stst(); 14772 rgnp->rgn_sfmmu_head = sfmmup; 14773 } else { 14774 /* LINTED: constant in conditional context */ 14775 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14776 ASSERT(hrlink != NULL); 14777 ASSERT(hrlink->prev == NULL); 14778 rlink->next = head; 14779 rlink->prev = NULL; 14780 hrlink->prev = sfmmup; 14781 /* 14782 * make sure rlink's next field is correct 14783 * before making this link visible. 14784 */ 14785 membar_stst(); 14786 rgnp->rgn_sfmmu_head = sfmmup; 14787 } 14788 mutex_exit(&rgnp->rgn_mutex); 14789 } 14790 14791 /* 14792 * Unlink the sfmmu from the hme region list. 14793 */ 14794 void 14795 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14796 { 14797 uint_t rid; 14798 sf_rgn_link_t *rlink; 14799 14800 rid = rgnp->rgn_id; 14801 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14802 14803 /* LINTED: constant in conditional context */ 14804 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14805 ASSERT(rlink != NULL); 14806 mutex_enter(&rgnp->rgn_mutex); 14807 if (rgnp->rgn_sfmmu_head == sfmmup) { 14808 sfmmu_t *next = rlink->next; 14809 rgnp->rgn_sfmmu_head = next; 14810 /* 14811 * if we are stopped by xc_attention() after this 14812 * point the forward link walking in 14813 * sfmmu_rgntlb_demap() will work correctly since the 14814 * head correctly points to the next element. 
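 * The membar_stst() below ensures the store of the new head is visible
 * before rlink->next is cleared, which is what makes that guarantee
 * hold.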
14815 */ 14816 membar_stst(); 14817 rlink->next = NULL; 14818 ASSERT(rlink->prev == NULL); 14819 if (next != NULL) { 14820 sf_rgn_link_t *nrlink; 14821 /* LINTED: constant in conditional context */ 14822 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14823 ASSERT(nrlink != NULL); 14824 ASSERT(nrlink->prev == sfmmup); 14825 nrlink->prev = NULL; 14826 } 14827 } else { 14828 sfmmu_t *next = rlink->next; 14829 sfmmu_t *prev = rlink->prev; 14830 sf_rgn_link_t *prlink; 14831 14832 ASSERT(prev != NULL); 14833 /* LINTED: constant in conditional context */ 14834 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14835 ASSERT(prlink != NULL); 14836 ASSERT(prlink->next == sfmmup); 14837 prlink->next = next; 14838 /* 14839 * if we are stopped by xc_attention() 14840 * after this point the forward link walking 14841 * will work correctly since the prev element 14842 * correctly points to the next element. 14843 */ 14844 membar_stst(); 14845 rlink->next = NULL; 14846 rlink->prev = NULL; 14847 if (next != NULL) { 14848 sf_rgn_link_t *nrlink; 14849 /* LINTED: constant in conditional context */ 14850 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14851 ASSERT(nrlink != NULL); 14852 ASSERT(nrlink->prev == sfmmup); 14853 nrlink->prev = prev; 14854 } 14855 } 14856 mutex_exit(&rgnp->rgn_mutex); 14857 } 14858 14859 /* 14860 * Link scd sfmmu onto ism or hme region list for each region in the 14861 * scd region map. 14862 */ 14863 void 14864 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14865 { 14866 uint_t rid; 14867 uint_t i; 14868 uint_t j; 14869 ulong_t w; 14870 sf_region_t *rgnp; 14871 sfmmu_t *scsfmmup; 14872 14873 scsfmmup = scdp->scd_sfmmup; 14874 ASSERT(scsfmmup->sfmmu_scdhat); 14875 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14876 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14877 continue; 14878 } 14879 j = 0; 14880 while (w) { 14881 if (!(w & 0x1)) { 14882 j++; 14883 w >>= 1; 14884 continue; 14885 } 14886 rid = (i << BT_ULSHIFT) | j; 14887 j++; 14888 w >>= 1; 14889 14890 if (rid < SFMMU_MAX_HME_REGIONS) { 14891 rgnp = srdp->srd_hmergnp[rid]; 14892 ASSERT(rgnp->rgn_id == rid); 14893 ASSERT(rgnp->rgn_refcnt > 0); 14894 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 14895 } else { 14896 sfmmu_t *ism_hatid = NULL; 14897 ism_ment_t *ism_ment; 14898 rid -= SFMMU_MAX_HME_REGIONS; 14899 rgnp = srdp->srd_ismrgnp[rid]; 14900 ASSERT(rgnp->rgn_id == rid); 14901 ASSERT(rgnp->rgn_refcnt > 0); 14902 14903 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14904 ASSERT(ism_hatid->sfmmu_ismhat); 14905 ism_ment = &scdp->scd_ism_links[rid]; 14906 ism_ment->iment_hat = scsfmmup; 14907 ism_ment->iment_base_va = rgnp->rgn_saddr; 14908 mutex_enter(&ism_mlist_lock); 14909 iment_add(ism_ment, ism_hatid); 14910 mutex_exit(&ism_mlist_lock); 14911 14912 } 14913 } 14914 } 14915 } 14916 /* 14917 * Unlink scd sfmmu from ism or hme region list for each region in the 14918 * scd region map. 
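 *
 * Region ids below SFMMU_MAX_HME_REGIONS in the scd map refer to hme
 * regions; larger ids index ism regions after subtracting
 * SFMMU_MAX_HME_REGIONS.  For ism regions the scd's ism_ment is also
 * removed from the ism hat's mapping list here.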
14919 */ 14920 void 14921 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14922 { 14923 uint_t rid; 14924 uint_t i; 14925 uint_t j; 14926 ulong_t w; 14927 sf_region_t *rgnp; 14928 sfmmu_t *scsfmmup; 14929 14930 scsfmmup = scdp->scd_sfmmup; 14931 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14932 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14933 continue; 14934 } 14935 j = 0; 14936 while (w) { 14937 if (!(w & 0x1)) { 14938 j++; 14939 w >>= 1; 14940 continue; 14941 } 14942 rid = (i << BT_ULSHIFT) | j; 14943 j++; 14944 w >>= 1; 14945 14946 if (rid < SFMMU_MAX_HME_REGIONS) { 14947 rgnp = srdp->srd_hmergnp[rid]; 14948 ASSERT(rgnp->rgn_id == rid); 14949 ASSERT(rgnp->rgn_refcnt > 0); 14950 sfmmu_unlink_from_hmeregion(scsfmmup, 14951 rgnp); 14952 14953 } else { 14954 sfmmu_t *ism_hatid = NULL; 14955 ism_ment_t *ism_ment; 14956 rid -= SFMMU_MAX_HME_REGIONS; 14957 rgnp = srdp->srd_ismrgnp[rid]; 14958 ASSERT(rgnp->rgn_id == rid); 14959 ASSERT(rgnp->rgn_refcnt > 0); 14960 14961 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14962 ASSERT(ism_hatid->sfmmu_ismhat); 14963 ism_ment = &scdp->scd_ism_links[rid]; 14964 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 14965 ASSERT(ism_ment->iment_base_va == 14966 rgnp->rgn_saddr); 14967 ism_ment->iment_hat = NULL; 14968 ism_ment->iment_base_va = 0; 14969 mutex_enter(&ism_mlist_lock); 14970 iment_sub(ism_ment, ism_hatid); 14971 mutex_exit(&ism_mlist_lock); 14972 14973 } 14974 } 14975 } 14976 } 14977 /* 14978 * Allocates and initialises a new SCD structure, this is called with 14979 * the srd_scd_mutex held and returns with the reference count 14980 * initialised to 1. 14981 */ 14982 static sf_scd_t * 14983 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 14984 { 14985 sf_scd_t *new_scdp; 14986 sfmmu_t *scsfmmup; 14987 int i; 14988 14989 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 14990 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 14991 14992 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 14993 new_scdp->scd_sfmmup = scsfmmup; 14994 scsfmmup->sfmmu_srdp = srdp; 14995 scsfmmup->sfmmu_scdp = new_scdp; 14996 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 14997 scsfmmup->sfmmu_scdhat = 1; 14998 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 14999 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 15000 15001 ASSERT(max_mmu_ctxdoms > 0); 15002 for (i = 0; i < max_mmu_ctxdoms; i++) { 15003 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 15004 scsfmmup->sfmmu_ctxs[i].gnum = 0; 15005 } 15006 15007 for (i = 0; i < MMU_PAGE_SIZES; i++) { 15008 new_scdp->scd_rttecnt[i] = 0; 15009 } 15010 15011 new_scdp->scd_region_map = *new_map; 15012 new_scdp->scd_refcnt = 1; 15013 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 15014 kmem_cache_free(scd_cache, new_scdp); 15015 kmem_cache_free(sfmmuid_cache, scsfmmup); 15016 return (NULL); 15017 } 15018 return (new_scdp); 15019 } 15020 15021 /* 15022 * The first phase of a process joining an SCD. The hat structure is 15023 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 15024 * and a cross-call with context invalidation is used to cause the 15025 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 15026 * routine. 
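 *
 * Concretely, this first phase moves the hat onto the scd's sf_list,
 * takes an scd reference, moves the shared-region tte counts out of the
 * private sfmmu_ttecnt (they are tracked via scd_rttecnt instead), and
 * invalidates the context.  The second phase, sfmmu_finish_join_scd(),
 * runs from the tsbmiss handler: it invalidates the private TSBs and
 * sets HAT_CTX1_FLAG on the scd's ISM maps.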
15027 */ 15028 static void 15029 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 15030 { 15031 hatlock_t *hatlockp; 15032 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15033 int i; 15034 sf_scd_t *old_scdp; 15035 15036 ASSERT(srdp != NULL); 15037 ASSERT(scdp != NULL); 15038 ASSERT(scdp->scd_refcnt > 0); 15039 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15040 15041 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 15042 ASSERT(old_scdp != scdp); 15043 15044 mutex_enter(&old_scdp->scd_mutex); 15045 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 15046 mutex_exit(&old_scdp->scd_mutex); 15047 /* 15048 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 15049 * include the shme rgn ttecnt for rgns that 15050 * were in the old SCD 15051 */ 15052 for (i = 0; i < mmu_page_sizes; i++) { 15053 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15054 old_scdp->scd_rttecnt[i]); 15055 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15056 sfmmup->sfmmu_scdrttecnt[i]); 15057 } 15058 } 15059 15060 /* 15061 * Move sfmmu to the scd lists. 15062 */ 15063 mutex_enter(&scdp->scd_mutex); 15064 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 15065 mutex_exit(&scdp->scd_mutex); 15066 SF_SCD_INCR_REF(scdp); 15067 15068 hatlockp = sfmmu_hat_enter(sfmmup); 15069 /* 15070 * For a multi-thread process, we must stop 15071 * all the other threads before joining the scd. 15072 */ 15073 15074 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 15075 15076 sfmmu_invalidate_ctx(sfmmup); 15077 sfmmup->sfmmu_scdp = scdp; 15078 15079 /* 15080 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 15081 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 15082 */ 15083 for (i = 0; i < mmu_page_sizes; i++) { 15084 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 15085 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 15086 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15087 -sfmmup->sfmmu_scdrttecnt[i]); 15088 } 15089 /* update tsb0 inflation count */ 15090 if (old_scdp != NULL) { 15091 sfmmup->sfmmu_tsb0_4minflcnt += 15092 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15093 } 15094 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 15095 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 15096 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15097 15098 sfmmu_hat_exit(hatlockp); 15099 15100 if (old_scdp != NULL) { 15101 SF_SCD_DECR_REF(srdp, old_scdp); 15102 } 15103 15104 } 15105 15106 /* 15107 * This routine is called by a process to become part of an SCD. It is called 15108 * from sfmmu_tsbmiss_exception() once most of the initial work has been 15109 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 
15110 */ 15111 static void 15112 sfmmu_finish_join_scd(sfmmu_t *sfmmup) 15113 { 15114 struct tsb_info *tsbinfop; 15115 15116 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15117 ASSERT(sfmmup->sfmmu_scdp != NULL); 15118 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)); 15119 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15120 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)); 15121 15122 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 15123 tsbinfop = tsbinfop->tsb_next) { 15124 if (tsbinfop->tsb_flags & TSB_SWAPPED) { 15125 continue; 15126 } 15127 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG)); 15128 15129 sfmmu_inv_tsb(tsbinfop->tsb_va, 15130 TSB_BYTES(tsbinfop->tsb_szc)); 15131 } 15132 15133 /* Set HAT_CTX1_FLAG for all SCD ISMs */ 15134 sfmmu_ism_hatflags(sfmmup, 1); 15135 15136 SFMMU_STAT(sf_join_scd); 15137 } 15138 15139 /* 15140 * This routine is called in order to check if there is an SCD which matches 15141 * the process's region map if not then a new SCD may be created. 15142 */ 15143 static void 15144 sfmmu_find_scd(sfmmu_t *sfmmup) 15145 { 15146 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15147 sf_scd_t *scdp, *new_scdp; 15148 int ret; 15149 15150 ASSERT(srdp != NULL); 15151 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15152 15153 mutex_enter(&srdp->srd_scd_mutex); 15154 for (scdp = srdp->srd_scdp; scdp != NULL; 15155 scdp = scdp->scd_next) { 15156 SF_RGNMAP_EQUAL(&scdp->scd_region_map, 15157 &sfmmup->sfmmu_region_map, ret); 15158 if (ret == 1) { 15159 SF_SCD_INCR_REF(scdp); 15160 mutex_exit(&srdp->srd_scd_mutex); 15161 sfmmu_join_scd(scdp, sfmmup); 15162 ASSERT(scdp->scd_refcnt >= 2); 15163 atomic_add_32((volatile uint32_t *) 15164 &scdp->scd_refcnt, -1); 15165 return; 15166 } else { 15167 /* 15168 * If the sfmmu region map is a subset of the scd 15169 * region map, then the assumption is that this process 15170 * will continue attaching to ISM segments until the 15171 * region maps are equal. 15172 */ 15173 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map, 15174 &sfmmup->sfmmu_region_map, ret); 15175 if (ret == 1) { 15176 mutex_exit(&srdp->srd_scd_mutex); 15177 return; 15178 } 15179 } 15180 } 15181 15182 ASSERT(scdp == NULL); 15183 /* 15184 * No matching SCD has been found, create a new one. 15185 */ 15186 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) == 15187 NULL) { 15188 mutex_exit(&srdp->srd_scd_mutex); 15189 return; 15190 } 15191 15192 /* 15193 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd. 15194 */ 15195 15196 /* Set scd_rttecnt for shme rgns in SCD */ 15197 sfmmu_set_scd_rttecnt(srdp, new_scdp); 15198 15199 /* 15200 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists. 15201 */ 15202 sfmmu_link_scd_to_regions(srdp, new_scdp); 15203 sfmmu_add_scd(&srdp->srd_scdp, new_scdp); 15204 SFMMU_STAT_ADD(sf_create_scd, 1); 15205 15206 mutex_exit(&srdp->srd_scd_mutex); 15207 sfmmu_join_scd(new_scdp, sfmmup); 15208 ASSERT(new_scdp->scd_refcnt >= 2); 15209 atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1); 15210 } 15211 15212 /* 15213 * This routine is called by a process to remove itself from an SCD. It is 15214 * either called when the processes has detached from a segment or from 15215 * hat_free_start() as a result of calling exit. 
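 *
 * In the hat_free_start() (process exit) case only the scd list and
 * ttecnt accounting need to be unwound.  Otherwise the context is
 * invalidated, HAT_CTX1_FLAG is cleared from the ISM maps and the
 * shared-region tte counts are folded back into the private
 * sfmmu_ttecnt before the hat is taken off the scd list.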
15216 */ 15217 static void 15218 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15219 { 15220 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15221 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15222 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15223 int i; 15224 15225 ASSERT(scdp != NULL); 15226 ASSERT(srdp != NULL); 15227 15228 if (sfmmup->sfmmu_free) { 15229 /* 15230 * If the process is part of an SCD the sfmmu is unlinked 15231 * from scd_sf_list. 15232 */ 15233 mutex_enter(&scdp->scd_mutex); 15234 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15235 mutex_exit(&scdp->scd_mutex); 15236 /* 15237 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15238 * are about to leave the SCD 15239 */ 15240 for (i = 0; i < mmu_page_sizes; i++) { 15241 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15242 scdp->scd_rttecnt[i]); 15243 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15244 sfmmup->sfmmu_scdrttecnt[i]); 15245 sfmmup->sfmmu_scdrttecnt[i] = 0; 15246 } 15247 sfmmup->sfmmu_scdp = NULL; 15248 15249 SF_SCD_DECR_REF(srdp, scdp); 15250 return; 15251 } 15252 15253 ASSERT(r_type != SFMMU_REGION_ISM || 15254 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15255 ASSERT(scdp->scd_refcnt); 15256 ASSERT(!sfmmup->sfmmu_free); 15257 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15258 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15259 15260 /* 15261 * Wait for ISM maps to be updated. 15262 */ 15263 if (r_type != SFMMU_REGION_ISM) { 15264 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15265 sfmmup->sfmmu_scdp != NULL) { 15266 cv_wait(&sfmmup->sfmmu_tsb_cv, 15267 HATLOCK_MUTEXP(hatlockp)); 15268 } 15269 15270 if (sfmmup->sfmmu_scdp == NULL) { 15271 sfmmu_hat_exit(hatlockp); 15272 return; 15273 } 15274 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15275 } 15276 15277 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15278 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15279 /* 15280 * Since HAT_JOIN_SCD was set our context 15281 * is still invalid. 15282 */ 15283 } else { 15284 /* 15285 * For a multi-thread process, we must stop 15286 * all the other threads before leaving the scd. 15287 */ 15288 15289 sfmmu_invalidate_ctx(sfmmup); 15290 } 15291 15292 /* Clear all the rid's for ISM, delete flags, etc */ 15293 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15294 sfmmu_ism_hatflags(sfmmup, 0); 15295 15296 /* 15297 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15298 * are in SCD before this sfmmup leaves the SCD. 15299 */ 15300 for (i = 0; i < mmu_page_sizes; i++) { 15301 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15302 scdp->scd_rttecnt[i]); 15303 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15304 sfmmup->sfmmu_scdrttecnt[i]); 15305 sfmmup->sfmmu_scdrttecnt[i] = 0; 15306 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15307 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15308 sfmmup->sfmmu_scdismttecnt[i] = 0; 15309 } 15310 /* update tsb0 inflation count */ 15311 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15312 15313 if (r_type != SFMMU_REGION_ISM) { 15314 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15315 } 15316 sfmmup->sfmmu_scdp = NULL; 15317 15318 sfmmu_hat_exit(hatlockp); 15319 15320 /* 15321 * Unlink sfmmu from scd_sf_list this can be done without holding 15322 * the hat lock as we hold the sfmmu_as lock which prevents 15323 * hat_join_region from adding this thread to the scd again. Other 15324 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15325 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15326 * while holding the hat lock. 
15327 */ 15328 mutex_enter(&scdp->scd_mutex); 15329 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15330 mutex_exit(&scdp->scd_mutex); 15331 SFMMU_STAT(sf_leave_scd); 15332 15333 SF_SCD_DECR_REF(srdp, scdp); 15334 hatlockp = sfmmu_hat_enter(sfmmup); 15335 15336 } 15337 15338 /* 15339 * Unlink and free up an SCD structure with a reference count of 0. 15340 */ 15341 static void 15342 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15343 { 15344 sfmmu_t *scsfmmup; 15345 sf_scd_t *sp; 15346 hatlock_t *shatlockp; 15347 int i, ret; 15348 15349 mutex_enter(&srdp->srd_scd_mutex); 15350 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15351 if (sp == scdp) 15352 break; 15353 } 15354 if (sp == NULL || sp->scd_refcnt) { 15355 mutex_exit(&srdp->srd_scd_mutex); 15356 return; 15357 } 15358 15359 /* 15360 * It is possible that the scd has been freed and reallocated with a 15361 * different region map while we've been waiting for the srd_scd_mutex. 15362 */ 15363 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret); 15364 if (ret != 1) { 15365 mutex_exit(&srdp->srd_scd_mutex); 15366 return; 15367 } 15368 15369 ASSERT(scdp->scd_sf_list == NULL); 15370 /* 15371 * Unlink scd from srd_scdp list. 15372 */ 15373 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15374 mutex_exit(&srdp->srd_scd_mutex); 15375 15376 sfmmu_unlink_scd_from_regions(srdp, scdp); 15377 15378 /* Clear shared context tsb and release ctx */ 15379 scsfmmup = scdp->scd_sfmmup; 15380 15381 /* 15382 * create a barrier so that scd will not be destroyed 15383 * if other thread still holds the same shared hat lock. 15384 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15385 * shared hat lock before checking the shared tsb reloc flag. 15386 */ 15387 shatlockp = sfmmu_hat_enter(scsfmmup); 15388 sfmmu_hat_exit(shatlockp); 15389 15390 sfmmu_free_scd_tsbs(scsfmmup); 15391 15392 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 15393 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) { 15394 kmem_free(scsfmmup->sfmmu_hmeregion_links[i], 15395 SFMMU_L2_HMERLINKS_SIZE); 15396 scsfmmup->sfmmu_hmeregion_links[i] = NULL; 15397 } 15398 } 15399 kmem_cache_free(sfmmuid_cache, scsfmmup); 15400 kmem_cache_free(scd_cache, scdp); 15401 SFMMU_STAT(sf_destroy_scd); 15402 } 15403 15404 /* 15405 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to 15406 * bits which are set in the ism_region_map parameter. This flag indicates to 15407 * the tsbmiss handler that mapping for these segments should be loaded using 15408 * the shared context. 15409 */ 15410 static void 15411 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag) 15412 { 15413 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15414 ism_blk_t *ism_blkp; 15415 ism_map_t *ism_map; 15416 int i, rid; 15417 15418 ASSERT(sfmmup->sfmmu_iblk != NULL); 15419 ASSERT(scdp != NULL); 15420 /* 15421 * Note that the caller either set HAT_ISMBUSY flag or checked 15422 * under hat lock that HAT_ISMBUSY was not set by another thread. 
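 * Holding HAT_ISMBUSY keeps the ism map blocks stable while they are
 * walked and updated below.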
15423 */ 15424 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15425 15426 ism_blkp = sfmmup->sfmmu_iblk; 15427 while (ism_blkp != NULL) { 15428 ism_map = ism_blkp->iblk_maps; 15429 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 15430 rid = ism_map[i].imap_rid; 15431 if (rid == SFMMU_INVALID_ISMRID) { 15432 continue; 15433 } 15434 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS); 15435 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) && 15436 addflag) { 15437 ism_map[i].imap_hatflags |= 15438 HAT_CTX1_FLAG; 15439 } else { 15440 ism_map[i].imap_hatflags &= 15441 ~HAT_CTX1_FLAG; 15442 } 15443 } 15444 ism_blkp = ism_blkp->iblk_next; 15445 } 15446 } 15447 15448 static int 15449 sfmmu_srd_lock_held(sf_srd_t *srdp) 15450 { 15451 return (MUTEX_HELD(&srdp->srd_mutex)); 15452 } 15453 15454 /* ARGSUSED */ 15455 static int 15456 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags) 15457 { 15458 sf_scd_t *scdp = (sf_scd_t *)buf; 15459 15460 bzero(buf, sizeof (sf_scd_t)); 15461 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL); 15462 return (0); 15463 } 15464 15465 /* ARGSUSED */ 15466 static void 15467 sfmmu_scdcache_destructor(void *buf, void *cdrarg) 15468 { 15469 sf_scd_t *scdp = (sf_scd_t *)buf; 15470 15471 mutex_destroy(&scdp->scd_mutex); 15472 } 15473