1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * VM - Hardware Address Translation management for Spitfire MMU. 30 * 31 * This file implements the machine specific hardware translation 32 * needed by the VM system. The machine independent interface is 33 * described in <vm/hat.h> while the machine dependent interface 34 * and data structures are described in <vm/hat_sfmmu.h>. 35 * 36 * The hat layer manages the address translation hardware as a cache 37 * driven by calls from the higher levels in the VM system. 38 */ 39 40 #include <sys/types.h> 41 #include <sys/kstat.h> 42 #include <vm/hat.h> 43 #include <vm/hat_sfmmu.h> 44 #include <vm/page.h> 45 #include <sys/pte.h> 46 #include <sys/systm.h> 47 #include <sys/mman.h> 48 #include <sys/sysmacros.h> 49 #include <sys/machparam.h> 50 #include <sys/vtrace.h> 51 #include <sys/kmem.h> 52 #include <sys/mmu.h> 53 #include <sys/cmn_err.h> 54 #include <sys/cpu.h> 55 #include <sys/cpuvar.h> 56 #include <sys/debug.h> 57 #include <sys/lgrp.h> 58 #include <sys/archsystm.h> 59 #include <sys/machsystm.h> 60 #include <sys/vmsystm.h> 61 #include <vm/as.h> 62 #include <vm/seg.h> 63 #include <vm/seg_kp.h> 64 #include <vm/seg_kmem.h> 65 #include <vm/seg_kpm.h> 66 #include <vm/rm.h> 67 #include <sys/t_lock.h> 68 #include <sys/obpdefs.h> 69 #include <sys/vm_machparam.h> 70 #include <sys/var.h> 71 #include <sys/trap.h> 72 #include <sys/machtrap.h> 73 #include <sys/scb.h> 74 #include <sys/bitmap.h> 75 #include <sys/machlock.h> 76 #include <sys/membar.h> 77 #include <sys/atomic.h> 78 #include <sys/cpu_module.h> 79 #include <sys/prom_debug.h> 80 #include <sys/ksynch.h> 81 #include <sys/mem_config.h> 82 #include <sys/mem_cage.h> 83 #include <vm/vm_dep.h> 84 #include <vm/xhat_sfmmu.h> 85 #include <sys/fpu/fpusystm.h> 86 #include <vm/mach_kpm.h> 87 #include <sys/callb.h> 88 89 #ifdef DEBUG 90 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 91 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 92 caddr_t _eaddr = (saddr) + (len); \ 93 sf_srd_t *_srdp; \ 94 sf_region_t *_rgnp; \ 95 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 96 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 97 ASSERT((hat) != ksfmmup); \ 98 _srdp = (hat)->sfmmu_srdp; \ 99 ASSERT(_srdp != NULL); \ 100 ASSERT(_srdp->srd_refcnt != 0); \ 101 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 102 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 103 ASSERT(_rgnp->rgn_refcnt != 0); \ 104 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 105 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 106 SFMMU_REGION_HME); \ 107 ASSERT((saddr) >= _rgnp->rgn_saddr); \ 108 
ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \ 109 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 110 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 111 } 112 113 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 114 { \ 115 caddr_t _hsva; \ 116 caddr_t _heva; \ 117 caddr_t _rsva; \ 118 caddr_t _reva; \ 119 int _ttesz = get_hblk_ttesz(hmeblkp); \ 120 int _flagtte; \ 121 ASSERT((srdp)->srd_refcnt != 0); \ 122 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 123 ASSERT((rgnp)->rgn_id == rid); \ 124 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 125 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 126 SFMMU_REGION_HME); \ 127 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 128 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 129 _heva = get_hblk_endaddr(hmeblkp); \ 130 _rsva = (caddr_t)P2ALIGN( \ 131 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 132 _reva = (caddr_t)P2ROUNDUP( \ 133 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 134 HBLK_MIN_BYTES); \ 135 ASSERT(_hsva >= _rsva); \ 136 ASSERT(_hsva < _reva); \ 137 ASSERT(_heva > _rsva); \ 138 ASSERT(_heva <= _reva); \ 139 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 140 _ttesz; \ 141 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 142 } 143 144 #else /* DEBUG */ 145 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 146 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 147 #endif /* DEBUG */ 148 149 #if defined(SF_ERRATA_57) 150 extern caddr_t errata57_limit; 151 #endif 152 153 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 154 (sizeof (int64_t))) 155 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 156 157 #define HBLK_RESERVE_CNT 128 158 #define HBLK_RESERVE_MIN 20 159 160 static struct hme_blk *freehblkp; 161 static kmutex_t freehblkp_lock; 162 static int freehblkcnt; 163 164 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 165 static kmutex_t hblk_reserve_lock; 166 static kthread_t *hblk_reserve_thread; 167 168 static nucleus_hblk8_info_t nucleus_hblk8; 169 static nucleus_hblk1_info_t nucleus_hblk1; 170 171 /* 172 * SFMMU specific hat functions 173 */ 174 void hat_pagecachectl(struct page *, int); 175 176 /* flags for hat_pagecachectl */ 177 #define HAT_CACHE 0x1 178 #define HAT_UNCACHE 0x2 179 #define HAT_TMPNC 0x4 180 181 /* 182 * Flag to allow the creation of non-cacheable translations 183 * to system memory. It is off by default. At the moment this 184 * flag is used by the ecache error injector. The error injector 185 * will turn it on when creating such a translation then shut it 186 * off when it's finished. 187 */ 188 189 int sfmmu_allow_nc_trans = 0; 190 191 /* 192 * Flag to disable large page support. 193 * value of 1 => disable all large pages. 194 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 195 * 196 * For example, use the value 0x4 to disable 512K pages. 197 * 198 */ 199 #define LARGE_PAGES_OFF 0x1 200 201 /* 202 * The disable_large_pages and disable_ism_large_pages variables control 203 * hat_memload_array and the page sizes to be used by ISM and the kernel. 204 * 205 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 206 * are only used to control which OOB pages to use at upper VM segment creation 207 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 208 * Their values may come from platform or CPU specific code to disable page 209 * sizes that should not be used. 210 * 211 * WARNING: 512K pages are currently not supported for ISM/DISM. 
212 */ 213 uint_t disable_large_pages = 0; 214 uint_t disable_ism_large_pages = (1 << TTE512K); 215 uint_t disable_auto_data_large_pages = 0; 216 uint_t disable_auto_text_large_pages = 0; 217 218 /* 219 * Private sfmmu data structures for hat management 220 */ 221 static struct kmem_cache *sfmmuid_cache; 222 static struct kmem_cache *mmuctxdom_cache; 223 224 /* 225 * Private sfmmu data structures for tsb management 226 */ 227 static struct kmem_cache *sfmmu_tsbinfo_cache; 228 static struct kmem_cache *sfmmu_tsb8k_cache; 229 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 230 static vmem_t *kmem_bigtsb_arena; 231 static vmem_t *kmem_tsb_arena; 232 233 /* 234 * sfmmu static variables for hmeblk resource management. 235 */ 236 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 237 static struct kmem_cache *sfmmu8_cache; 238 static struct kmem_cache *sfmmu1_cache; 239 static struct kmem_cache *pa_hment_cache; 240 241 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 242 /* 243 * private data for ism 244 */ 245 static struct kmem_cache *ism_blk_cache; 246 static struct kmem_cache *ism_ment_cache; 247 #define ISMID_STARTADDR NULL 248 249 /* 250 * Region management data structures and function declarations. 251 */ 252 253 static void sfmmu_leave_srd(sfmmu_t *); 254 static int sfmmu_srdcache_constructor(void *, void *, int); 255 static void sfmmu_srdcache_destructor(void *, void *); 256 static int sfmmu_rgncache_constructor(void *, void *, int); 257 static void sfmmu_rgncache_destructor(void *, void *); 258 static int sfrgnmap_isnull(sf_region_map_t *); 259 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 260 static int sfmmu_scdcache_constructor(void *, void *, int); 261 static void sfmmu_scdcache_destructor(void *, void *); 262 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 263 size_t, void *, u_offset_t); 264 265 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 266 static sf_srd_bucket_t *srd_buckets; 267 static struct kmem_cache *srd_cache; 268 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 269 static struct kmem_cache *region_cache; 270 static struct kmem_cache *scd_cache; 271 272 #ifdef sun4v 273 int use_bigtsb_arena = 1; 274 #else 275 int use_bigtsb_arena = 0; 276 #endif 277 278 /* External /etc/system tunable, for turning on&off the shctx support */ 279 int disable_shctx = 0; 280 /* Internal variable, set by MD if the HW supports shctx feature */ 281 int shctx_on = 0; 282 283 #ifdef DEBUG 284 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 285 #endif 286 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 287 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 288 289 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 290 static void sfmmu_find_scd(sfmmu_t *); 291 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 292 static void sfmmu_finish_join_scd(sfmmu_t *); 293 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 294 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 295 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 296 static void sfmmu_free_scd_tsbs(sfmmu_t *); 297 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 298 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 299 static void sfmmu_ism_hatflags(sfmmu_t *, int); 300 static int sfmmu_srd_lock_held(sf_srd_t *); 301 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 302 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 303 static void 
sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 304 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 305 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 306 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 307 308 /* 309 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 310 * HAT flags, synchronizing TLB/TSB coherency, and context management. 311 * The lock is hashed on the sfmmup since the case where we need to lock 312 * all processes is rare but does occur (e.g. we need to unload a shared 313 * mapping from all processes using the mapping). We have a lot of buckets, 314 * and each slab of sfmmu_t's can use about a quarter of them, giving us 315 * a fairly good distribution without wasting too much space and overhead 316 * when we have to grab them all. 317 */ 318 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 319 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 320 321 /* 322 * Hash algorithm optimized for a small number of slabs. 323 * 7 is (highbit((sizeof sfmmu_t)) - 1) 324 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 325 * kmem_cache, and thus they will be sequential within that cache. In 326 * addition, each new slab will have a different "color" up to cache_maxcolor 327 * which will skew the hashing for each successive slab which is allocated. 328 * If the size of sfmmu_t changed to a larger size, this algorithm may need 329 * to be revisited. 330 */ 331 #define TSB_HASH_SHIFT_BITS (7) 332 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 333 334 #ifdef DEBUG 335 int tsb_hash_debug = 0; 336 #define TSB_HASH(sfmmup) \ 337 (tsb_hash_debug ? &hat_lock[0] : \ 338 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 339 #else /* DEBUG */ 340 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 341 #endif /* DEBUG */ 342 343 344 /* sfmmu_replace_tsb() return codes. */ 345 typedef enum tsb_replace_rc { 346 TSB_SUCCESS, 347 TSB_ALLOCFAIL, 348 TSB_LOSTRACE, 349 TSB_ALREADY_SWAPPED, 350 TSB_CANTGROW 351 } tsb_replace_rc_t; 352 353 /* 354 * Flags for TSB allocation routines. 355 */ 356 #define TSB_ALLOC 0x01 357 #define TSB_FORCEALLOC 0x02 358 #define TSB_GROW 0x04 359 #define TSB_SHRINK 0x08 360 #define TSB_SWAPIN 0x10 361 362 /* 363 * Support for HAT callbacks. 364 */ 365 #define SFMMU_MAX_RELOC_CALLBACKS 10 366 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 367 static id_t sfmmu_cb_nextid = 0; 368 static id_t sfmmu_tsb_cb_id; 369 struct sfmmu_callback *sfmmu_cb_table; 370 371 /* 372 * Kernel page relocation is enabled by default for non-caged 373 * kernel pages. This has little effect unless segkmem_reloc is 374 * set, since by default kernel memory comes from inside the 375 * kernel cage. 376 */ 377 int hat_kpr_enabled = 1; 378 379 kmutex_t kpr_mutex; 380 kmutex_t kpr_suspendlock; 381 kthread_t *kreloc_thread; 382 383 /* 384 * Enable VA->PA translation sanity checking on DEBUG kernels. 385 * Disabled by default. This is incompatible with some 386 * drivers (error injector, RSM) so if it breaks you get 387 * to keep both pieces. 
388 */ 389 int hat_check_vtop = 0; 390 391 /* 392 * Private sfmmu routines (prototypes) 393 */ 394 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 395 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 396 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 397 uint_t); 398 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 399 caddr_t, demap_range_t *, uint_t); 400 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 401 caddr_t, int); 402 static void sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *, 403 uint64_t, struct hme_blk **); 404 static void sfmmu_hblks_list_purge(struct hme_blk **); 405 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 406 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 407 static struct hme_blk *sfmmu_hblk_steal(int); 408 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 409 struct hme_blk *, uint64_t, uint64_t, 410 struct hme_blk *); 411 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 412 413 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 414 struct page **, uint_t, uint_t, uint_t); 415 static void hat_do_memload(struct hat *, caddr_t, struct page *, 416 uint_t, uint_t, uint_t); 417 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 418 uint_t, uint_t, pgcnt_t, uint_t); 419 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 420 uint_t); 421 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 422 uint_t, uint_t); 423 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 424 caddr_t, int, uint_t); 425 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 426 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 427 uint_t); 428 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 429 caddr_t, page_t **, uint_t, uint_t); 430 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 431 432 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 433 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 434 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 435 #ifdef VAC 436 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 437 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 438 int tst_tnc(page_t *pp, pgcnt_t); 439 void conv_tnc(page_t *pp, int); 440 #endif 441 442 static void sfmmu_get_ctx(sfmmu_t *); 443 static void sfmmu_free_sfmmu(sfmmu_t *); 444 445 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 446 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 447 448 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 449 static void hat_pagereload(struct page *, struct page *); 450 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 451 #ifdef VAC 452 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 453 static void sfmmu_page_cache(page_t *, int, int, int); 454 #endif 455 456 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 457 struct hme_blk *, int); 458 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 459 pfn_t, int, int, int, int); 460 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 461 pfn_t, int); 462 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 463 static void sfmmu_tlb_range_demap(demap_range_t *); 464 static void sfmmu_invalidate_ctx(sfmmu_t *); 465 static void sfmmu_sync_mmustate(sfmmu_t *); 466 467 
static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 468 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 469 sfmmu_t *); 470 static void sfmmu_tsb_free(struct tsb_info *); 471 static void sfmmu_tsbinfo_free(struct tsb_info *); 472 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 473 sfmmu_t *); 474 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 475 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 476 static int sfmmu_select_tsb_szc(pgcnt_t); 477 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 478 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 479 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 480 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 481 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 482 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 483 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 484 hatlock_t *, uint_t); 485 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 486 487 #ifdef VAC 488 void sfmmu_cache_flush(pfn_t, int); 489 void sfmmu_cache_flushcolor(int, pfn_t); 490 #endif 491 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 492 caddr_t, demap_range_t *, uint_t, int); 493 494 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 495 static uint_t sfmmu_ptov_attr(tte_t *); 496 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 497 caddr_t, demap_range_t *, uint_t); 498 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 499 static int sfmmu_idcache_constructor(void *, void *, int); 500 static void sfmmu_idcache_destructor(void *, void *); 501 static int sfmmu_hblkcache_constructor(void *, void *, int); 502 static void sfmmu_hblkcache_destructor(void *, void *); 503 static void sfmmu_hblkcache_reclaim(void *); 504 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 505 struct hmehash_bucket *); 506 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 507 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 508 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 509 int, caddr_t *); 510 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 511 512 static void sfmmu_rm_large_mappings(page_t *, int); 513 514 static void hat_lock_init(void); 515 static void hat_kstat_init(void); 516 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 517 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 518 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 519 static void sfmmu_check_page_sizes(sfmmu_t *, int); 520 int fnd_mapping_sz(page_t *); 521 static void iment_add(struct ism_ment *, struct hat *); 522 static void iment_sub(struct ism_ment *, struct hat *); 523 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 524 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 525 extern void sfmmu_clear_utsbinfo(void); 526 527 static void sfmmu_ctx_wrap_around(mmu_ctx_t *); 528 529 /* kpm globals */ 530 #ifdef DEBUG 531 /* 532 * Enable trap level tsbmiss handling 533 */ 534 int kpm_tsbmtl = 1; 535 536 /* 537 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 538 * required TLB shootdowns in this case, so handle w/ care. Off by default. 539 */ 540 int kpm_tlb_flush; 541 #endif /* DEBUG */ 542 543 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 544 545 #ifdef DEBUG 546 static void sfmmu_check_hblk_flist(); 547 #endif 548 549 /* 550 * Semi-private sfmmu data structures. 
Some of them are initialize in 551 * startup or in hat_init. Some of them are private but accessed by 552 * assembly code or mach_sfmmu.c 553 */ 554 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */ 555 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */ 556 uint64_t uhme_hash_pa; /* PA of uhme_hash */ 557 uint64_t khme_hash_pa; /* PA of khme_hash */ 558 int uhmehash_num; /* # of buckets in user hash table */ 559 int khmehash_num; /* # of buckets in kernel hash table */ 560 561 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */ 562 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */ 563 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */ 564 565 #define DEFAULT_NUM_CTXS_PER_MMU 8192 566 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU; 567 568 int cache; /* describes system cache */ 569 570 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */ 571 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */ 572 int ktsb_szcode; /* kernel 8k-indexed tsb size code */ 573 int ktsb_sz; /* kernel 8k-indexed tsb size */ 574 575 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */ 576 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */ 577 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */ 578 int ktsb4m_sz; /* kernel 4m-indexed tsb size */ 579 580 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */ 581 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */ 582 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */ 583 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */ 584 585 #ifndef sun4v 586 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */ 587 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */ 588 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */ 589 caddr_t utsb_vabase; /* reserved kernel virtual memory */ 590 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */ 591 #endif /* sun4v */ 592 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */ 593 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */ 594 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */ 595 596 /* 597 * Size to use for TSB slabs. Future platforms that support page sizes 598 * larger than 4M may wish to change these values, and provide their own 599 * assembly macros for building and decoding the TSB base register contents. 600 * Note disable_large_pages will override the value set here. 601 */ 602 static uint_t tsb_slab_ttesz = TTE4M; 603 size_t tsb_slab_size = MMU_PAGESIZE4M; 604 uint_t tsb_slab_shift = MMU_PAGESHIFT4M; 605 /* PFN mask for TTE */ 606 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT; 607 608 /* 609 * Size to use for TSB slabs. These are used only when 256M tsb arenas 610 * exist. 611 */ 612 static uint_t bigtsb_slab_ttesz = TTE256M; 613 static size_t bigtsb_slab_size = MMU_PAGESIZE256M; 614 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M; 615 /* 256M page alignment for 8K pfn */ 616 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT; 617 618 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 619 static int tsb_max_growsize = 0; 620 621 /* 622 * Tunable parameters dealing with TSB policies. 623 */ 624 625 /* 626 * This undocumented tunable forces all 8K TSBs to be allocated from 627 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 
628 */ 629 #ifdef DEBUG 630 int tsb_forceheap = 0; 631 #endif /* DEBUG */ 632 633 /* 634 * Decide whether to use per-lgroup arenas, or one global set of 635 * TSB arenas. The default is not to break up per-lgroup, since 636 * most platforms don't recognize any tangible benefit from it. 637 */ 638 int tsb_lgrp_affinity = 0; 639 640 /* 641 * Used for growing the TSB based on the process RSS. 642 * tsb_rss_factor is based on the smallest TSB, and is 643 * shifted by the TSB size to determine if we need to grow. 644 * The default will grow the TSB if the number of TTEs for 645 * this page size exceeds 75% of the number of TSB entries, 646 * which should _almost_ eliminate all conflict misses 647 * (at the expense of using up lots and lots of memory). 648 */ 649 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 650 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 651 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 652 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 653 default_tsb_size) 654 #define TSB_OK_SHRINK() \ 655 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 656 #define TSB_OK_GROW() \ 657 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 658 659 int enable_tsb_rss_sizing = 1; 660 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 661 662 /* which TSB size code to use for new address spaces or if rss sizing off */ 663 int default_tsb_size = TSB_8K_SZCODE; 664 665 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 666 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 667 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 668 669 #ifdef DEBUG 670 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 671 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 672 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 673 static int tsb_alloc_fail_mtbf = 0; 674 static int tsb_alloc_count = 0; 675 #endif /* DEBUG */ 676 677 /* if set to 1, will remap valid TTEs when growing TSB. */ 678 int tsb_remap_ttes = 1; 679 680 /* 681 * If we have more than this many mappings, allocate a second TSB. 682 * This default is chosen because the I/D fully associative TLBs are 683 * assumed to have at least 8 available entries. Platforms with a 684 * larger fully-associative TLB could probably override the default. 
685 */ 686 687 #ifdef sun4v 688 int tsb_sectsb_threshold = 0; 689 #else 690 int tsb_sectsb_threshold = 8; 691 #endif 692 693 /* 694 * kstat data 695 */ 696 struct sfmmu_global_stat sfmmu_global_stat; 697 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 698 699 /* 700 * Global data 701 */ 702 sfmmu_t *ksfmmup; /* kernel's hat id */ 703 704 #ifdef DEBUG 705 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 706 #endif 707 708 /* sfmmu locking operations */ 709 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 710 static int sfmmu_mlspl_held(struct page *, int); 711 712 kmutex_t *sfmmu_page_enter(page_t *); 713 void sfmmu_page_exit(kmutex_t *); 714 int sfmmu_page_spl_held(struct page *); 715 716 /* sfmmu internal locking operations - accessed directly */ 717 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 718 kmutex_t **, kmutex_t **); 719 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 720 static hatlock_t * 721 sfmmu_hat_enter(sfmmu_t *); 722 static hatlock_t * 723 sfmmu_hat_tryenter(sfmmu_t *); 724 static void sfmmu_hat_exit(hatlock_t *); 725 static void sfmmu_hat_lock_all(void); 726 static void sfmmu_hat_unlock_all(void); 727 static void sfmmu_ismhat_enter(sfmmu_t *, int); 728 static void sfmmu_ismhat_exit(sfmmu_t *, int); 729 730 /* 731 * Array of mutexes protecting a page's mapping list and p_nrm field. 732 * 733 * The hash function looks complicated, but is made up so that: 734 * 735 * "pp" not shifted, so adjacent pp values will hash to different cache lines 736 * (8 byte alignment * 8 bytes/mutes == 64 byte coherency subblock) 737 * 738 * "pp" >> mml_shift, incorporates more source bits into the hash result 739 * 740 * "& (mml_table_size - 1), should be faster than using remainder "%" 741 * 742 * Hopefully, mml_table, mml_table_size and mml_shift are all in the same 743 * cacheline, since they get declared next to each other below. We'll trust 744 * ld not to do something random. 745 */ 746 #ifdef DEBUG 747 int mlist_hash_debug = 0; 748 #define MLIST_HASH(pp) (mlist_hash_debug ? &mml_table[0] : \ 749 &mml_table[((uintptr_t)(pp) + \ 750 ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]) 751 #else /* !DEBUG */ 752 #define MLIST_HASH(pp) &mml_table[ \ 753 ((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)] 754 #endif /* !DEBUG */ 755 756 kmutex_t *mml_table; 757 uint_t mml_table_sz; /* must be a power of 2 */ 758 uint_t mml_shift; /* log2(mml_table_sz) + 3 for align */ 759 760 kpm_hlk_t *kpmp_table; 761 uint_t kpmp_table_sz; /* must be a power of 2 */ 762 uchar_t kpmp_shift; 763 764 kpm_shlk_t *kpmp_stable; 765 uint_t kpmp_stable_sz; /* must be a power of 2 */ 766 767 /* 768 * SPL_HASH was improved to avoid false cache line sharing 769 */ 770 #define SPL_TABLE_SIZE 128 771 #define SPL_MASK (SPL_TABLE_SIZE - 1) 772 #define SPL_SHIFT 7 /* log2(SPL_TABLE_SIZE) */ 773 774 #define SPL_INDEX(pp) \ 775 ((((uintptr_t)(pp) >> SPL_SHIFT) ^ \ 776 ((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \ 777 (SPL_TABLE_SIZE - 1)) 778 779 #define SPL_HASH(pp) \ 780 (&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex) 781 782 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE]; 783 784 785 /* 786 * hat_unload_callback() will group together callbacks in order 787 * to avoid xt_sync() calls. This is the maximum size of the group. 
788 */ 789 #define MAX_CB_ADDR 32 790 791 tte_t hw_tte; 792 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 793 794 static char *mmu_ctx_kstat_names[] = { 795 "mmu_ctx_tsb_exceptions", 796 "mmu_ctx_tsb_raise_exception", 797 "mmu_ctx_wrap_around", 798 }; 799 800 /* 801 * Wrapper for vmem_xalloc since vmem_create only allows limited 802 * parameters for vm_source_alloc functions. This function allows us 803 * to specify alignment consistent with the size of the object being 804 * allocated. 805 */ 806 static void * 807 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 808 { 809 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 810 } 811 812 /* Common code for setting tsb_alloc_hiwater. */ 813 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 814 ptob(pages) / tsb_alloc_hiwater_factor 815 816 /* 817 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 818 * a single TSB. physmem is the number of physical pages so we need physmem 8K 819 * TTEs to represent all those physical pages. We round this up by using 820 * 1<<highbit(). To figure out which size code to use, remember that the size 821 * code is just an amount to shift the smallest TSB size to get the size of 822 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 823 * highbit() - 1) to get the size code for the smallest TSB that can represent 824 * all of physical memory, while erring on the side of too much. 825 * 826 * Restrict tsb_max_growsize to make sure that: 827 * 1) TSBs can't grow larger than the TSB slab size 828 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 829 */ 830 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 831 int _i, _szc, _slabszc, _tsbszc; \ 832 \ 833 _i = highbit(pages); \ 834 if ((1 << (_i - 1)) == (pages)) \ 835 _i--; /* 2^n case, round down */ \ 836 _szc = _i - TSB_START_SIZE; \ 837 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 838 _tsbszc = MIN(_szc, _slabszc); \ 839 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 840 } 841 842 /* 843 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 844 * tsb_info which handles that TTE size. 845 */ 846 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 847 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 848 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 849 sfmmu_hat_lock_held(sfmmup)); \ 850 if ((tte_szc) >= TTE4M) { \ 851 ASSERT((tsbinfop) != NULL); \ 852 (tsbinfop) = (tsbinfop)->tsb_next; \ 853 } \ 854 } 855 856 /* 857 * Macro to use to unload entries from the TSB. 858 * It has knowledge of which page sizes get replicated in the TSB 859 * and will call the appropriate unload routine for the appropriate size. 860 */ 861 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 862 { \ 863 int ttesz = get_hblk_ttesz(hmeblkp); \ 864 if (ttesz == TTE8K || ttesz == TTE4M) { \ 865 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 866 } else { \ 867 caddr_t sva = ismhat ? addr : \ 868 (caddr_t)get_hblk_base(hmeblkp); \ 869 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 870 ASSERT(addr >= sva && addr < eva); \ 871 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 872 } \ 873 } 874 875 876 /* Update tsb_alloc_hiwater after memory is configured. */ 877 /*ARGSUSED*/ 878 static void 879 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 880 { 881 /* Assumes physmem has already been updated. 
*/ 882 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 883 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 884 } 885 886 /* 887 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here 888 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is 889 * deleted. 890 */ 891 /*ARGSUSED*/ 892 static int 893 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages) 894 { 895 return (0); 896 } 897 898 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */ 899 /*ARGSUSED*/ 900 static void 901 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 902 { 903 /* 904 * Whether the delete was cancelled or not, just go ahead and update 905 * tsb_alloc_hiwater and tsb_max_growsize. 906 */ 907 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 908 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 909 } 910 911 static kphysm_setup_vector_t sfmmu_update_vec = { 912 KPHYSM_SETUP_VECTOR_VERSION, /* version */ 913 sfmmu_update_post_add, /* post_add */ 914 sfmmu_update_pre_del, /* pre_del */ 915 sfmmu_update_post_del /* post_del */ 916 }; 917 918 919 /* 920 * HME_BLK HASH PRIMITIVES 921 */ 922 923 /* 924 * Enter a hme on the mapping list for page pp. 925 * When large pages are more prevalent in the system we might want to 926 * keep the mapping list in ascending order by the hment size. For now, 927 * small pages are more frequent, so don't slow it down. 928 */ 929 #define HME_ADD(hme, pp) \ 930 { \ 931 ASSERT(sfmmu_mlist_held(pp)); \ 932 \ 933 hme->hme_prev = NULL; \ 934 hme->hme_next = pp->p_mapping; \ 935 hme->hme_page = pp; \ 936 if (pp->p_mapping) { \ 937 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\ 938 ASSERT(pp->p_share > 0); \ 939 } else { \ 940 /* EMPTY */ \ 941 ASSERT(pp->p_share == 0); \ 942 } \ 943 pp->p_mapping = hme; \ 944 pp->p_share++; \ 945 } 946 947 /* 948 * Enter a hme on the mapping list for page pp. 949 * If we are unmapping a large translation, we need to make sure that the 950 * change is reflect in the corresponding bit of the p_index field. 951 */ 952 #define HME_SUB(hme, pp) \ 953 { \ 954 ASSERT(sfmmu_mlist_held(pp)); \ 955 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \ 956 \ 957 if (pp->p_mapping == NULL) { \ 958 panic("hme_remove - no mappings"); \ 959 } \ 960 \ 961 membar_stst(); /* ensure previous stores finish */ \ 962 \ 963 ASSERT(pp->p_share > 0); \ 964 pp->p_share--; \ 965 \ 966 if (hme->hme_prev) { \ 967 ASSERT(pp->p_mapping != hme); \ 968 ASSERT(hme->hme_prev->hme_page == pp || \ 969 IS_PAHME(hme->hme_prev)); \ 970 hme->hme_prev->hme_next = hme->hme_next; \ 971 } else { \ 972 ASSERT(pp->p_mapping == hme); \ 973 pp->p_mapping = hme->hme_next; \ 974 ASSERT((pp->p_mapping == NULL) ? \ 975 (pp->p_share == 0) : 1); \ 976 } \ 977 \ 978 if (hme->hme_next) { \ 979 ASSERT(hme->hme_next->hme_page == pp || \ 980 IS_PAHME(hme->hme_next)); \ 981 hme->hme_next->hme_prev = hme->hme_prev; \ 982 } \ 983 \ 984 /* zero out the entry */ \ 985 hme->hme_next = NULL; \ 986 hme->hme_prev = NULL; \ 987 hme->hme_page = NULL; \ 988 \ 989 if (hme_size(hme) > TTE8K) { \ 990 /* remove mappings for remainder of large pg */ \ 991 sfmmu_rm_large_mappings(pp, hme_size(hme)); \ 992 } \ 993 } 994 995 /* 996 * This function returns the hment given the hme_blk and a vaddr. 997 * It assumes addr has already been checked to belong to hme_blk's 998 * range. 999 */ 1000 #define HBLKTOHME(hment, hmeblkp, addr) \ 1001 { \ 1002 int index; \ 1003 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \ 1004 } 1005 1006 /* 1007 * Version of HBLKTOHME that also returns the index in hmeblkp 1008 * of the hment. 
1009 */ 1010 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \ 1011 { \ 1012 ASSERT(in_hblk_range((hmeblkp), (addr))); \ 1013 \ 1014 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \ 1015 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \ 1016 } else \ 1017 idx = 0; \ 1018 \ 1019 (hment) = &(hmeblkp)->hblk_hme[idx]; \ 1020 } 1021 1022 /* 1023 * Disable any page sizes not supported by the CPU 1024 */ 1025 void 1026 hat_init_pagesizes() 1027 { 1028 int i; 1029 1030 mmu_exported_page_sizes = 0; 1031 for (i = TTE8K; i < max_mmu_page_sizes; i++) { 1032 1033 szc_2_userszc[i] = (uint_t)-1; 1034 userszc_2_szc[i] = (uint_t)-1; 1035 1036 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) { 1037 disable_large_pages |= (1 << i); 1038 } else { 1039 szc_2_userszc[i] = mmu_exported_page_sizes; 1040 userszc_2_szc[mmu_exported_page_sizes] = i; 1041 mmu_exported_page_sizes++; 1042 } 1043 } 1044 1045 disable_ism_large_pages |= disable_large_pages; 1046 disable_auto_data_large_pages = disable_large_pages; 1047 disable_auto_text_large_pages = disable_large_pages; 1048 1049 /* 1050 * Initialize mmu-specific large page sizes. 1051 */ 1052 if (&mmu_large_pages_disabled) { 1053 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD); 1054 disable_ism_large_pages |= 1055 mmu_large_pages_disabled(HAT_LOAD_SHARE); 1056 disable_auto_data_large_pages |= 1057 mmu_large_pages_disabled(HAT_AUTO_DATA); 1058 disable_auto_text_large_pages |= 1059 mmu_large_pages_disabled(HAT_AUTO_TEXT); 1060 } 1061 } 1062 1063 /* 1064 * Initialize the hardware address translation structures. 1065 */ 1066 void 1067 hat_init(void) 1068 { 1069 int i; 1070 uint_t sz; 1071 size_t size; 1072 1073 hat_lock_init(); 1074 hat_kstat_init(); 1075 1076 /* 1077 * Hardware-only bits in a TTE 1078 */ 1079 MAKE_TTE_MASK(&hw_tte); 1080 1081 hat_init_pagesizes(); 1082 1083 /* Initialize the hash locks */ 1084 for (i = 0; i < khmehash_num; i++) { 1085 mutex_init(&khme_hash[i].hmehash_mutex, NULL, 1086 MUTEX_DEFAULT, NULL); 1087 } 1088 for (i = 0; i < uhmehash_num; i++) { 1089 mutex_init(&uhme_hash[i].hmehash_mutex, NULL, 1090 MUTEX_DEFAULT, NULL); 1091 } 1092 khmehash_num--; /* make sure counter starts from 0 */ 1093 uhmehash_num--; /* make sure counter starts from 0 */ 1094 1095 /* 1096 * Allocate context domain structures. 1097 * 1098 * A platform may choose to modify max_mmu_ctxdoms in 1099 * set_platform_defaults(). If a platform does not define 1100 * a set_platform_defaults() or does not choose to modify 1101 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU. 1102 * 1103 * For sun4v, there will be one global context domain, this is to 1104 * avoid the ldom cpu substitution problem. 1105 * 1106 * For all platforms that have CPUs sharing MMUs, this 1107 * value must be defined. 1108 */ 1109 if (max_mmu_ctxdoms == 0) { 1110 #ifndef sun4v 1111 max_mmu_ctxdoms = max_ncpus; 1112 #else /* sun4v */ 1113 max_mmu_ctxdoms = 1; 1114 #endif /* sun4v */ 1115 } 1116 1117 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *); 1118 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP); 1119 1120 /* mmu_ctx_t is 64 bytes aligned */ 1121 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache", 1122 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 1123 /* 1124 * MMU context domain initialization for the Boot CPU. 1125 * This needs the context domains array allocated above. 1126 */ 1127 mutex_enter(&cpu_lock); 1128 sfmmu_cpu_init(CPU); 1129 mutex_exit(&cpu_lock); 1130 1131 /* 1132 * Intialize ism mapping list lock. 
1133 */ 1134 1135 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1136 1137 /* 1138 * Each sfmmu structure carries an array of MMU context info 1139 * structures, one per context domain. The size of this array depends 1140 * on the maximum number of context domains. So, the size of the 1141 * sfmmu structure varies per platform. 1142 * 1143 * sfmmu is allocated from static arena, because trap 1144 * handler at TL > 0 is not allowed to touch kernel relocatable 1145 * memory. sfmmu's alignment is changed to 64 bytes from 1146 * default 8 bytes, as the lower 6 bits will be used to pass 1147 * pgcnt to vtag_flush_pgcnt_tl1. 1148 */ 1149 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1150 1151 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1152 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1153 NULL, NULL, static_arena, 0); 1154 1155 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1156 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1157 1158 /* 1159 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1160 * from the heap when low on memory or when TSB_FORCEALLOC is 1161 * specified, don't use magazines to cache them--we want to return 1162 * them to the system as quickly as possible. 1163 */ 1164 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1165 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1166 static_arena, KMC_NOMAGAZINE); 1167 1168 /* 1169 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1170 * memory, which corresponds to the old static reserve for TSBs. 1171 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1172 * memory we'll allocate for TSB slabs; beyond this point TSB 1173 * allocations will be taken from the kernel heap (via 1174 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1175 * consumer. 1176 */ 1177 if (tsb_alloc_hiwater_factor == 0) { 1178 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1179 } 1180 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1181 1182 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1183 if (!(disable_large_pages & (1 << sz))) 1184 break; 1185 } 1186 1187 if (sz < tsb_slab_ttesz) { 1188 tsb_slab_ttesz = sz; 1189 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1190 tsb_slab_size = 1 << tsb_slab_shift; 1191 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1192 use_bigtsb_arena = 0; 1193 } else if (use_bigtsb_arena && 1194 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1195 use_bigtsb_arena = 0; 1196 } 1197 1198 if (!use_bigtsb_arena) { 1199 bigtsb_slab_shift = tsb_slab_shift; 1200 } 1201 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1202 1203 /* 1204 * On smaller memory systems, allocate TSB memory in smaller chunks 1205 * than the default 4M slab size. We also honor disable_large_pages 1206 * here. 1207 * 1208 * The trap handlers need to be patched with the final slab shift, 1209 * since they need to be able to construct the TSB pointer at runtime. 
1210 */ 1211 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1212 !(disable_large_pages & (1 << TTE512K))) { 1213 tsb_slab_ttesz = TTE512K; 1214 tsb_slab_shift = MMU_PAGESHIFT512K; 1215 tsb_slab_size = MMU_PAGESIZE512K; 1216 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1217 use_bigtsb_arena = 0; 1218 } 1219 1220 if (!use_bigtsb_arena) { 1221 bigtsb_slab_ttesz = tsb_slab_ttesz; 1222 bigtsb_slab_shift = tsb_slab_shift; 1223 bigtsb_slab_size = tsb_slab_size; 1224 bigtsb_slab_mask = tsb_slab_mask; 1225 } 1226 1227 1228 /* 1229 * Set up memory callback to update tsb_alloc_hiwater and 1230 * tsb_max_growsize. 1231 */ 1232 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1233 ASSERT(i == 0); 1234 1235 /* 1236 * kmem_tsb_arena is the source from which large TSB slabs are 1237 * drawn. The quantum of this arena corresponds to the largest 1238 * TSB size we can dynamically allocate for user processes. 1239 * Currently it must also be a supported page size since we 1240 * use exactly one translation entry to map each slab page. 1241 * 1242 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1243 * which most TSBs are allocated. Since most TSB allocations are 1244 * typically 8K we have a kmem cache we stack on top of each 1245 * kmem_tsb_default_arena to speed up those allocations. 1246 * 1247 * Note the two-level scheme of arenas is required only 1248 * because vmem_create doesn't allow us to specify alignment 1249 * requirements. If this ever changes the code could be 1250 * simplified to use only one level of arenas. 1251 * 1252 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1253 * will be provided in addition to the 4M kmem_tsb_arena. 1254 */ 1255 if (use_bigtsb_arena) { 1256 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1257 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1258 vmem_xfree, heap_arena, 0, VM_SLEEP); 1259 } 1260 1261 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1262 sfmmu_vmem_xalloc_aligned_wrapper, 1263 vmem_xfree, heap_arena, 0, VM_SLEEP); 1264 1265 if (tsb_lgrp_affinity) { 1266 char s[50]; 1267 for (i = 0; i < NLGRPS_MAX; i++) { 1268 if (use_bigtsb_arena) { 1269 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1270 kmem_bigtsb_default_arena[i] = vmem_create(s, 1271 NULL, 0, 2 * tsb_slab_size, 1272 sfmmu_tsb_segkmem_alloc, 1273 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1274 0, VM_SLEEP | VM_BESTFIT); 1275 } 1276 1277 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1278 kmem_tsb_default_arena[i] = vmem_create(s, 1279 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1280 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1281 VM_SLEEP | VM_BESTFIT); 1282 1283 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1284 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1285 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1286 kmem_tsb_default_arena[i], 0); 1287 } 1288 } else { 1289 if (use_bigtsb_arena) { 1290 kmem_bigtsb_default_arena[0] = 1291 vmem_create("kmem_bigtsb_default", NULL, 0, 1292 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1293 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1294 VM_SLEEP | VM_BESTFIT); 1295 } 1296 1297 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1298 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1299 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1300 VM_SLEEP | VM_BESTFIT); 1301 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1302 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1303 kmem_tsb_default_arena[0], 0); 1304 } 1305 1306 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1307 
HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1308 sfmmu_hblkcache_destructor, 1309 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1310 hat_memload_arena, KMC_NOHASH); 1311 1312 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1313 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP); 1314 1315 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1316 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1317 sfmmu_hblkcache_destructor, 1318 NULL, (void *)HME1BLK_SZ, 1319 hat_memload1_arena, KMC_NOHASH); 1320 1321 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1322 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1323 1324 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1325 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1326 NULL, NULL, static_arena, KMC_NOHASH); 1327 1328 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1329 sizeof (ism_ment_t), 0, NULL, NULL, 1330 NULL, NULL, NULL, 0); 1331 1332 /* 1333 * We grab the first hat for the kernel, 1334 */ 1335 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1336 kas.a_hat = hat_alloc(&kas); 1337 AS_LOCK_EXIT(&kas, &kas.a_lock); 1338 1339 /* 1340 * Initialize hblk_reserve. 1341 */ 1342 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1343 va_to_pa((caddr_t)hblk_reserve); 1344 1345 #ifndef UTSB_PHYS 1346 /* 1347 * Reserve some kernel virtual address space for the locked TTEs 1348 * that allow us to probe the TSB from TL>0. 1349 */ 1350 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1351 0, 0, NULL, NULL, VM_SLEEP); 1352 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1353 0, 0, NULL, NULL, VM_SLEEP); 1354 #endif 1355 1356 #ifdef VAC 1357 /* 1358 * The big page VAC handling code assumes VAC 1359 * will not be bigger than the smallest big 1360 * page- which is 64K. 1361 */ 1362 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1363 cmn_err(CE_PANIC, "VAC too big!"); 1364 } 1365 #endif 1366 1367 (void) xhat_init(); 1368 1369 uhme_hash_pa = va_to_pa(uhme_hash); 1370 khme_hash_pa = va_to_pa(khme_hash); 1371 1372 /* 1373 * Initialize relocation locks. kpr_suspendlock is held 1374 * at PIL_MAX to prevent interrupts from pinning the holder 1375 * of a suspended TTE which may access it leading to a 1376 * deadlock condition. 1377 */ 1378 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1379 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1380 1381 /* 1382 * If Shared context support is disabled via /etc/system 1383 * set shctx_on to 0 here if it was set to 1 earlier in boot 1384 * sequence by cpu module initialization code. 1385 */ 1386 if (shctx_on && disable_shctx) { 1387 shctx_on = 0; 1388 } 1389 1390 if (shctx_on) { 1391 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1392 sizeof (srd_buckets[0]), KM_SLEEP); 1393 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1394 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1395 MUTEX_DEFAULT, NULL); 1396 } 1397 1398 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1399 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1400 NULL, NULL, NULL, 0); 1401 region_cache = kmem_cache_create("region_cache", 1402 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1403 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1404 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1405 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1406 NULL, NULL, NULL, 0); 1407 } 1408 1409 /* 1410 * Pre-allocate hrm_hashtab before enabling the collection of 1411 * refmod statistics. 
Allocating on the fly would mean us 1412 * running the risk of suffering recursive mutex enters or 1413 * deadlocks. 1414 */ 1415 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1416 KM_SLEEP); 1417 } 1418 1419 /* 1420 * Initialize locking for the hat layer, called early during boot. 1421 */ 1422 static void 1423 hat_lock_init() 1424 { 1425 int i; 1426 1427 /* 1428 * initialize the array of mutexes protecting a page's mapping 1429 * list and p_nrm field. 1430 */ 1431 for (i = 0; i < mml_table_sz; i++) 1432 mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL); 1433 1434 if (kpm_enable) { 1435 for (i = 0; i < kpmp_table_sz; i++) { 1436 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1437 MUTEX_DEFAULT, NULL); 1438 } 1439 } 1440 1441 /* 1442 * Initialize array of mutex locks that protects sfmmu fields and 1443 * TSB lists. 1444 */ 1445 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1446 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1447 NULL); 1448 } 1449 1450 #define SFMMU_KERNEL_MAXVA \ 1451 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1452 1453 /* 1454 * Allocate a hat structure. 1455 * Called when an address space first uses a hat. 1456 */ 1457 struct hat * 1458 hat_alloc(struct as *as) 1459 { 1460 sfmmu_t *sfmmup; 1461 int i; 1462 uint64_t cnum; 1463 extern uint_t get_color_start(struct as *); 1464 1465 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1466 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1467 sfmmup->sfmmu_as = as; 1468 sfmmup->sfmmu_flags = 0; 1469 sfmmup->sfmmu_tteflags = 0; 1470 sfmmup->sfmmu_rtteflags = 0; 1471 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1472 1473 if (as == &kas) { 1474 ksfmmup = sfmmup; 1475 sfmmup->sfmmu_cext = 0; 1476 cnum = KCONTEXT; 1477 1478 sfmmup->sfmmu_clrstart = 0; 1479 sfmmup->sfmmu_tsb = NULL; 1480 /* 1481 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1482 * to setup tsb_info for ksfmmup. 1483 */ 1484 } else { 1485 1486 /* 1487 * Just set to invalid ctx. When it faults, it will 1488 * get a valid ctx. This would avoid the situation 1489 * where we get a ctx, but it gets stolen and then 1490 * we fault when we try to run and so have to get 1491 * another ctx. 
1492 */ 1493 sfmmup->sfmmu_cext = 0; 1494 cnum = INVALID_CONTEXT; 1495 1496 /* initialize original physical page coloring bin */ 1497 sfmmup->sfmmu_clrstart = get_color_start(as); 1498 #ifdef DEBUG 1499 if (tsb_random_size) { 1500 uint32_t randval = (uint32_t)gettick() >> 4; 1501 int size = randval % (tsb_max_growsize + 1); 1502 1503 /* chose a random tsb size for stress testing */ 1504 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1505 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1506 } else 1507 #endif /* DEBUG */ 1508 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1509 default_tsb_size, 1510 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1511 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1512 ASSERT(sfmmup->sfmmu_tsb != NULL); 1513 } 1514 1515 ASSERT(max_mmu_ctxdoms > 0); 1516 for (i = 0; i < max_mmu_ctxdoms; i++) { 1517 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1518 sfmmup->sfmmu_ctxs[i].gnum = 0; 1519 } 1520 1521 for (i = 0; i < max_mmu_page_sizes; i++) { 1522 sfmmup->sfmmu_ttecnt[i] = 0; 1523 sfmmup->sfmmu_scdrttecnt[i] = 0; 1524 sfmmup->sfmmu_ismttecnt[i] = 0; 1525 sfmmup->sfmmu_scdismttecnt[i] = 0; 1526 sfmmup->sfmmu_pgsz[i] = TTE8K; 1527 } 1528 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1529 sfmmup->sfmmu_iblk = NULL; 1530 sfmmup->sfmmu_ismhat = 0; 1531 sfmmup->sfmmu_scdhat = 0; 1532 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1533 if (sfmmup == ksfmmup) { 1534 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1535 } else { 1536 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1537 } 1538 sfmmup->sfmmu_free = 0; 1539 sfmmup->sfmmu_rmstat = 0; 1540 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1541 sfmmup->sfmmu_xhat_provider = NULL; 1542 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1543 sfmmup->sfmmu_srdp = NULL; 1544 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1545 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1546 sfmmup->sfmmu_scdp = NULL; 1547 sfmmup->sfmmu_scd_link.next = NULL; 1548 sfmmup->sfmmu_scd_link.prev = NULL; 1549 return (sfmmup); 1550 } 1551 1552 /* 1553 * Create per-MMU context domain kstats for a given MMU ctx. 1554 */ 1555 static void 1556 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1557 { 1558 mmu_ctx_stat_t stat; 1559 kstat_t *mmu_kstat; 1560 1561 ASSERT(MUTEX_HELD(&cpu_lock)); 1562 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1563 1564 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1565 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1566 1567 if (mmu_kstat == NULL) { 1568 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1569 mmu_ctxp->mmu_idx); 1570 } else { 1571 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1572 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1573 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1574 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1575 mmu_ctxp->mmu_kstat = mmu_kstat; 1576 kstat_install(mmu_kstat); 1577 } 1578 } 1579 1580 /* 1581 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1582 * context domain information for a given CPU. If a platform does not 1583 * specify that interface, then the function below is used instead to return 1584 * default information. The defaults are as follows: 1585 * 1586 * - For sun4u systems there's one MMU context domain per CPU. 1587 * This default is used by all sun4u systems except OPL. OPL systems 1588 * provide platform specific interface to map CPU ids to MMU ids 1589 * because on OPL more than 1 CPU shares a single MMU. 1590 * Note that on sun4v, there is one global context domain for 1591 * the entire system. 
This is to avoid running into potential problem 1592 * with ldom physical cpu substitution feature. 1593 * - The number of MMU context IDs supported on any CPU in the 1594 * system is 8K. 1595 */ 1596 /*ARGSUSED*/ 1597 static void 1598 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1599 { 1600 infop->mmu_nctxs = nctxs; 1601 #ifndef sun4v 1602 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1603 #else /* sun4v */ 1604 infop->mmu_idx = 0; 1605 #endif /* sun4v */ 1606 } 1607 1608 /* 1609 * Called during CPU initialization to set the MMU context-related information 1610 * for a CPU. 1611 * 1612 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1613 */ 1614 void 1615 sfmmu_cpu_init(cpu_t *cp) 1616 { 1617 mmu_ctx_info_t info; 1618 mmu_ctx_t *mmu_ctxp; 1619 1620 ASSERT(MUTEX_HELD(&cpu_lock)); 1621 1622 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1623 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1624 else 1625 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1626 1627 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1628 1629 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1630 /* Each mmu_ctx is cacheline aligned. */ 1631 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1632 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1633 1634 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1635 (void *)ipltospl(DISP_LEVEL)); 1636 mmu_ctxp->mmu_idx = info.mmu_idx; 1637 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1638 /* 1639 * Globally for lifetime of a system, 1640 * gnum must always increase. 1641 * mmu_saved_gnum is protected by the cpu_lock. 1642 */ 1643 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1644 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1645 1646 sfmmu_mmu_kstat_create(mmu_ctxp); 1647 1648 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1649 } else { 1650 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1651 } 1652 1653 /* 1654 * The mmu_lock is acquired here to prevent races with 1655 * the wrap-around code. 1656 */ 1657 mutex_enter(&mmu_ctxp->mmu_lock); 1658 1659 1660 mmu_ctxp->mmu_ncpus++; 1661 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1662 CPU_MMU_IDX(cp) = info.mmu_idx; 1663 CPU_MMU_CTXP(cp) = mmu_ctxp; 1664 1665 mutex_exit(&mmu_ctxp->mmu_lock); 1666 } 1667 1668 /* 1669 * Called to perform MMU context-related cleanup for a CPU. 1670 */ 1671 void 1672 sfmmu_cpu_cleanup(cpu_t *cp) 1673 { 1674 mmu_ctx_t *mmu_ctxp; 1675 1676 ASSERT(MUTEX_HELD(&cpu_lock)); 1677 1678 mmu_ctxp = CPU_MMU_CTXP(cp); 1679 ASSERT(mmu_ctxp != NULL); 1680 1681 /* 1682 * The mmu_lock is acquired here to prevent races with 1683 * the wrap-around code. 1684 */ 1685 mutex_enter(&mmu_ctxp->mmu_lock); 1686 1687 CPU_MMU_CTXP(cp) = NULL; 1688 1689 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1690 if (--mmu_ctxp->mmu_ncpus == 0) { 1691 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1692 mutex_exit(&mmu_ctxp->mmu_lock); 1693 mutex_destroy(&mmu_ctxp->mmu_lock); 1694 1695 if (mmu_ctxp->mmu_kstat) 1696 kstat_delete(mmu_ctxp->mmu_kstat); 1697 1698 /* mmu_saved_gnum is protected by the cpu_lock. */ 1699 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1700 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1701 1702 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1703 1704 return; 1705 } 1706 1707 mutex_exit(&mmu_ctxp->mmu_lock); 1708 } 1709 1710 /* 1711 * Hat_setup, makes an address space context the current active one. 1712 * In sfmmu this translates to setting the secondary context with the 1713 * corresponding context. 1714 */ 1715 void 1716 hat_setup(struct hat *sfmmup, int allocflag) 1717 { 1718 hatlock_t *hatlockp; 1719 1720 /* Init needs some special treatment. 
*/ 1721 if (allocflag == HAT_INIT) { 1722 /* 1723 * Make sure that we have 1724 * 1. a TSB 1725 * 2. a valid ctx that doesn't get stolen after this point. 1726 */ 1727 hatlockp = sfmmu_hat_enter(sfmmup); 1728 1729 /* 1730 * Swap in the TSB. hat_init() allocates tsbinfos without 1731 * TSBs, but we need one for init, since the kernel does some 1732 * special things to set up its stack and needs the TSB to 1733 * resolve page faults. 1734 */ 1735 sfmmu_tsb_swapin(sfmmup, hatlockp); 1736 1737 sfmmu_get_ctx(sfmmup); 1738 1739 sfmmu_hat_exit(hatlockp); 1740 } else { 1741 ASSERT(allocflag == HAT_ALLOC); 1742 1743 hatlockp = sfmmu_hat_enter(sfmmup); 1744 kpreempt_disable(); 1745 1746 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1747 /* 1748 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1749 * pagesize bits don't matter in this case since we are passing 1750 * INVALID_CONTEXT to it. 1751 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1752 */ 1753 sfmmu_setctx_sec(INVALID_CONTEXT); 1754 sfmmu_clear_utsbinfo(); 1755 1756 kpreempt_enable(); 1757 sfmmu_hat_exit(hatlockp); 1758 } 1759 } 1760 1761 /* 1762 * Free all the translation resources for the specified address space. 1763 * Called from as_free when an address space is being destroyed. 1764 */ 1765 void 1766 hat_free_start(struct hat *sfmmup) 1767 { 1768 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1769 ASSERT(sfmmup != ksfmmup); 1770 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1771 1772 sfmmup->sfmmu_free = 1; 1773 if (sfmmup->sfmmu_scdp != NULL) { 1774 sfmmu_leave_scd(sfmmup, 0); 1775 } 1776 1777 ASSERT(sfmmup->sfmmu_scdp == NULL); 1778 } 1779 1780 void 1781 hat_free_end(struct hat *sfmmup) 1782 { 1783 int i; 1784 1785 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1786 ASSERT(sfmmup->sfmmu_free == 1); 1787 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1788 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1789 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1790 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1791 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1792 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1793 1794 if (sfmmup->sfmmu_rmstat) { 1795 hat_freestat(sfmmup->sfmmu_as, NULL); 1796 } 1797 1798 while (sfmmup->sfmmu_tsb != NULL) { 1799 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1800 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1801 sfmmup->sfmmu_tsb = next; 1802 } 1803 1804 if (sfmmup->sfmmu_srdp != NULL) { 1805 sfmmu_leave_srd(sfmmup); 1806 ASSERT(sfmmup->sfmmu_srdp == NULL); 1807 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1808 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1809 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1810 SFMMU_L2_HMERLINKS_SIZE); 1811 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1812 } 1813 } 1814 } 1815 sfmmu_free_sfmmu(sfmmup); 1816 1817 #ifdef DEBUG 1818 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1819 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1820 } 1821 #endif 1822 1823 kmem_cache_free(sfmmuid_cache, sfmmup); 1824 } 1825 1826 /* 1827 * Set up any translation structures, for the specified address space, 1828 * that are needed or preferred when the process is being swapped in. 1829 */ 1830 /* ARGSUSED */ 1831 void 1832 hat_swapin(struct hat *hat) 1833 { 1834 ASSERT(hat->sfmmu_xhat_provider == NULL); 1835 } 1836 1837 /* 1838 * Free all of the translation resources, for the specified address space, 1839 * that can be freed while the process is swapped out. Called from as_swapout. 1840 * Also, free up the ctx that this process was using. 
1841 */ 1842 void 1843 hat_swapout(struct hat *sfmmup) 1844 { 1845 struct hmehash_bucket *hmebp; 1846 struct hme_blk *hmeblkp; 1847 struct hme_blk *pr_hblk = NULL; 1848 struct hme_blk *nx_hblk; 1849 int i; 1850 uint64_t hblkpa, prevpa, nx_pa; 1851 struct hme_blk *list = NULL; 1852 hatlock_t *hatlockp; 1853 struct tsb_info *tsbinfop; 1854 struct free_tsb { 1855 struct free_tsb *next; 1856 struct tsb_info *tsbinfop; 1857 }; /* free list of TSBs */ 1858 struct free_tsb *freelist, *last, *next; 1859 1860 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1861 SFMMU_STAT(sf_swapout); 1862 1863 /* 1864 * There is no way to go from an as to all its translations in sfmmu. 1865 * Here is one of the times when we take the big hit and traverse 1866 * the hash looking for hme_blks to free up. Not only do we free up 1867 * this as hme_blks but all those that are free. We are obviously 1868 * swapping because we need memory so let's free up as much 1869 * as we can. 1870 * 1871 * Note that we don't flush TLB/TSB here -- it's not necessary 1872 * because: 1873 * 1) we free the ctx we're using and throw away the TSB(s); 1874 * 2) processes aren't runnable while being swapped out. 1875 */ 1876 ASSERT(sfmmup != KHATID); 1877 for (i = 0; i <= UHMEHASH_SZ; i++) { 1878 hmebp = &uhme_hash[i]; 1879 SFMMU_HASH_LOCK(hmebp); 1880 hmeblkp = hmebp->hmeblkp; 1881 hblkpa = hmebp->hmeh_nextpa; 1882 prevpa = 0; 1883 pr_hblk = NULL; 1884 while (hmeblkp) { 1885 1886 ASSERT(!hmeblkp->hblk_xhat_bit); 1887 1888 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 1889 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 1890 ASSERT(!hmeblkp->hblk_shared); 1891 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 1892 (caddr_t)get_hblk_base(hmeblkp), 1893 get_hblk_endaddr(hmeblkp), 1894 NULL, HAT_UNLOAD); 1895 } 1896 nx_hblk = hmeblkp->hblk_next; 1897 nx_pa = hmeblkp->hblk_nextpa; 1898 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 1899 ASSERT(!hmeblkp->hblk_lckcnt); 1900 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 1901 prevpa, pr_hblk); 1902 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 1903 } else { 1904 pr_hblk = hmeblkp; 1905 prevpa = hblkpa; 1906 } 1907 hmeblkp = nx_hblk; 1908 hblkpa = nx_pa; 1909 } 1910 SFMMU_HASH_UNLOCK(hmebp); 1911 } 1912 1913 sfmmu_hblks_list_purge(&list); 1914 1915 /* 1916 * Now free up the ctx so that others can reuse it. 1917 */ 1918 hatlockp = sfmmu_hat_enter(sfmmup); 1919 1920 sfmmu_invalidate_ctx(sfmmup); 1921 1922 /* 1923 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 1924 * If TSBs were never swapped in, just return. 1925 * This implies that we don't support partial swapping 1926 * of TSBs -- either all are swapped out, or none are. 1927 * 1928 * We must hold the HAT lock here to prevent racing with another 1929 * thread trying to unmap TTEs from the TSB or running the post- 1930 * relocator after relocating the TSB's memory. Unfortunately, we 1931 * can't free memory while holding the HAT lock or we could 1932 * deadlock, so we build a list of TSBs to be freed after marking 1933 * the tsbinfos as swapped out and free them after dropping the 1934 * lock. 1935 */ 1936 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 1937 sfmmu_hat_exit(hatlockp); 1938 return; 1939 } 1940 1941 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 1942 last = freelist = NULL; 1943 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 1944 tsbinfop = tsbinfop->tsb_next) { 1945 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 1946 1947 /* 1948 * Cast the TSB into a struct free_tsb and put it on the free 1949 * list. 
1950 */ 1951 if (freelist == NULL) { 1952 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 1953 } else { 1954 last->next = (struct free_tsb *)tsbinfop->tsb_va; 1955 last = last->next; 1956 } 1957 last->next = NULL; 1958 last->tsbinfop = tsbinfop; 1959 tsbinfop->tsb_flags |= TSB_SWAPPED; 1960 /* 1961 * Zero out the TTE to clear the valid bit. 1962 * Note we can't use a value like 0xbad because we want to 1963 * ensure diagnostic bits are NEVER set on TTEs that might 1964 * be loaded. The intent is to catch any invalid access 1965 * to the swapped TSB, such as a thread running with a valid 1966 * context without first calling sfmmu_tsb_swapin() to 1967 * allocate TSB memory. 1968 */ 1969 tsbinfop->tsb_tte.ll = 0; 1970 } 1971 1972 /* Now we can drop the lock and free the TSB memory. */ 1973 sfmmu_hat_exit(hatlockp); 1974 for (; freelist != NULL; freelist = next) { 1975 next = freelist->next; 1976 sfmmu_tsb_free(freelist->tsbinfop); 1977 } 1978 } 1979 1980 /* 1981 * Duplicate the translations of an as into another newas 1982 */ 1983 /* ARGSUSED */ 1984 int 1985 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1986 uint_t flag) 1987 { 1988 sf_srd_t *srdp; 1989 sf_scd_t *scdp; 1990 int i; 1991 extern uint_t get_color_start(struct as *); 1992 1993 ASSERT(hat->sfmmu_xhat_provider == NULL); 1994 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 1995 (flag == HAT_DUP_SRD)); 1996 ASSERT(hat != ksfmmup); 1997 ASSERT(newhat != ksfmmup); 1998 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 1999 2000 if (flag == HAT_DUP_COW) { 2001 panic("hat_dup: HAT_DUP_COW not supported"); 2002 } 2003 2004 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2005 ASSERT(srdp->srd_evp != NULL); 2006 VN_HOLD(srdp->srd_evp); 2007 ASSERT(srdp->srd_refcnt > 0); 2008 newhat->sfmmu_srdp = srdp; 2009 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 2010 } 2011 2012 /* 2013 * HAT_DUP_ALL flag is used after as duplication is done. 
2014 */ 2015 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2016 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2017 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2018 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2019 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2020 } 2021 2022 /* check if need to join scd */ 2023 if ((scdp = hat->sfmmu_scdp) != NULL && 2024 newhat->sfmmu_scdp != scdp) { 2025 int ret; 2026 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2027 &scdp->scd_region_map, ret); 2028 ASSERT(ret); 2029 sfmmu_join_scd(scdp, newhat); 2030 ASSERT(newhat->sfmmu_scdp == scdp && 2031 scdp->scd_refcnt >= 2); 2032 for (i = 0; i < max_mmu_page_sizes; i++) { 2033 newhat->sfmmu_ismttecnt[i] = 2034 hat->sfmmu_ismttecnt[i]; 2035 newhat->sfmmu_scdismttecnt[i] = 2036 hat->sfmmu_scdismttecnt[i]; 2037 } 2038 } 2039 2040 sfmmu_check_page_sizes(newhat, 1); 2041 } 2042 2043 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2044 update_proc_pgcolorbase_after_fork != 0) { 2045 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2046 } 2047 return (0); 2048 } 2049 2050 void 2051 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2052 uint_t attr, uint_t flags) 2053 { 2054 hat_do_memload(hat, addr, pp, attr, flags, 2055 SFMMU_INVALID_SHMERID); 2056 } 2057 2058 void 2059 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2060 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2061 { 2062 uint_t rid; 2063 if (rcookie == HAT_INVALID_REGION_COOKIE || 2064 hat->sfmmu_xhat_provider != NULL) { 2065 hat_do_memload(hat, addr, pp, attr, flags, 2066 SFMMU_INVALID_SHMERID); 2067 return; 2068 } 2069 rid = (uint_t)((uint64_t)rcookie); 2070 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2071 hat_do_memload(hat, addr, pp, attr, flags, rid); 2072 } 2073 2074 /* 2075 * Set up addr to map to page pp with protection prot. 2076 * As an optimization we also load the TSB with the 2077 * corresponding tte but it is no big deal if the tte gets kicked out. 2078 */ 2079 static void 2080 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2081 uint_t attr, uint_t flags, uint_t rid) 2082 { 2083 tte_t tte; 2084 2085 2086 ASSERT(hat != NULL); 2087 ASSERT(PAGE_LOCKED(pp)); 2088 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2089 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2090 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2091 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2092 2093 if (PP_ISFREE(pp)) { 2094 panic("hat_memload: loading a mapping to free page %p", 2095 (void *)pp); 2096 } 2097 2098 if (hat->sfmmu_xhat_provider) { 2099 /* no regions for xhats */ 2100 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2101 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 2102 return; 2103 } 2104 2105 ASSERT((hat == ksfmmup) || 2106 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2107 2108 if (flags & ~SFMMU_LOAD_ALLFLAG) 2109 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2110 flags & ~SFMMU_LOAD_ALLFLAG); 2111 2112 if (hat->sfmmu_rmstat) 2113 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2114 2115 #if defined(SF_ERRATA_57) 2116 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2117 (addr < errata57_limit) && (attr & PROT_EXEC) && 2118 !(flags & HAT_LOAD_SHARE)) { 2119 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2120 " page executable"); 2121 attr &= ~PROT_EXEC; 2122 } 2123 #endif 2124 2125 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2126 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2127 2128 /* 2129 * Check TSB and TLB page sizes. 
2130 */ 2131 if ((flags & HAT_LOAD_SHARE) == 0) { 2132 sfmmu_check_page_sizes(hat, 1); 2133 } 2134 } 2135 2136 /* 2137 * hat_devload can be called to map real memory (e.g. 2138 * /dev/kmem) and even though hat_devload will determine pf is 2139 * for memory, it will be unable to get a shared lock on the 2140 * page (because someone else has it exclusively) and will 2141 * pass dp = NULL. If tteload doesn't get a non-NULL 2142 * page pointer it can't cache memory. 2143 */ 2144 void 2145 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2146 uint_t attr, int flags) 2147 { 2148 tte_t tte; 2149 struct page *pp = NULL; 2150 int use_lgpg = 0; 2151 2152 ASSERT(hat != NULL); 2153 2154 if (hat->sfmmu_xhat_provider) { 2155 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 2156 return; 2157 } 2158 2159 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2160 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2161 ASSERT((hat == ksfmmup) || 2162 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2163 if (len == 0) 2164 panic("hat_devload: zero len"); 2165 if (flags & ~SFMMU_LOAD_ALLFLAG) 2166 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2167 flags & ~SFMMU_LOAD_ALLFLAG); 2168 2169 #if defined(SF_ERRATA_57) 2170 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2171 (addr < errata57_limit) && (attr & PROT_EXEC) && 2172 !(flags & HAT_LOAD_SHARE)) { 2173 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2174 " page executable"); 2175 attr &= ~PROT_EXEC; 2176 } 2177 #endif 2178 2179 /* 2180 * If it's a memory page find its pp 2181 */ 2182 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2183 pp = page_numtopp_nolock(pfn); 2184 if (pp == NULL) { 2185 flags |= HAT_LOAD_NOCONSIST; 2186 } else { 2187 if (PP_ISFREE(pp)) { 2188 panic("hat_memload: loading " 2189 "a mapping to free page %p", 2190 (void *)pp); 2191 } 2192 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2193 panic("hat_memload: loading a mapping " 2194 "to unlocked relocatable page %p", 2195 (void *)pp); 2196 } 2197 ASSERT(len == MMU_PAGESIZE); 2198 } 2199 } 2200 2201 if (hat->sfmmu_rmstat) 2202 hat_resvstat(len, hat->sfmmu_as, addr); 2203 2204 if (flags & HAT_LOAD_NOCONSIST) { 2205 attr |= SFMMU_UNCACHEVTTE; 2206 use_lgpg = 1; 2207 } 2208 if (!pf_is_memory(pfn)) { 2209 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2210 use_lgpg = 1; 2211 switch (attr & HAT_ORDER_MASK) { 2212 case HAT_STRICTORDER: 2213 case HAT_UNORDERED_OK: 2214 /* 2215 * we set the side effect bit for all non 2216 * memory mappings unless merging is ok 2217 */ 2218 attr |= SFMMU_SIDEFFECT; 2219 break; 2220 case HAT_MERGING_OK: 2221 case HAT_LOADCACHING_OK: 2222 case HAT_STORECACHING_OK: 2223 break; 2224 default: 2225 panic("hat_devload: bad attr"); 2226 break; 2227 } 2228 } 2229 while (len) { 2230 if (!use_lgpg) { 2231 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2232 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2233 flags, SFMMU_INVALID_SHMERID); 2234 len -= MMU_PAGESIZE; 2235 addr += MMU_PAGESIZE; 2236 pfn++; 2237 continue; 2238 } 2239 /* 2240 * try to use large pages, check va/pa alignments 2241 * Note that 32M/256M page sizes are not (yet) supported. 
2242 */ 2243 if ((len >= MMU_PAGESIZE4M) && 2244 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 2245 !(disable_large_pages & (1 << TTE4M)) && 2246 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 2247 sfmmu_memtte(&tte, pfn, attr, TTE4M); 2248 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2249 flags, SFMMU_INVALID_SHMERID); 2250 len -= MMU_PAGESIZE4M; 2251 addr += MMU_PAGESIZE4M; 2252 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 2253 } else if ((len >= MMU_PAGESIZE512K) && 2254 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 2255 !(disable_large_pages & (1 << TTE512K)) && 2256 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 2257 sfmmu_memtte(&tte, pfn, attr, TTE512K); 2258 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2259 flags, SFMMU_INVALID_SHMERID); 2260 len -= MMU_PAGESIZE512K; 2261 addr += MMU_PAGESIZE512K; 2262 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 2263 } else if ((len >= MMU_PAGESIZE64K) && 2264 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 2265 !(disable_large_pages & (1 << TTE64K)) && 2266 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 2267 sfmmu_memtte(&tte, pfn, attr, TTE64K); 2268 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2269 flags, SFMMU_INVALID_SHMERID); 2270 len -= MMU_PAGESIZE64K; 2271 addr += MMU_PAGESIZE64K; 2272 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 2273 } else { 2274 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2275 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2276 flags, SFMMU_INVALID_SHMERID); 2277 len -= MMU_PAGESIZE; 2278 addr += MMU_PAGESIZE; 2279 pfn++; 2280 } 2281 } 2282 2283 /* 2284 * Check TSB and TLB page sizes. 2285 */ 2286 if ((flags & HAT_LOAD_SHARE) == 0) { 2287 sfmmu_check_page_sizes(hat, 1); 2288 } 2289 } 2290 2291 void 2292 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2293 struct page **pps, uint_t attr, uint_t flags) 2294 { 2295 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2296 SFMMU_INVALID_SHMERID); 2297 } 2298 2299 void 2300 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 2301 struct page **pps, uint_t attr, uint_t flags, 2302 hat_region_cookie_t rcookie) 2303 { 2304 uint_t rid; 2305 if (rcookie == HAT_INVALID_REGION_COOKIE || 2306 hat->sfmmu_xhat_provider != NULL) { 2307 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2308 SFMMU_INVALID_SHMERID); 2309 return; 2310 } 2311 rid = (uint_t)((uint64_t)rcookie); 2312 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2313 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid); 2314 } 2315 2316 /* 2317 * Map the largest extend possible out of the page array. The array may NOT 2318 * be in order. The largest possible mapping a page can have 2319 * is specified in the p_szc field. The p_szc field 2320 * cannot change as long as there any mappings (large or small) 2321 * to any of the pages that make up the large page. (ie. any 2322 * promotion/demotion of page size is not up to the hat but up to 2323 * the page free list manager). The array 2324 * should consist of properly aligned contigous pages that are 2325 * part of a big page for a large mapping to be created. 
2326 */ 2327 static void 2328 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len, 2329 struct page **pps, uint_t attr, uint_t flags, uint_t rid) 2330 { 2331 int ttesz; 2332 size_t mapsz; 2333 pgcnt_t numpg, npgs; 2334 tte_t tte; 2335 page_t *pp; 2336 uint_t large_pages_disable; 2337 2338 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2339 SFMMU_VALIDATE_HMERID(hat, rid, addr, len); 2340 2341 if (hat->sfmmu_xhat_provider) { 2342 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2343 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 2344 return; 2345 } 2346 2347 if (hat->sfmmu_rmstat) 2348 hat_resvstat(len, hat->sfmmu_as, addr); 2349 2350 #if defined(SF_ERRATA_57) 2351 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2352 (addr < errata57_limit) && (attr & PROT_EXEC) && 2353 !(flags & HAT_LOAD_SHARE)) { 2354 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2355 "user page executable"); 2356 attr &= ~PROT_EXEC; 2357 } 2358 #endif 2359 2360 /* Get number of pages */ 2361 npgs = len >> MMU_PAGESHIFT; 2362 2363 if (flags & HAT_LOAD_SHARE) { 2364 large_pages_disable = disable_ism_large_pages; 2365 } else { 2366 large_pages_disable = disable_large_pages; 2367 } 2368 2369 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2370 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2371 rid); 2372 return; 2373 } 2374 2375 while (npgs >= NHMENTS) { 2376 pp = *pps; 2377 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2378 /* 2379 * Check if this page size is disabled. 2380 */ 2381 if (large_pages_disable & (1 << ttesz)) 2382 continue; 2383 2384 numpg = TTEPAGES(ttesz); 2385 mapsz = numpg << MMU_PAGESHIFT; 2386 if ((npgs >= numpg) && 2387 IS_P2ALIGNED(addr, mapsz) && 2388 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2389 /* 2390 * At this point we have enough pages and 2391 * we know the virtual address and the pfn 2392 * are properly aligned. We still need 2393 * to check for physical contiguity but since 2394 * it is very likely that this is the case 2395 * we will assume they are so and undo 2396 * the request if necessary. It would 2397 * be great if we could get a hint flag 2398 * like HAT_CONTIG which would tell us 2399 * the pages are contigous for sure. 2400 */ 2401 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2402 attr, ttesz); 2403 if (!sfmmu_tteload_array(hat, &tte, addr, 2404 pps, flags, rid)) { 2405 break; 2406 } 2407 } 2408 } 2409 if (ttesz == TTE8K) { 2410 /* 2411 * We were not able to map array using a large page 2412 * batch a hmeblk or fraction at a time. 2413 */ 2414 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2415 & (NHMENTS-1); 2416 numpg = NHMENTS - numpg; 2417 ASSERT(numpg <= npgs); 2418 mapsz = numpg * MMU_PAGESIZE; 2419 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2420 numpg, rid); 2421 } 2422 addr += mapsz; 2423 npgs -= numpg; 2424 pps += numpg; 2425 } 2426 2427 if (npgs) { 2428 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2429 rid); 2430 } 2431 2432 /* 2433 * Check TSB and TLB page sizes. 2434 */ 2435 if ((flags & HAT_LOAD_SHARE) == 0) { 2436 sfmmu_check_page_sizes(hat, 1); 2437 } 2438 } 2439 2440 /* 2441 * Function tries to batch 8K pages into the same hme blk. 2442 */ 2443 static void 2444 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2445 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid) 2446 { 2447 tte_t tte; 2448 page_t *pp; 2449 struct hmehash_bucket *hmebp; 2450 struct hme_blk *hmeblkp; 2451 int index; 2452 2453 while (npgs) { 2454 /* 2455 * Acquire the hash bucket. 
2456 */ 2457 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2458 rid); 2459 ASSERT(hmebp); 2460 2461 /* 2462 * Find the hment block. 2463 */ 2464 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2465 TTE8K, flags, rid); 2466 ASSERT(hmeblkp); 2467 2468 do { 2469 /* 2470 * Make the tte. 2471 */ 2472 pp = *pps; 2473 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2474 2475 /* 2476 * Add the translation. 2477 */ 2478 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2479 vaddr, pps, flags, rid); 2480 2481 /* 2482 * Goto next page. 2483 */ 2484 pps++; 2485 npgs--; 2486 2487 /* 2488 * Goto next address. 2489 */ 2490 vaddr += MMU_PAGESIZE; 2491 2492 /* 2493 * Don't crossover into a different hmentblk. 2494 */ 2495 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2496 (NHMENTS-1)); 2497 2498 } while (index != 0 && npgs != 0); 2499 2500 /* 2501 * Release the hash bucket. 2502 */ 2503 2504 sfmmu_tteload_release_hashbucket(hmebp); 2505 } 2506 } 2507 2508 /* 2509 * Construct a tte for a page: 2510 * 2511 * tte_valid = 1 2512 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2513 * tte_size = size 2514 * tte_nfo = attr & HAT_NOFAULT 2515 * tte_ie = attr & HAT_STRUCTURE_LE 2516 * tte_hmenum = hmenum 2517 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2518 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2519 * tte_ref = 1 (optimization) 2520 * tte_wr_perm = attr & PROT_WRITE; 2521 * tte_no_sync = attr & HAT_NOSYNC 2522 * tte_lock = attr & SFMMU_LOCKTTE 2523 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2524 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2525 * tte_e = attr & SFMMU_SIDEFFECT 2526 * tte_priv = !(attr & PROT_USER) 2527 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2528 * tte_glb = 0 2529 */ 2530 void 2531 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2532 { 2533 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2534 2535 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2536 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2537 2538 if (TTE_IS_NOSYNC(ttep)) { 2539 TTE_SET_REF(ttep); 2540 if (TTE_IS_WRITABLE(ttep)) { 2541 TTE_SET_MOD(ttep); 2542 } 2543 } 2544 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2545 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2546 } 2547 } 2548 2549 /* 2550 * This function will add a translation to the hme_blk and allocate the 2551 * hme_blk if one does not exist. 2552 * If a page structure is specified then it will add the 2553 * corresponding hment to the mapping list. 2554 * It will also update the hmenum field for the tte. 2555 * 2556 * Currently this function is only used for kernel mappings. 2557 * So pass invalid region to sfmmu_tteload_array(). 2558 */ 2559 void 2560 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2561 uint_t flags) 2562 { 2563 ASSERT(sfmmup == ksfmmup); 2564 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2565 SFMMU_INVALID_SHMERID); 2566 } 2567 2568 /* 2569 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2570 * Assumes that a particular page size may only be resident in one TSB. 
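 * For the kernel this means 8K/64K/512K entries go into the first
 * TSB and 4M (and larger) entries go into the 4M TSB; for user
 * processes the TSB is selected by SFMMU_GET_TSBINFO() based on the
 * tte size.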
2571 */ 2572 static void 2573 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2574 { 2575 struct tsb_info *tsbinfop = NULL; 2576 uint64_t tag; 2577 struct tsbe *tsbe_addr; 2578 uint64_t tsb_base; 2579 uint_t tsb_size; 2580 int vpshift = MMU_PAGESHIFT; 2581 int phys = 0; 2582 2583 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2584 phys = ktsb_phys; 2585 if (ttesz >= TTE4M) { 2586 #ifndef sun4v 2587 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2588 #endif 2589 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2590 tsb_size = ktsb4m_szcode; 2591 } else { 2592 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2593 tsb_size = ktsb_szcode; 2594 } 2595 } else { 2596 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2597 2598 /* 2599 * If there isn't a TSB for this page size, or the TSB is 2600 * swapped out, there is nothing to do. Note that the latter 2601 * case seems impossible but can occur if hat_pageunload() 2602 * is called on an ISM mapping while the process is swapped 2603 * out. 2604 */ 2605 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2606 return; 2607 2608 /* 2609 * If another thread is in the middle of relocating a TSB 2610 * we can't unload the entry so set a flag so that the 2611 * TSB will be flushed before it can be accessed by the 2612 * process. 2613 */ 2614 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2615 if (ttep == NULL) 2616 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2617 return; 2618 } 2619 #if defined(UTSB_PHYS) 2620 phys = 1; 2621 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2622 #else 2623 tsb_base = (uint64_t)tsbinfop->tsb_va; 2624 #endif 2625 tsb_size = tsbinfop->tsb_szc; 2626 } 2627 if (ttesz >= TTE4M) 2628 vpshift = MMU_PAGESHIFT4M; 2629 2630 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2631 tag = sfmmu_make_tsbtag(vaddr); 2632 2633 if (ttep == NULL) { 2634 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2635 } else { 2636 if (ttesz >= TTE4M) { 2637 SFMMU_STAT(sf_tsb_load4m); 2638 } else { 2639 SFMMU_STAT(sf_tsb_load8k); 2640 } 2641 2642 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2643 } 2644 } 2645 2646 /* 2647 * Unmap all entries from [start, end) matching the given page size. 2648 * 2649 * This function is used primarily to unmap replicated 64K or 512K entries 2650 * from the TSB that are inserted using the base page size TSB pointer, but 2651 * it may also be called to unmap a range of addresses from the TSB. 2652 */ 2653 void 2654 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2655 { 2656 struct tsb_info *tsbinfop; 2657 uint64_t tag; 2658 struct tsbe *tsbe_addr; 2659 caddr_t vaddr; 2660 uint64_t tsb_base; 2661 int vpshift, vpgsz; 2662 uint_t tsb_size; 2663 int phys = 0; 2664 2665 /* 2666 * Assumptions: 2667 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2668 * at a time shooting down any valid entries we encounter. 2669 * 2670 * If ttesz >= 4M we walk the range 4M at a time shooting 2671 * down any valid mappings we find. 2672 */ 2673 if (sfmmup == ksfmmup) { 2674 phys = ktsb_phys; 2675 if (ttesz >= TTE4M) { 2676 #ifndef sun4v 2677 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2678 #endif 2679 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2680 tsb_size = ktsb4m_szcode; 2681 } else { 2682 tsb_base = (phys)? 
ktsb_pbase : (uint64_t)ktsb_base; 2683 tsb_size = ktsb_szcode; 2684 } 2685 } else { 2686 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2687 2688 /* 2689 * If there isn't a TSB for this page size, or the TSB is 2690 * swapped out, there is nothing to do. Note that the latter 2691 * case seems impossible but can occur if hat_pageunload() 2692 * is called on an ISM mapping while the process is swapped 2693 * out. 2694 */ 2695 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2696 return; 2697 2698 /* 2699 * If another thread is in the middle of relocating a TSB 2700 * we can't unload the entry so set a flag so that the 2701 * TSB will be flushed before it can be accessed by the 2702 * process. 2703 */ 2704 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2705 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2706 return; 2707 } 2708 #if defined(UTSB_PHYS) 2709 phys = 1; 2710 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2711 #else 2712 tsb_base = (uint64_t)tsbinfop->tsb_va; 2713 #endif 2714 tsb_size = tsbinfop->tsb_szc; 2715 } 2716 if (ttesz >= TTE4M) { 2717 vpshift = MMU_PAGESHIFT4M; 2718 vpgsz = MMU_PAGESIZE4M; 2719 } else { 2720 vpshift = MMU_PAGESHIFT; 2721 vpgsz = MMU_PAGESIZE; 2722 } 2723 2724 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2725 tag = sfmmu_make_tsbtag(vaddr); 2726 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2727 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2728 } 2729 } 2730 2731 /* 2732 * Select the optimum TSB size given the number of mappings 2733 * that need to be cached. 2734 */ 2735 static int 2736 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2737 { 2738 int szc = 0; 2739 2740 #ifdef DEBUG 2741 if (tsb_grow_stress) { 2742 uint32_t randval = (uint32_t)gettick() >> 4; 2743 return (randval % (tsb_max_growsize + 1)); 2744 } 2745 #endif /* DEBUG */ 2746 2747 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2748 szc++; 2749 return (szc); 2750 } 2751 2752 /* 2753 * This function will add a translation to the hme_blk and allocate the 2754 * hme_blk if one does not exist. 2755 * If a page structure is specified then it will add the 2756 * corresponding hment to the mapping list. 2757 * It will also update the hmenum field for the tte. 2758 * Furthermore, it attempts to create a large page translation 2759 * for <addr,hat> at page array pps. It assumes addr and first 2760 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2761 */ 2762 static int 2763 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2764 page_t **pps, uint_t flags, uint_t rid) 2765 { 2766 struct hmehash_bucket *hmebp; 2767 struct hme_blk *hmeblkp; 2768 int ret; 2769 uint_t size; 2770 2771 /* 2772 * Get mapping size. 2773 */ 2774 size = TTE_CSZ(ttep); 2775 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2776 2777 /* 2778 * Acquire the hash bucket. 2779 */ 2780 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2781 ASSERT(hmebp); 2782 2783 /* 2784 * Find the hment block. 2785 */ 2786 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2787 rid); 2788 ASSERT(hmeblkp); 2789 2790 /* 2791 * Add the translation. 2792 */ 2793 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2794 rid); 2795 2796 /* 2797 * Release the hash bucket. 2798 */ 2799 sfmmu_tteload_release_hashbucket(hmebp); 2800 2801 return (ret); 2802 } 2803 2804 /* 2805 * Function locks and returns a pointer to the hash bucket for vaddr and size. 
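 * The bucket is selected by hashing the hat (or, for shared regions,
 * the SRD) tag id together with the base page of vaddr at the given
 * mapping size; the caller must release it with
 * sfmmu_tteload_release_hashbucket().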
2806 */ 2807 static struct hmehash_bucket * 2808 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size, 2809 uint_t rid) 2810 { 2811 struct hmehash_bucket *hmebp; 2812 int hmeshift; 2813 void *htagid = sfmmutohtagid(sfmmup, rid); 2814 2815 ASSERT(htagid != NULL); 2816 2817 hmeshift = HME_HASH_SHIFT(size); 2818 2819 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift); 2820 2821 SFMMU_HASH_LOCK(hmebp); 2822 2823 return (hmebp); 2824 } 2825 2826 /* 2827 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2828 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2829 * allocated. 2830 */ 2831 static struct hme_blk * 2832 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2833 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid) 2834 { 2835 hmeblk_tag hblktag; 2836 int hmeshift; 2837 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2838 uint64_t hblkpa, prevpa; 2839 struct kmem_cache *sfmmu_cache; 2840 uint_t forcefree; 2841 2842 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2843 2844 hblktag.htag_id = sfmmutohtagid(sfmmup, rid); 2845 ASSERT(hblktag.htag_id != NULL); 2846 hmeshift = HME_HASH_SHIFT(size); 2847 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2848 hblktag.htag_rehash = HME_HASH_REHASH(size); 2849 hblktag.htag_rid = rid; 2850 2851 ttearray_realloc: 2852 2853 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2854 pr_hblk, prevpa, &list); 2855 2856 /* 2857 * We block until hblk_reserve_lock is released; it's held by 2858 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2859 * replaced by a hblk from sfmmu8_cache. 2860 */ 2861 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2862 hblk_reserve_thread != curthread) { 2863 SFMMU_HASH_UNLOCK(hmebp); 2864 mutex_enter(&hblk_reserve_lock); 2865 mutex_exit(&hblk_reserve_lock); 2866 SFMMU_STAT(sf_hblk_reserve_hit); 2867 SFMMU_HASH_LOCK(hmebp); 2868 goto ttearray_realloc; 2869 } 2870 2871 if (hmeblkp == NULL) { 2872 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2873 hblktag, flags, rid); 2874 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2875 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2876 } else { 2877 /* 2878 * It is possible for 8k and 64k hblks to collide since they 2879 * have the same rehash value. This is because we 2880 * lazily free hblks and 8K/64K blks could be lingering. 2881 * If we find size mismatch we free the block and & try again. 2882 */ 2883 if (get_hblk_ttesz(hmeblkp) != size) { 2884 ASSERT(!hmeblkp->hblk_vcnt); 2885 ASSERT(!hmeblkp->hblk_hmecnt); 2886 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2887 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2888 goto ttearray_realloc; 2889 } 2890 if (hmeblkp->hblk_shw_bit) { 2891 /* 2892 * if the hblk was previously used as a shadow hblk then 2893 * we will change it to a normal hblk 2894 */ 2895 ASSERT(!hmeblkp->hblk_shared); 2896 if (hmeblkp->hblk_shw_mask) { 2897 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2898 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2899 goto ttearray_realloc; 2900 } else { 2901 hmeblkp->hblk_shw_bit = 0; 2902 } 2903 } 2904 SFMMU_STAT(sf_hblk_hit); 2905 } 2906 2907 /* 2908 * hat_memload() should never call kmem_cache_free(); see block 2909 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2910 * enqueue each hblk in the list to reserve list if it's created 2911 * from sfmmu8_cache *and* sfmmup == KHATID. 2912 */ 2913 forcefree = (sfmmup == KHATID) ? 
1 : 0; 2914 while ((pr_hblk = list) != NULL) { 2915 list = pr_hblk->hblk_next; 2916 sfmmu_cache = get_hblk_cache(pr_hblk); 2917 if ((sfmmu_cache == sfmmu8_cache) && 2918 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2919 continue; 2920 2921 ASSERT(sfmmup != KHATID); 2922 kmem_cache_free(sfmmu_cache, pr_hblk); 2923 } 2924 2925 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2926 ASSERT(!hmeblkp->hblk_shw_bit); 2927 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2928 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2929 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 2930 2931 return (hmeblkp); 2932 } 2933 2934 /* 2935 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2936 * otherwise. 2937 */ 2938 static int 2939 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2940 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 2941 { 2942 page_t *pp = *pps; 2943 int hmenum, size, remap; 2944 tte_t tteold, flush_tte; 2945 #ifdef DEBUG 2946 tte_t orig_old; 2947 #endif /* DEBUG */ 2948 struct sf_hment *sfhme; 2949 kmutex_t *pml, *pmtx; 2950 hatlock_t *hatlockp; 2951 int myflt; 2952 2953 /* 2954 * remove this panic when we decide to let user virtual address 2955 * space be >= USERLIMIT. 2956 */ 2957 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2958 panic("user addr %p in kernel space", vaddr); 2959 #if defined(TTE_IS_GLOBAL) 2960 if (TTE_IS_GLOBAL(ttep)) 2961 panic("sfmmu_tteload: creating global tte"); 2962 #endif 2963 2964 #ifdef DEBUG 2965 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2966 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2967 panic("sfmmu_tteload: non cacheable memory tte"); 2968 #endif /* DEBUG */ 2969 2970 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 2971 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 2972 TTE_SET_REF(ttep); 2973 TTE_SET_MOD(ttep); 2974 } 2975 2976 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2977 !TTE_IS_MOD(ttep)) { 2978 /* 2979 * Don't load TSB for dummy as in ISM. Also don't preload 2980 * the TSB if the TTE isn't writable since we're likely to 2981 * fault on it again -- preloading can be fairly expensive. 2982 */ 2983 flags |= SFMMU_NO_TSBLOAD; 2984 } 2985 2986 size = TTE_CSZ(ttep); 2987 switch (size) { 2988 case TTE8K: 2989 SFMMU_STAT(sf_tteload8k); 2990 break; 2991 case TTE64K: 2992 SFMMU_STAT(sf_tteload64k); 2993 break; 2994 case TTE512K: 2995 SFMMU_STAT(sf_tteload512k); 2996 break; 2997 case TTE4M: 2998 SFMMU_STAT(sf_tteload4m); 2999 break; 3000 case (TTE32M): 3001 SFMMU_STAT(sf_tteload32m); 3002 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3003 break; 3004 case (TTE256M): 3005 SFMMU_STAT(sf_tteload256m); 3006 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3007 break; 3008 } 3009 3010 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 3011 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 3012 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3013 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3014 3015 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3016 3017 /* 3018 * Need to grab mlist lock here so that pageunload 3019 * will not change tte behind us. 3020 */ 3021 if (pp) { 3022 pml = sfmmu_mlist_enter(pp); 3023 } 3024 3025 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3026 /* 3027 * Look for corresponding hment and if valid verify 3028 * pfns are equal. 
3029 */ 3030 remap = TTE_IS_VALID(&tteold); 3031 if (remap) { 3032 pfn_t new_pfn, old_pfn; 3033 3034 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3035 new_pfn = TTE_TO_PFN(vaddr, ttep); 3036 3037 if (flags & HAT_LOAD_REMAP) { 3038 /* make sure we are remapping same type of pages */ 3039 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3040 panic("sfmmu_tteload - tte remap io<->memory"); 3041 } 3042 if (old_pfn != new_pfn && 3043 (pp != NULL || sfhme->hme_page != NULL)) { 3044 panic("sfmmu_tteload - tte remap pp != NULL"); 3045 } 3046 } else if (old_pfn != new_pfn) { 3047 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3048 (void *)hmeblkp); 3049 } 3050 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3051 } 3052 3053 if (pp) { 3054 if (size == TTE8K) { 3055 #ifdef VAC 3056 /* 3057 * Handle VAC consistency 3058 */ 3059 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3060 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3061 } 3062 #endif 3063 3064 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3065 pmtx = sfmmu_page_enter(pp); 3066 PP_CLRRO(pp); 3067 sfmmu_page_exit(pmtx); 3068 } else if (!PP_ISMAPPED(pp) && 3069 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3070 pmtx = sfmmu_page_enter(pp); 3071 if (!(PP_ISMOD(pp))) { 3072 PP_SETRO(pp); 3073 } 3074 sfmmu_page_exit(pmtx); 3075 } 3076 3077 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3078 /* 3079 * sfmmu_pagearray_setup failed so return 3080 */ 3081 sfmmu_mlist_exit(pml); 3082 return (1); 3083 } 3084 } 3085 3086 /* 3087 * Make sure hment is not on a mapping list. 3088 */ 3089 ASSERT(remap || (sfhme->hme_page == NULL)); 3090 3091 /* if it is not a remap then hme->next better be NULL */ 3092 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3093 3094 if (flags & HAT_LOAD_LOCK) { 3095 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3096 panic("too high lckcnt-hmeblk %p", 3097 (void *)hmeblkp); 3098 } 3099 atomic_add_32(&hmeblkp->hblk_lckcnt, 1); 3100 3101 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3102 } 3103 3104 #ifdef VAC 3105 if (pp && PP_ISNC(pp)) { 3106 /* 3107 * If the physical page is marked to be uncacheable, like 3108 * by a vac conflict, make sure the new mapping is also 3109 * uncacheable. 3110 */ 3111 TTE_CLR_VCACHEABLE(ttep); 3112 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3113 } 3114 #endif 3115 ttep->tte_hmenum = hmenum; 3116 3117 #ifdef DEBUG 3118 orig_old = tteold; 3119 #endif /* DEBUG */ 3120 3121 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3122 if ((sfmmup == KHATID) && 3123 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3124 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3125 } 3126 #ifdef DEBUG 3127 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3128 #endif /* DEBUG */ 3129 } 3130 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3131 3132 if (!TTE_IS_VALID(&tteold)) { 3133 3134 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 3135 if (rid == SFMMU_INVALID_SHMERID) { 3136 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 3137 } else { 3138 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3139 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3140 /* 3141 * We already accounted for region ttecnt's in sfmmu 3142 * during hat_join_region() processing. Here we 3143 * only update ttecnt's in region struture. 
3144 */ 3145 atomic_add_long(&rgnp->rgn_ttecnt[size], 1); 3146 } 3147 } 3148 3149 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3150 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3151 sfmmup != ksfmmup) { 3152 uchar_t tteflag = 1 << size; 3153 if (rid == SFMMU_INVALID_SHMERID) { 3154 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3155 hatlockp = sfmmu_hat_enter(sfmmup); 3156 sfmmup->sfmmu_tteflags |= tteflag; 3157 sfmmu_hat_exit(hatlockp); 3158 } 3159 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3160 hatlockp = sfmmu_hat_enter(sfmmup); 3161 sfmmup->sfmmu_rtteflags |= tteflag; 3162 sfmmu_hat_exit(hatlockp); 3163 } 3164 /* 3165 * Update the current CPU tsbmiss area, so the current thread 3166 * won't need to take the tsbmiss for the new pagesize. 3167 * The other threads in the process will update their tsb 3168 * miss area lazily in sfmmu_tsbmiss_exception() when they 3169 * fail to find the translation for a newly added pagesize. 3170 */ 3171 if (size > TTE64K && myflt) { 3172 struct tsbmiss *tsbmp; 3173 kpreempt_disable(); 3174 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3175 if (rid == SFMMU_INVALID_SHMERID) { 3176 if (!(tsbmp->uhat_tteflags & tteflag)) { 3177 tsbmp->uhat_tteflags |= tteflag; 3178 } 3179 } else { 3180 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3181 tsbmp->uhat_rtteflags |= tteflag; 3182 } 3183 } 3184 kpreempt_enable(); 3185 } 3186 } 3187 3188 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3189 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3190 hatlockp = sfmmu_hat_enter(sfmmup); 3191 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3192 sfmmu_hat_exit(hatlockp); 3193 } 3194 3195 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3196 hw_tte.tte_intlo; 3197 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3198 hw_tte.tte_inthi; 3199 3200 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3201 /* 3202 * If remap and new tte differs from old tte we need 3203 * to sync the mod bit and flush TLB/TSB. We don't 3204 * need to sync ref bit because we currently always set 3205 * ref bit in tteload. 3206 */ 3207 ASSERT(TTE_IS_REF(ttep)); 3208 if (TTE_IS_MOD(&tteold)) { 3209 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3210 } 3211 /* 3212 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3213 * hmes are only used for read only text. Adding this code for 3214 * completeness and future use of shared hmeblks with writable 3215 * mappings of VMODSORT vnodes. 3216 */ 3217 if (hmeblkp->hblk_shared) { 3218 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3219 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3220 xt_sync(cpuset); 3221 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3222 } else { 3223 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3224 xt_sync(sfmmup->sfmmu_cpusran); 3225 } 3226 } 3227 3228 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3229 /* 3230 * We only preload 8K and 4M mappings into the TSB, since 3231 * 64K and 512K mappings are replicated and hence don't 3232 * have a single, unique TSB entry. Ditto for 32M/256M. 3233 */ 3234 if (size == TTE8K || size == TTE4M) { 3235 sf_scd_t *scdp; 3236 hatlockp = sfmmu_hat_enter(sfmmup); 3237 /* 3238 * Don't preload private TSB if the mapping is used 3239 * by the shctx in the SCD. 
3240 */ 3241 scdp = sfmmup->sfmmu_scdp; 3242 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3243 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3244 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3245 size); 3246 } 3247 sfmmu_hat_exit(hatlockp); 3248 } 3249 } 3250 if (pp) { 3251 if (!remap) { 3252 HME_ADD(sfhme, pp); 3253 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 3254 ASSERT(hmeblkp->hblk_hmecnt > 0); 3255 3256 /* 3257 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3258 * see pageunload() for comment. 3259 */ 3260 } 3261 sfmmu_mlist_exit(pml); 3262 } 3263 3264 return (0); 3265 } 3266 /* 3267 * Function unlocks hash bucket. 3268 */ 3269 static void 3270 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3271 { 3272 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3273 SFMMU_HASH_UNLOCK(hmebp); 3274 } 3275 3276 /* 3277 * function which checks and sets up page array for a large 3278 * translation. Will set p_vcolor, p_index, p_ro fields. 3279 * Assumes addr and pfnum of first page are properly aligned. 3280 * Will check for physical contiguity. If check fails it return 3281 * non null. 3282 */ 3283 static int 3284 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3285 { 3286 int i, index, ttesz; 3287 pfn_t pfnum; 3288 pgcnt_t npgs; 3289 page_t *pp, *pp1; 3290 kmutex_t *pmtx; 3291 #ifdef VAC 3292 int osz; 3293 int cflags = 0; 3294 int vac_err = 0; 3295 #endif 3296 int newidx = 0; 3297 3298 ttesz = TTE_CSZ(ttep); 3299 3300 ASSERT(ttesz > TTE8K); 3301 3302 npgs = TTEPAGES(ttesz); 3303 index = PAGESZ_TO_INDEX(ttesz); 3304 3305 pfnum = (*pps)->p_pagenum; 3306 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3307 3308 /* 3309 * Save the first pp so we can do HAT_TMPNC at the end. 3310 */ 3311 pp1 = *pps; 3312 #ifdef VAC 3313 osz = fnd_mapping_sz(pp1); 3314 #endif 3315 3316 for (i = 0; i < npgs; i++, pps++) { 3317 pp = *pps; 3318 ASSERT(PAGE_LOCKED(pp)); 3319 ASSERT(pp->p_szc >= ttesz); 3320 ASSERT(pp->p_szc == pp1->p_szc); 3321 ASSERT(sfmmu_mlist_held(pp)); 3322 3323 /* 3324 * XXX is it possible to maintain P_RO on the root only? 3325 */ 3326 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3327 pmtx = sfmmu_page_enter(pp); 3328 PP_CLRRO(pp); 3329 sfmmu_page_exit(pmtx); 3330 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 3331 !PP_ISMOD(pp)) { 3332 pmtx = sfmmu_page_enter(pp); 3333 if (!(PP_ISMOD(pp))) { 3334 PP_SETRO(pp); 3335 } 3336 sfmmu_page_exit(pmtx); 3337 } 3338 3339 /* 3340 * If this is a remap we skip vac & contiguity checks. 3341 */ 3342 if (remap) 3343 continue; 3344 3345 /* 3346 * set p_vcolor and detect any vac conflicts. 3347 */ 3348 #ifdef VAC 3349 if (vac_err == 0) { 3350 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 3351 3352 } 3353 #endif 3354 3355 /* 3356 * Save current index in case we need to undo it. 3357 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 3358 * "SFMMU_INDEX_SHIFT 6" 3359 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 3360 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 3361 * 3362 * So: index = PAGESZ_TO_INDEX(ttesz); 3363 * if ttesz == 1 then index = 0x2 3364 * 2 then index = 0x4 3365 * 3 then index = 0x8 3366 * 4 then index = 0x10 3367 * 5 then index = 0x20 3368 * The code below checks if it's a new pagesize (ie, newidx) 3369 * in case we need to take it back out of p_index, 3370 * and then or's the new index into the existing index. 
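 *
 * For example (hypothetical values): loading a 4M mapping
 * (ttesz == 3, index == 0x8) on a page whose p_index is currently
 * 0x2 (an existing 64K mapping) sets newidx, since bit 0x8 was not
 * previously present, and leaves p_index == 0xa; if the contiguity
 * check below later fails, that bit is taken back out of p_index
 * for the pages already processed.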
3371 */ 3372 if ((PP_MAPINDEX(pp) & index) == 0) 3373 newidx = 1; 3374 pp->p_index = (PP_MAPINDEX(pp) | index); 3375 3376 /* 3377 * contiguity check 3378 */ 3379 if (pp->p_pagenum != pfnum) { 3380 /* 3381 * If we fail the contiguity test then 3382 * the only thing we need to fix is the p_index field. 3383 * We might get a few extra flushes but since this 3384 * path is rare that is ok. The p_ro field will 3385 * get automatically fixed on the next tteload to 3386 * the page. NO TNC bit is set yet. 3387 */ 3388 while (i >= 0) { 3389 pp = *pps; 3390 if (newidx) 3391 pp->p_index = (PP_MAPINDEX(pp) & 3392 ~index); 3393 pps--; 3394 i--; 3395 } 3396 return (1); 3397 } 3398 pfnum++; 3399 addr += MMU_PAGESIZE; 3400 } 3401 3402 #ifdef VAC 3403 if (vac_err) { 3404 if (ttesz > osz) { 3405 /* 3406 * There are some smaller mappings that causes vac 3407 * conflicts. Convert all existing small mappings to 3408 * TNC. 3409 */ 3410 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3411 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3412 npgs); 3413 } else { 3414 /* EMPTY */ 3415 /* 3416 * If there exists an big page mapping, 3417 * that means the whole existing big page 3418 * has TNC setting already. No need to covert to 3419 * TNC again. 3420 */ 3421 ASSERT(PP_ISTNC(pp1)); 3422 } 3423 } 3424 #endif /* VAC */ 3425 3426 return (0); 3427 } 3428 3429 #ifdef VAC 3430 /* 3431 * Routine that detects vac consistency for a large page. It also 3432 * sets virtual color for all pp's for this big mapping. 3433 */ 3434 static int 3435 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3436 { 3437 int vcolor, ocolor; 3438 3439 ASSERT(sfmmu_mlist_held(pp)); 3440 3441 if (PP_ISNC(pp)) { 3442 return (HAT_TMPNC); 3443 } 3444 3445 vcolor = addr_to_vcolor(addr); 3446 if (PP_NEWPAGE(pp)) { 3447 PP_SET_VCOLOR(pp, vcolor); 3448 return (0); 3449 } 3450 3451 ocolor = PP_GET_VCOLOR(pp); 3452 if (ocolor == vcolor) { 3453 return (0); 3454 } 3455 3456 if (!PP_ISMAPPED(pp)) { 3457 /* 3458 * Previous user of page had a differnet color 3459 * but since there are no current users 3460 * we just flush the cache and change the color. 3461 * As an optimization for large pages we flush the 3462 * entire cache of that color and set a flag. 3463 */ 3464 SFMMU_STAT(sf_pgcolor_conflict); 3465 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3466 CacheColor_SetFlushed(*cflags, ocolor); 3467 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3468 } 3469 PP_SET_VCOLOR(pp, vcolor); 3470 return (0); 3471 } 3472 3473 /* 3474 * We got a real conflict with a current mapping. 3475 * set flags to start unencaching all mappings 3476 * and return failure so we restart looping 3477 * the pp array from the beginning. 3478 */ 3479 return (HAT_TMPNC); 3480 } 3481 #endif /* VAC */ 3482 3483 /* 3484 * creates a large page shadow hmeblk for a tte. 3485 * The purpose of this routine is to allow us to do quick unloads because 3486 * the vm layer can easily pass a very large but sparsely populated range. 
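 *
 * Each bit set in the shadow hblk's hblk_shw_mask marks one eighth
 * of its address range as possibly containing smaller-size hmeblks,
 * so an unload of a large but sparsely populated range can skip the
 * sub-ranges whose bits are clear.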
3487 */ 3488 static struct hme_blk * 3489 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3490 { 3491 struct hmehash_bucket *hmebp; 3492 hmeblk_tag hblktag; 3493 int hmeshift, size, vshift; 3494 uint_t shw_mask, newshw_mask; 3495 struct hme_blk *hmeblkp; 3496 3497 ASSERT(sfmmup != KHATID); 3498 if (mmu_page_sizes == max_mmu_page_sizes) { 3499 ASSERT(ttesz < TTE256M); 3500 } else { 3501 ASSERT(ttesz < TTE4M); 3502 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3503 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3504 } 3505 3506 if (ttesz == TTE8K) { 3507 size = TTE512K; 3508 } else { 3509 size = ++ttesz; 3510 } 3511 3512 hblktag.htag_id = sfmmup; 3513 hmeshift = HME_HASH_SHIFT(size); 3514 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3515 hblktag.htag_rehash = HME_HASH_REHASH(size); 3516 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3517 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3518 3519 SFMMU_HASH_LOCK(hmebp); 3520 3521 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3522 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3523 if (hmeblkp == NULL) { 3524 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3525 hblktag, flags, SFMMU_INVALID_SHMERID); 3526 } 3527 ASSERT(hmeblkp); 3528 if (!hmeblkp->hblk_shw_mask) { 3529 /* 3530 * if this is a unused hblk it was just allocated or could 3531 * potentially be a previous large page hblk so we need to 3532 * set the shadow bit. 3533 */ 3534 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3535 hmeblkp->hblk_shw_bit = 1; 3536 } else if (hmeblkp->hblk_shw_bit == 0) { 3537 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3538 (void *)hmeblkp); 3539 } 3540 ASSERT(hmeblkp->hblk_shw_bit == 1); 3541 ASSERT(!hmeblkp->hblk_shared); 3542 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3543 ASSERT(vshift < 8); 3544 /* 3545 * Atomically set shw mask bit 3546 */ 3547 do { 3548 shw_mask = hmeblkp->hblk_shw_mask; 3549 newshw_mask = shw_mask | (1 << vshift); 3550 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3551 newshw_mask); 3552 } while (newshw_mask != shw_mask); 3553 3554 SFMMU_HASH_UNLOCK(hmebp); 3555 3556 return (hmeblkp); 3557 } 3558 3559 /* 3560 * This routine cleanup a previous shadow hmeblk and changes it to 3561 * a regular hblk. This happens rarely but it is possible 3562 * when a process wants to use large pages and there are hblks still 3563 * lying around from the previous as that used these hmeblks. 3564 * The alternative was to cleanup the shadow hblks at unload time 3565 * but since so few user processes actually use large pages, it is 3566 * better to be lazy and cleanup at this time. 
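 * The conversion is triggered, for example, from
 * sfmmu_tteload_find_hmeblk() when a stale shadow hblk is found
 * where a regular mapping is now wanted.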
3567 */ 3568 static void 3569 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3570 struct hmehash_bucket *hmebp) 3571 { 3572 caddr_t addr, endaddr; 3573 int hashno, size; 3574 3575 ASSERT(hmeblkp->hblk_shw_bit); 3576 ASSERT(!hmeblkp->hblk_shared); 3577 3578 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3579 3580 if (!hmeblkp->hblk_shw_mask) { 3581 hmeblkp->hblk_shw_bit = 0; 3582 return; 3583 } 3584 addr = (caddr_t)get_hblk_base(hmeblkp); 3585 endaddr = get_hblk_endaddr(hmeblkp); 3586 size = get_hblk_ttesz(hmeblkp); 3587 hashno = size - 1; 3588 ASSERT(hashno > 0); 3589 SFMMU_HASH_UNLOCK(hmebp); 3590 3591 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3592 3593 SFMMU_HASH_LOCK(hmebp); 3594 } 3595 3596 static void 3597 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3598 int hashno) 3599 { 3600 int hmeshift, shadow = 0; 3601 hmeblk_tag hblktag; 3602 struct hmehash_bucket *hmebp; 3603 struct hme_blk *hmeblkp; 3604 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3605 uint64_t hblkpa, prevpa, nx_pa; 3606 3607 ASSERT(hashno > 0); 3608 hblktag.htag_id = sfmmup; 3609 hblktag.htag_rehash = hashno; 3610 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3611 3612 hmeshift = HME_HASH_SHIFT(hashno); 3613 3614 while (addr < endaddr) { 3615 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3616 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3617 SFMMU_HASH_LOCK(hmebp); 3618 /* inline HME_HASH_SEARCH */ 3619 hmeblkp = hmebp->hmeblkp; 3620 hblkpa = hmebp->hmeh_nextpa; 3621 prevpa = 0; 3622 pr_hblk = NULL; 3623 while (hmeblkp) { 3624 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3625 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3626 /* found hme_blk */ 3627 ASSERT(!hmeblkp->hblk_shared); 3628 if (hmeblkp->hblk_shw_bit) { 3629 if (hmeblkp->hblk_shw_mask) { 3630 shadow = 1; 3631 sfmmu_shadow_hcleanup(sfmmup, 3632 hmeblkp, hmebp); 3633 break; 3634 } else { 3635 hmeblkp->hblk_shw_bit = 0; 3636 } 3637 } 3638 3639 /* 3640 * Hblk_hmecnt and hblk_vcnt could be non zero 3641 * since hblk_unload() does not gurantee that. 3642 * 3643 * XXX - this could cause tteload() to spin 3644 * where sfmmu_shadow_hcleanup() is called. 3645 */ 3646 } 3647 3648 nx_hblk = hmeblkp->hblk_next; 3649 nx_pa = hmeblkp->hblk_nextpa; 3650 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3651 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3652 pr_hblk); 3653 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3654 } else { 3655 pr_hblk = hmeblkp; 3656 prevpa = hblkpa; 3657 } 3658 hmeblkp = nx_hblk; 3659 hblkpa = nx_pa; 3660 } 3661 3662 SFMMU_HASH_UNLOCK(hmebp); 3663 3664 if (shadow) { 3665 /* 3666 * We found another shadow hblk so cleaned its 3667 * children. We need to go back and cleanup 3668 * the original hblk so we don't change the 3669 * addr. 3670 */ 3671 shadow = 0; 3672 } else { 3673 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3674 (1 << hmeshift)); 3675 } 3676 } 3677 sfmmu_hblks_list_purge(&list); 3678 } 3679 3680 /* 3681 * This routine's job is to delete stale invalid shared hmeregions hmeblks that 3682 * may still linger on after pageunload. 
3683 */ 3684 static void 3685 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3686 { 3687 int hmeshift; 3688 hmeblk_tag hblktag; 3689 struct hmehash_bucket *hmebp; 3690 struct hme_blk *hmeblkp; 3691 struct hme_blk *pr_hblk; 3692 struct hme_blk *list = NULL; 3693 uint64_t hblkpa, prevpa; 3694 3695 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3696 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3697 3698 hmeshift = HME_HASH_SHIFT(ttesz); 3699 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3700 hblktag.htag_rehash = ttesz; 3701 hblktag.htag_rid = rid; 3702 hblktag.htag_id = srdp; 3703 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3704 3705 SFMMU_HASH_LOCK(hmebp); 3706 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 3707 prevpa, &list); 3708 if (hmeblkp != NULL) { 3709 ASSERT(hmeblkp->hblk_shared); 3710 ASSERT(!hmeblkp->hblk_shw_bit); 3711 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3712 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3713 } 3714 ASSERT(!hmeblkp->hblk_lckcnt); 3715 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 3716 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3717 } 3718 SFMMU_HASH_UNLOCK(hmebp); 3719 sfmmu_hblks_list_purge(&list); 3720 } 3721 3722 /* ARGSUSED */ 3723 static void 3724 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3725 size_t r_size, void *r_obj, u_offset_t r_objoff) 3726 { 3727 } 3728 3729 /* 3730 * Searches for an hmeblk which maps addr, then unloads this mapping 3731 * and updates *eaddrp, if the hmeblk is found. 3732 */ 3733 static void 3734 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3735 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3736 { 3737 int hmeshift; 3738 hmeblk_tag hblktag; 3739 struct hmehash_bucket *hmebp; 3740 struct hme_blk *hmeblkp; 3741 struct hme_blk *pr_hblk; 3742 struct hme_blk *list = NULL; 3743 uint64_t hblkpa, prevpa; 3744 3745 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3746 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3747 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3748 3749 hmeshift = HME_HASH_SHIFT(ttesz); 3750 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3751 hblktag.htag_rehash = ttesz; 3752 hblktag.htag_rid = rid; 3753 hblktag.htag_id = srdp; 3754 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3755 3756 SFMMU_HASH_LOCK(hmebp); 3757 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 3758 prevpa, &list); 3759 if (hmeblkp != NULL) { 3760 ASSERT(hmeblkp->hblk_shared); 3761 ASSERT(!hmeblkp->hblk_lckcnt); 3762 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3763 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3764 eaddr, NULL, HAT_UNLOAD); 3765 ASSERT(*eaddrp > addr); 3766 } 3767 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3768 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 3769 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3770 } 3771 SFMMU_HASH_UNLOCK(hmebp); 3772 sfmmu_hblks_list_purge(&list); 3773 } 3774 3775 static void 3776 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3777 { 3778 int ttesz = rgnp->rgn_pgszc; 3779 size_t rsz = rgnp->rgn_size; 3780 caddr_t rsaddr = rgnp->rgn_saddr; 3781 caddr_t readdr = rsaddr + rsz; 3782 caddr_t rhsaddr; 3783 caddr_t va; 3784 uint_t rid = rgnp->rgn_id; 3785 caddr_t cbsaddr; 3786 caddr_t cbeaddr; 3787 hat_rgn_cb_func_t rcbfunc; 3788 ulong_t cnt; 3789 3790 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3791 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3792 3793 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3794 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3795 if (ttesz < HBLK_MIN_TTESZ) { 3796 ttesz 
= HBLK_MIN_TTESZ; 3797 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3798 } else { 3799 rhsaddr = rsaddr; 3800 } 3801 3802 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) { 3803 rcbfunc = sfmmu_rgn_cb_noop; 3804 } 3805 3806 while (ttesz >= HBLK_MIN_TTESZ) { 3807 cbsaddr = rsaddr; 3808 cbeaddr = rsaddr; 3809 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3810 ttesz--; 3811 continue; 3812 } 3813 cnt = 0; 3814 va = rsaddr; 3815 while (va < readdr) { 3816 ASSERT(va >= rhsaddr); 3817 if (va != cbeaddr) { 3818 if (cbeaddr != cbsaddr) { 3819 ASSERT(cbeaddr > cbsaddr); 3820 (*rcbfunc)(cbsaddr, cbeaddr, 3821 rsaddr, rsz, rgnp->rgn_obj, 3822 rgnp->rgn_objoff); 3823 } 3824 cbsaddr = va; 3825 cbeaddr = va; 3826 } 3827 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3828 ttesz, &cbeaddr); 3829 cnt++; 3830 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3831 } 3832 if (cbeaddr != cbsaddr) { 3833 ASSERT(cbeaddr > cbsaddr); 3834 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3835 rsz, rgnp->rgn_obj, 3836 rgnp->rgn_objoff); 3837 } 3838 ttesz--; 3839 } 3840 } 3841 3842 /* 3843 * Release one hardware address translation lock on the given address range. 3844 */ 3845 void 3846 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3847 { 3848 struct hmehash_bucket *hmebp; 3849 hmeblk_tag hblktag; 3850 int hmeshift, hashno = 1; 3851 struct hme_blk *hmeblkp, *list = NULL; 3852 caddr_t endaddr; 3853 3854 ASSERT(sfmmup != NULL); 3855 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3856 3857 ASSERT((sfmmup == ksfmmup) || 3858 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3859 ASSERT((len & MMU_PAGEOFFSET) == 0); 3860 endaddr = addr + len; 3861 hblktag.htag_id = sfmmup; 3862 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3863 3864 /* 3865 * Spitfire supports 4 page sizes. 3866 * Most pages are expected to be of the smallest page size (8K) and 3867 * these will not need to be rehashed. 64K pages also don't need to be 3868 * rehashed because an hmeblk spans 64K of address space. 512K pages 3869 * might need 1 rehash and and 4M pages might need 2 rehashes. 3870 */ 3871 while (addr < endaddr) { 3872 hmeshift = HME_HASH_SHIFT(hashno); 3873 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3874 hblktag.htag_rehash = hashno; 3875 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3876 3877 SFMMU_HASH_LOCK(hmebp); 3878 3879 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3880 if (hmeblkp != NULL) { 3881 ASSERT(!hmeblkp->hblk_shared); 3882 /* 3883 * If we encounter a shadow hmeblk then 3884 * we know there are no valid hmeblks mapping 3885 * this address at this size or larger. 3886 * Just increment address by the smallest 3887 * page size. 3888 */ 3889 if (hmeblkp->hblk_shw_bit) { 3890 addr += MMU_PAGESIZE; 3891 } else { 3892 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3893 endaddr); 3894 } 3895 SFMMU_HASH_UNLOCK(hmebp); 3896 hashno = 1; 3897 continue; 3898 } 3899 SFMMU_HASH_UNLOCK(hmebp); 3900 3901 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3902 /* 3903 * We have traversed the whole list and rehashed 3904 * if necessary without finding the address to unlock 3905 * which should never happen. 3906 */ 3907 panic("sfmmu_unlock: addr not found. 
" 3908 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3909 } else { 3910 hashno++; 3911 } 3912 } 3913 3914 sfmmu_hblks_list_purge(&list); 3915 } 3916 3917 void 3918 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 3919 hat_region_cookie_t rcookie) 3920 { 3921 sf_srd_t *srdp; 3922 sf_region_t *rgnp; 3923 int ttesz; 3924 uint_t rid; 3925 caddr_t eaddr; 3926 caddr_t va; 3927 int hmeshift; 3928 hmeblk_tag hblktag; 3929 struct hmehash_bucket *hmebp; 3930 struct hme_blk *hmeblkp; 3931 struct hme_blk *pr_hblk; 3932 struct hme_blk *list; 3933 uint64_t hblkpa, prevpa; 3934 3935 if (rcookie == HAT_INVALID_REGION_COOKIE) { 3936 hat_unlock(sfmmup, addr, len); 3937 return; 3938 } 3939 3940 ASSERT(sfmmup != NULL); 3941 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3942 ASSERT(sfmmup != ksfmmup); 3943 3944 srdp = sfmmup->sfmmu_srdp; 3945 rid = (uint_t)((uint64_t)rcookie); 3946 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3947 eaddr = addr + len; 3948 va = addr; 3949 list = NULL; 3950 rgnp = srdp->srd_hmergnp[rid]; 3951 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 3952 3953 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 3954 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 3955 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 3956 ttesz = HBLK_MIN_TTESZ; 3957 } else { 3958 ttesz = rgnp->rgn_pgszc; 3959 } 3960 while (va < eaddr) { 3961 while (ttesz < rgnp->rgn_pgszc && 3962 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 3963 ttesz++; 3964 } 3965 while (ttesz >= HBLK_MIN_TTESZ) { 3966 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3967 ttesz--; 3968 continue; 3969 } 3970 hmeshift = HME_HASH_SHIFT(ttesz); 3971 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 3972 hblktag.htag_rehash = ttesz; 3973 hblktag.htag_rid = rid; 3974 hblktag.htag_id = srdp; 3975 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 3976 SFMMU_HASH_LOCK(hmebp); 3977 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 3978 pr_hblk, prevpa, &list); 3979 if (hmeblkp == NULL) { 3980 SFMMU_HASH_UNLOCK(hmebp); 3981 ttesz--; 3982 continue; 3983 } 3984 ASSERT(hmeblkp->hblk_shared); 3985 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 3986 ASSERT(va >= eaddr || 3987 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 3988 SFMMU_HASH_UNLOCK(hmebp); 3989 break; 3990 } 3991 if (ttesz < HBLK_MIN_TTESZ) { 3992 panic("hat_unlock_region: addr not found " 3993 "addr %p hat %p", va, sfmmup); 3994 } 3995 } 3996 sfmmu_hblks_list_purge(&list); 3997 } 3998 3999 /* 4000 * Function to unlock a range of addresses in an hmeblk. It returns the 4001 * next address that needs to be unlocked. 4002 * Should be called with the hash lock held. 
4003 */ 4004 static caddr_t 4005 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 4006 { 4007 struct sf_hment *sfhme; 4008 tte_t tteold, ttemod; 4009 int ttesz, ret; 4010 4011 ASSERT(in_hblk_range(hmeblkp, addr)); 4012 ASSERT(hmeblkp->hblk_shw_bit == 0); 4013 4014 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4015 ttesz = get_hblk_ttesz(hmeblkp); 4016 4017 HBLKTOHME(sfhme, hmeblkp, addr); 4018 while (addr < endaddr) { 4019 readtte: 4020 sfmmu_copytte(&sfhme->hme_tte, &tteold); 4021 if (TTE_IS_VALID(&tteold)) { 4022 4023 ttemod = tteold; 4024 4025 ret = sfmmu_modifytte_try(&tteold, &ttemod, 4026 &sfhme->hme_tte); 4027 4028 if (ret < 0) 4029 goto readtte; 4030 4031 if (hmeblkp->hblk_lckcnt == 0) 4032 panic("zero hblk lckcnt"); 4033 4034 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4035 (uintptr_t)endaddr) 4036 panic("can't unlock large tte"); 4037 4038 ASSERT(hmeblkp->hblk_lckcnt > 0); 4039 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 4040 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4041 } else { 4042 panic("sfmmu_hblk_unlock: invalid tte"); 4043 } 4044 addr += TTEBYTES(ttesz); 4045 sfhme++; 4046 } 4047 return (addr); 4048 } 4049 4050 /* 4051 * Physical Address Mapping Framework 4052 * 4053 * General rules: 4054 * 4055 * (1) Applies only to seg_kmem memory pages. To make things easier, 4056 * seg_kpm addresses are also accepted by the routines, but nothing 4057 * is done with them since by definition their PA mappings are static. 4058 * (2) hat_add_callback() may only be called while holding the page lock 4059 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4060 * or passing HAC_PAGELOCK flag. 4061 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4062 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4063 * callbacks may not sleep or acquire adaptive mutex locks. 4064 * (4) Either prehandler() or posthandler() (but not both) may be specified 4065 * as being NULL. Specifying an errhandler() is optional. 4066 * 4067 * Details of using the framework: 4068 * 4069 * registering a callback (hat_register_callback()) 4070 * 4071 * Pass prehandler, posthandler, errhandler addresses 4072 * as described below. If capture_cpus argument is nonzero, 4073 * suspend callback to the prehandler will occur with CPUs 4074 * captured and executing xc_loop() and CPUs will remain 4075 * captured until after the posthandler suspend callback 4076 * occurs. 4077 * 4078 * adding a callback (hat_add_callback()) 4079 * 4080 * as_pagelock(); 4081 * hat_add_callback(); 4082 * save returned pfn in private data structures or program registers; 4083 * as_pageunlock(); 4084 * 4085 * prehandler() 4086 * 4087 * Stop all accesses by physical address to this memory page. 4088 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4089 * adaptive locks. The second, SUSPEND, is called at high PIL with 4090 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4091 * locks must be XCALL_PIL or higher locks). 4092 * 4093 * May return the following errors: 4094 * EIO: A fatal error has occurred. This will result in panic. 4095 * EAGAIN: The page cannot be suspended. This will fail the 4096 * relocation. 4097 * 0: Success. 4098 * 4099 * posthandler() 4100 * 4101 * Save new pfn in private data structures or program registers; 4102 * not allowed to fail (non-zero return values will result in panic). 4103 * 4104 * errhandler() 4105 * 4106 * called when an error occurs related to the callback. 
Currently 4107 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4108 * a page is being freed, but there are still outstanding callback(s) 4109 * registered on the page. 4110 * 4111 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4112 * 4113 * stop using physical address 4114 * hat_delete_callback(); 4115 * 4116 */ 4117 4118 /* 4119 * Register a callback class. Each subsystem should do this once and 4120 * cache the id_t returned for use in setting up and tearing down callbacks. 4121 * 4122 * There is no facility for removing callback IDs once they are created; 4123 * the "key" should be unique for each module, so in case a module is unloaded 4124 * and subsequently re-loaded, we can recycle the module's previous entry. 4125 */ 4126 id_t 4127 hat_register_callback(int key, 4128 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4129 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4130 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4131 int capture_cpus) 4132 { 4133 id_t id; 4134 4135 /* 4136 * Search the table for a pre-existing callback associated with 4137 * the identifier "key". If one exists, we re-use that entry in 4138 * the table for this instance, otherwise we assign the next 4139 * available table slot. 4140 */ 4141 for (id = 0; id < sfmmu_max_cb_id; id++) { 4142 if (sfmmu_cb_table[id].key == key) 4143 break; 4144 } 4145 4146 if (id == sfmmu_max_cb_id) { 4147 id = sfmmu_cb_nextid++; 4148 if (id >= sfmmu_max_cb_id) 4149 panic("hat_register_callback: out of callback IDs"); 4150 } 4151 4152 ASSERT(prehandler != NULL || posthandler != NULL); 4153 4154 sfmmu_cb_table[id].key = key; 4155 sfmmu_cb_table[id].prehandler = prehandler; 4156 sfmmu_cb_table[id].posthandler = posthandler; 4157 sfmmu_cb_table[id].errhandler = errhandler; 4158 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4159 4160 return (id); 4161 } 4162 4163 #define HAC_COOKIE_NONE (void *)-1 4164 4165 /* 4166 * Add relocation callbacks to the specified addr/len which will be called 4167 * when relocating the associated page. See the description of pre and 4168 * posthandler above for more details. 4169 * 4170 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4171 * locked internally so the caller must be able to deal with the callback 4172 * running even before this function has returned. If HAC_PAGELOCK is not 4173 * set, it is assumed that the underlying memory pages are locked. 4174 * 4175 * Since the caller must track the individual page boundaries anyway, 4176 * we only allow a callback to be added to a single page (large 4177 * or small). Thus [addr, addr + len) MUST be contained within a single 4178 * page. 4179 * 4180 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4181 * _provided_that_ a unique parameter is specified for each callback. 4182 * If multiple callbacks are registered on the same range the callback will 4183 * be invoked with each unique parameter. Registering the same callback with 4184 * the same argument more than once will result in corrupted kernel state. 4185 * 4186 * Returns the pfn of the underlying kernel page in *rpfn 4187 * on success, or PFN_INVALID on failure. 4188 * 4189 * cookiep (if passed) provides storage space for an opaque cookie 4190 * to return later to hat_delete_callback(). This cookie makes the callback 4191 * deletion significantly quicker by avoiding a potentially lengthy hash 4192 * search. 
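 *
 * A hedged usage sketch (hypothetical driver code; my_key, my_pre, my_post,
 * pvt, pfn and cookie are illustrative names, and error handling is
 * omitted):
 *
 *	id = hat_register_callback(my_key, my_pre, my_post, NULL, 0);
 *	...
 *	error = hat_add_callback(id, va, len, HAC_SLEEP | HAC_PAGELOCK,
 *		pvt, &pfn, &cookie);
 *	... access the page by physical address using pfn ...
 *	hat_delete_callback(va, len, pvt, HAC_PAGELOCK, cookie);
 *
 * where [va, va + len) must lie within a single mapping as described above.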
4193 * 4194 * Returns values: 4195 * 0: success 4196 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4197 * EINVAL: callback ID is not valid 4198 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4199 * space 4200 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4201 */ 4202 int 4203 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4204 void *pvt, pfn_t *rpfn, void **cookiep) 4205 { 4206 struct hmehash_bucket *hmebp; 4207 hmeblk_tag hblktag; 4208 struct hme_blk *hmeblkp; 4209 int hmeshift, hashno; 4210 caddr_t saddr, eaddr, baseaddr; 4211 struct pa_hment *pahmep; 4212 struct sf_hment *sfhmep, *osfhmep; 4213 kmutex_t *pml; 4214 tte_t tte; 4215 page_t *pp; 4216 vnode_t *vp; 4217 u_offset_t off; 4218 pfn_t pfn; 4219 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4220 int locked = 0; 4221 4222 /* 4223 * For KPM mappings, just return the physical address since we 4224 * don't need to register any callbacks. 4225 */ 4226 if (IS_KPM_ADDR(vaddr)) { 4227 uint64_t paddr; 4228 SFMMU_KPM_VTOP(vaddr, paddr); 4229 *rpfn = btop(paddr); 4230 if (cookiep != NULL) 4231 *cookiep = HAC_COOKIE_NONE; 4232 return (0); 4233 } 4234 4235 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4236 *rpfn = PFN_INVALID; 4237 return (EINVAL); 4238 } 4239 4240 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4241 *rpfn = PFN_INVALID; 4242 return (ENOMEM); 4243 } 4244 4245 sfhmep = &pahmep->sfment; 4246 4247 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4248 eaddr = saddr + len; 4249 4250 rehash: 4251 /* Find the mapping(s) for this page */ 4252 for (hashno = TTE64K, hmeblkp = NULL; 4253 hmeblkp == NULL && hashno <= mmu_hashcnt; 4254 hashno++) { 4255 hmeshift = HME_HASH_SHIFT(hashno); 4256 hblktag.htag_id = ksfmmup; 4257 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4258 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4259 hblktag.htag_rehash = hashno; 4260 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4261 4262 SFMMU_HASH_LOCK(hmebp); 4263 4264 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4265 4266 if (hmeblkp == NULL) 4267 SFMMU_HASH_UNLOCK(hmebp); 4268 } 4269 4270 if (hmeblkp == NULL) { 4271 kmem_cache_free(pa_hment_cache, pahmep); 4272 *rpfn = PFN_INVALID; 4273 return (ENXIO); 4274 } 4275 4276 ASSERT(!hmeblkp->hblk_shared); 4277 4278 HBLKTOHME(osfhmep, hmeblkp, saddr); 4279 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4280 4281 if (!TTE_IS_VALID(&tte)) { 4282 SFMMU_HASH_UNLOCK(hmebp); 4283 kmem_cache_free(pa_hment_cache, pahmep); 4284 *rpfn = PFN_INVALID; 4285 return (ENXIO); 4286 } 4287 4288 /* 4289 * Make sure the boundaries for the callback fall within this 4290 * single mapping. 4291 */ 4292 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4293 ASSERT(saddr >= baseaddr); 4294 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4295 SFMMU_HASH_UNLOCK(hmebp); 4296 kmem_cache_free(pa_hment_cache, pahmep); 4297 *rpfn = PFN_INVALID; 4298 return (ERANGE); 4299 } 4300 4301 pfn = sfmmu_ttetopfn(&tte, vaddr); 4302 4303 /* 4304 * The pfn may not have a page_t underneath in which case we 4305 * just return it. This can happen if we are doing I/O to a 4306 * static portion of the kernel's address space, for instance. 
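 * In that case no pa_hment is set up; the caller simply gets the pfn back
 * and, if it asked for a cookie, HAC_COOKIE_NONE, so that a later
 * hat_delete_callback() with that cookie is a no-op.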
4307 */ 4308 pp = osfhmep->hme_page; 4309 if (pp == NULL) { 4310 SFMMU_HASH_UNLOCK(hmebp); 4311 kmem_cache_free(pa_hment_cache, pahmep); 4312 *rpfn = pfn; 4313 if (cookiep) 4314 *cookiep = HAC_COOKIE_NONE; 4315 return (0); 4316 } 4317 ASSERT(pp == PP_PAGEROOT(pp)); 4318 4319 vp = pp->p_vnode; 4320 off = pp->p_offset; 4321 4322 pml = sfmmu_mlist_enter(pp); 4323 4324 if (flags & HAC_PAGELOCK) { 4325 if (!page_trylock(pp, SE_SHARED)) { 4326 /* 4327 * Somebody is holding SE_EXCL lock. Might 4328 * even be hat_page_relocate(). Drop all 4329 * our locks, lookup the page in &kvp, and 4330 * retry. If it doesn't exist in &kvp and &zvp, 4331 * then we must be dealing with a kernel mapped 4332 * page which doesn't actually belong to 4333 * segkmem so we punt. 4334 */ 4335 sfmmu_mlist_exit(pml); 4336 SFMMU_HASH_UNLOCK(hmebp); 4337 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4338 4339 /* check zvp before giving up */ 4340 if (pp == NULL) 4341 pp = page_lookup(&zvp, (u_offset_t)saddr, 4342 SE_SHARED); 4343 4344 /* Okay, we didn't find it, give up */ 4345 if (pp == NULL) { 4346 kmem_cache_free(pa_hment_cache, pahmep); 4347 *rpfn = pfn; 4348 if (cookiep) 4349 *cookiep = HAC_COOKIE_NONE; 4350 return (0); 4351 } 4352 page_unlock(pp); 4353 goto rehash; 4354 } 4355 locked = 1; 4356 } 4357 4358 if (!PAGE_LOCKED(pp) && !panicstr) 4359 panic("hat_add_callback: page 0x%p not locked", pp); 4360 4361 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4362 pp->p_offset != off) { 4363 /* 4364 * The page moved before we got our hands on it. Drop 4365 * all the locks and try again. 4366 */ 4367 ASSERT((flags & HAC_PAGELOCK) != 0); 4368 sfmmu_mlist_exit(pml); 4369 SFMMU_HASH_UNLOCK(hmebp); 4370 page_unlock(pp); 4371 locked = 0; 4372 goto rehash; 4373 } 4374 4375 if (!VN_ISKAS(vp)) { 4376 /* 4377 * This is not a segkmem page but another page which 4378 * has been kernel mapped. It had better have at least 4379 * a share lock on it. Return the pfn. 4380 */ 4381 sfmmu_mlist_exit(pml); 4382 SFMMU_HASH_UNLOCK(hmebp); 4383 if (locked) 4384 page_unlock(pp); 4385 kmem_cache_free(pa_hment_cache, pahmep); 4386 ASSERT(PAGE_LOCKED(pp)); 4387 *rpfn = pfn; 4388 if (cookiep) 4389 *cookiep = HAC_COOKIE_NONE; 4390 return (0); 4391 } 4392 4393 /* 4394 * Setup this pa_hment and link its embedded dummy sf_hment into 4395 * the mapping list. 4396 */ 4397 pp->p_share++; 4398 pahmep->cb_id = callback_id; 4399 pahmep->addr = vaddr; 4400 pahmep->len = len; 4401 pahmep->refcnt = 1; 4402 pahmep->flags = 0; 4403 pahmep->pvt = pvt; 4404 4405 sfhmep->hme_tte.ll = 0; 4406 sfhmep->hme_data = pahmep; 4407 sfhmep->hme_prev = osfhmep; 4408 sfhmep->hme_next = osfhmep->hme_next; 4409 4410 if (osfhmep->hme_next) 4411 osfhmep->hme_next->hme_prev = sfhmep; 4412 4413 osfhmep->hme_next = sfhmep; 4414 4415 sfmmu_mlist_exit(pml); 4416 SFMMU_HASH_UNLOCK(hmebp); 4417 4418 if (locked) 4419 page_unlock(pp); 4420 4421 *rpfn = pfn; 4422 if (cookiep) 4423 *cookiep = (void *)pahmep; 4424 4425 return (0); 4426 } 4427 4428 /* 4429 * Remove the relocation callbacks from the specified addr/len. 
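 * Passing the cookie returned by hat_add_callback() goes straight to the
 * pa_hment; a NULL cookie falls back to searching the page's mapping list
 * for the entry whose (pvt, vaddr, len) triple matches the values given to
 * hat_add_callback().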
4430 */ 4431 void 4432 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4433 void *cookie) 4434 { 4435 struct hmehash_bucket *hmebp; 4436 hmeblk_tag hblktag; 4437 struct hme_blk *hmeblkp; 4438 int hmeshift, hashno; 4439 caddr_t saddr; 4440 struct pa_hment *pahmep; 4441 struct sf_hment *sfhmep, *osfhmep; 4442 kmutex_t *pml; 4443 tte_t tte; 4444 page_t *pp; 4445 vnode_t *vp; 4446 u_offset_t off; 4447 int locked = 0; 4448 4449 /* 4450 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4451 * remove so just return. 4452 */ 4453 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4454 return; 4455 4456 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4457 4458 rehash: 4459 /* Find the mapping(s) for this page */ 4460 for (hashno = TTE64K, hmeblkp = NULL; 4461 hmeblkp == NULL && hashno <= mmu_hashcnt; 4462 hashno++) { 4463 hmeshift = HME_HASH_SHIFT(hashno); 4464 hblktag.htag_id = ksfmmup; 4465 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4466 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4467 hblktag.htag_rehash = hashno; 4468 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4469 4470 SFMMU_HASH_LOCK(hmebp); 4471 4472 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4473 4474 if (hmeblkp == NULL) 4475 SFMMU_HASH_UNLOCK(hmebp); 4476 } 4477 4478 if (hmeblkp == NULL) 4479 return; 4480 4481 ASSERT(!hmeblkp->hblk_shared); 4482 4483 HBLKTOHME(osfhmep, hmeblkp, saddr); 4484 4485 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4486 if (!TTE_IS_VALID(&tte)) { 4487 SFMMU_HASH_UNLOCK(hmebp); 4488 return; 4489 } 4490 4491 pp = osfhmep->hme_page; 4492 if (pp == NULL) { 4493 SFMMU_HASH_UNLOCK(hmebp); 4494 ASSERT(cookie == NULL); 4495 return; 4496 } 4497 4498 vp = pp->p_vnode; 4499 off = pp->p_offset; 4500 4501 pml = sfmmu_mlist_enter(pp); 4502 4503 if (flags & HAC_PAGELOCK) { 4504 if (!page_trylock(pp, SE_SHARED)) { 4505 /* 4506 * Somebody is holding SE_EXCL lock. Might 4507 * even be hat_page_relocate(). Drop all 4508 * our locks, lookup the page in &kvp, and 4509 * retry. If it doesn't exist in &kvp and &zvp, 4510 * then we must be dealing with a kernel mapped 4511 * page which doesn't actually belong to 4512 * segkmem so we punt. 4513 */ 4514 sfmmu_mlist_exit(pml); 4515 SFMMU_HASH_UNLOCK(hmebp); 4516 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4517 /* check zvp before giving up */ 4518 if (pp == NULL) 4519 pp = page_lookup(&zvp, (u_offset_t)saddr, 4520 SE_SHARED); 4521 4522 if (pp == NULL) { 4523 ASSERT(cookie == NULL); 4524 return; 4525 } 4526 page_unlock(pp); 4527 goto rehash; 4528 } 4529 locked = 1; 4530 } 4531 4532 ASSERT(PAGE_LOCKED(pp)); 4533 4534 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4535 pp->p_offset != off) { 4536 /* 4537 * The page moved before we got our hands on it. Drop 4538 * all the locks and try again. 4539 */ 4540 ASSERT((flags & HAC_PAGELOCK) != 0); 4541 sfmmu_mlist_exit(pml); 4542 SFMMU_HASH_UNLOCK(hmebp); 4543 page_unlock(pp); 4544 locked = 0; 4545 goto rehash; 4546 } 4547 4548 if (!VN_ISKAS(vp)) { 4549 /* 4550 * This is not a segkmem page but another page which 4551 * has been kernel mapped. 
4552 */ 4553 sfmmu_mlist_exit(pml); 4554 SFMMU_HASH_UNLOCK(hmebp); 4555 if (locked) 4556 page_unlock(pp); 4557 ASSERT(cookie == NULL); 4558 return; 4559 } 4560 4561 if (cookie != NULL) { 4562 pahmep = (struct pa_hment *)cookie; 4563 sfhmep = &pahmep->sfment; 4564 } else { 4565 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4566 sfhmep = sfhmep->hme_next) { 4567 4568 /* 4569 * skip va<->pa mappings 4570 */ 4571 if (!IS_PAHME(sfhmep)) 4572 continue; 4573 4574 pahmep = sfhmep->hme_data; 4575 ASSERT(pahmep != NULL); 4576 4577 /* 4578 * if pa_hment matches, remove it 4579 */ 4580 if ((pahmep->pvt == pvt) && 4581 (pahmep->addr == vaddr) && 4582 (pahmep->len == len)) { 4583 break; 4584 } 4585 } 4586 } 4587 4588 if (sfhmep == NULL) { 4589 if (!panicstr) { 4590 panic("hat_delete_callback: pa_hment not found, pp %p", 4591 (void *)pp); 4592 } 4593 return; 4594 } 4595 4596 /* 4597 * Note: at this point a valid kernel mapping must still be 4598 * present on this page. 4599 */ 4600 pp->p_share--; 4601 if (pp->p_share <= 0) 4602 panic("hat_delete_callback: zero p_share"); 4603 4604 if (--pahmep->refcnt == 0) { 4605 if (pahmep->flags != 0) 4606 panic("hat_delete_callback: pa_hment is busy"); 4607 4608 /* 4609 * Remove sfhmep from the mapping list for the page. 4610 */ 4611 if (sfhmep->hme_prev) { 4612 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4613 } else { 4614 pp->p_mapping = sfhmep->hme_next; 4615 } 4616 4617 if (sfhmep->hme_next) 4618 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4619 4620 sfmmu_mlist_exit(pml); 4621 SFMMU_HASH_UNLOCK(hmebp); 4622 4623 if (locked) 4624 page_unlock(pp); 4625 4626 kmem_cache_free(pa_hment_cache, pahmep); 4627 return; 4628 } 4629 4630 sfmmu_mlist_exit(pml); 4631 SFMMU_HASH_UNLOCK(hmebp); 4632 if (locked) 4633 page_unlock(pp); 4634 } 4635 4636 /* 4637 * hat_probe returns 1 if the translation for the address 'addr' is 4638 * loaded, zero otherwise. 4639 * 4640 * hat_probe should be used only for advisorary purposes because it may 4641 * occasionally return the wrong value. The implementation must guarantee that 4642 * returning the wrong value is a very rare event. hat_probe is used 4643 * to implement optimizations in the segment drivers. 
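 *
 * A minimal, hypothetical example of such an optimization (illustrative
 * only; callers must tolerate the rare wrong answer in either direction):
 *
 *	if (hat_probe(as->a_hat, addr)) {
 *		... fast path that assumes the translation is loaded ...
 *	} else {
 *		... fall back to the normal fault path ...
 *	}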
4644 * 4645 */ 4646 int 4647 hat_probe(struct hat *sfmmup, caddr_t addr) 4648 { 4649 pfn_t pfn; 4650 tte_t tte; 4651 4652 ASSERT(sfmmup != NULL); 4653 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4654 4655 ASSERT((sfmmup == ksfmmup) || 4656 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4657 4658 if (sfmmup == ksfmmup) { 4659 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4660 == PFN_SUSPENDED) { 4661 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4662 } 4663 } else { 4664 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4665 } 4666 4667 if (pfn != PFN_INVALID) 4668 return (1); 4669 else 4670 return (0); 4671 } 4672 4673 ssize_t 4674 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4675 { 4676 tte_t tte; 4677 4678 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4679 4680 if (sfmmup == ksfmmup) { 4681 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4682 return (-1); 4683 } 4684 } else { 4685 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4686 return (-1); 4687 } 4688 } 4689 4690 ASSERT(TTE_IS_VALID(&tte)); 4691 return (TTEBYTES(TTE_CSZ(&tte))); 4692 } 4693 4694 uint_t 4695 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4696 { 4697 tte_t tte; 4698 4699 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4700 4701 if (sfmmup == ksfmmup) { 4702 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4703 tte.ll = 0; 4704 } 4705 } else { 4706 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4707 tte.ll = 0; 4708 } 4709 } 4710 if (TTE_IS_VALID(&tte)) { 4711 *attr = sfmmu_ptov_attr(&tte); 4712 return (0); 4713 } 4714 *attr = 0; 4715 return ((uint_t)0xffffffff); 4716 } 4717 4718 /* 4719 * Enables more attributes on specified address range (ie. logical OR) 4720 */ 4721 void 4722 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4723 { 4724 if (hat->sfmmu_xhat_provider) { 4725 XHAT_SETATTR(hat, addr, len, attr); 4726 return; 4727 } else { 4728 /* 4729 * This must be a CPU HAT. If the address space has 4730 * XHATs attached, change attributes for all of them, 4731 * just in case 4732 */ 4733 ASSERT(hat->sfmmu_as != NULL); 4734 if (hat->sfmmu_as->a_xhat != NULL) 4735 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4736 } 4737 4738 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4739 } 4740 4741 /* 4742 * Assigns attributes to the specified address range. All the attributes 4743 * are specified. 4744 */ 4745 void 4746 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4747 { 4748 if (hat->sfmmu_xhat_provider) { 4749 XHAT_CHGATTR(hat, addr, len, attr); 4750 return; 4751 } else { 4752 /* 4753 * This must be a CPU HAT. If the address space has 4754 * XHATs attached, change attributes for all of them, 4755 * just in case 4756 */ 4757 ASSERT(hat->sfmmu_as != NULL); 4758 if (hat->sfmmu_as->a_xhat != NULL) 4759 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4760 } 4761 4762 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4763 } 4764 4765 /* 4766 * Remove attributes on the specified address range (ie. loginal NAND) 4767 */ 4768 void 4769 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4770 { 4771 if (hat->sfmmu_xhat_provider) { 4772 XHAT_CLRATTR(hat, addr, len, attr); 4773 return; 4774 } else { 4775 /* 4776 * This must be a CPU HAT. 
If the address space has 4777 * XHATs attached, change attributes for all of them, 4778 * just in case 4779 */ 4780 ASSERT(hat->sfmmu_as != NULL); 4781 if (hat->sfmmu_as->a_xhat != NULL) 4782 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4783 } 4784 4785 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4786 } 4787 4788 /* 4789 * Change attributes on an address range to that specified by attr and mode. 4790 */ 4791 static void 4792 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4793 int mode) 4794 { 4795 struct hmehash_bucket *hmebp; 4796 hmeblk_tag hblktag; 4797 int hmeshift, hashno = 1; 4798 struct hme_blk *hmeblkp, *list = NULL; 4799 caddr_t endaddr; 4800 cpuset_t cpuset; 4801 demap_range_t dmr; 4802 4803 CPUSET_ZERO(cpuset); 4804 4805 ASSERT((sfmmup == ksfmmup) || 4806 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4807 ASSERT((len & MMU_PAGEOFFSET) == 0); 4808 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4809 4810 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4811 ((addr + len) > (caddr_t)USERLIMIT)) { 4812 panic("user addr %p in kernel space", 4813 (void *)addr); 4814 } 4815 4816 endaddr = addr + len; 4817 hblktag.htag_id = sfmmup; 4818 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4819 DEMAP_RANGE_INIT(sfmmup, &dmr); 4820 4821 while (addr < endaddr) { 4822 hmeshift = HME_HASH_SHIFT(hashno); 4823 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4824 hblktag.htag_rehash = hashno; 4825 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4826 4827 SFMMU_HASH_LOCK(hmebp); 4828 4829 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4830 if (hmeblkp != NULL) { 4831 ASSERT(!hmeblkp->hblk_shared); 4832 /* 4833 * We've encountered a shadow hmeblk so skip the range 4834 * of the next smaller mapping size. 4835 */ 4836 if (hmeblkp->hblk_shw_bit) { 4837 ASSERT(sfmmup != ksfmmup); 4838 ASSERT(hashno > 1); 4839 addr = (caddr_t)P2END((uintptr_t)addr, 4840 TTEBYTES(hashno - 1)); 4841 } else { 4842 addr = sfmmu_hblk_chgattr(sfmmup, 4843 hmeblkp, addr, endaddr, &dmr, attr, mode); 4844 } 4845 SFMMU_HASH_UNLOCK(hmebp); 4846 hashno = 1; 4847 continue; 4848 } 4849 SFMMU_HASH_UNLOCK(hmebp); 4850 4851 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4852 /* 4853 * We have traversed the whole list and rehashed 4854 * if necessary without finding the address to chgattr. 4855 * This is ok, so we increment the address by the 4856 * smallest hmeblk range for kernel mappings or for 4857 * user mappings with no large pages, and the largest 4858 * hmeblk range, to account for shadow hmeblks, for 4859 * user mappings with large pages and continue. 4860 */ 4861 if (sfmmup == ksfmmup) 4862 addr = (caddr_t)P2END((uintptr_t)addr, 4863 TTEBYTES(1)); 4864 else 4865 addr = (caddr_t)P2END((uintptr_t)addr, 4866 TTEBYTES(hashno)); 4867 hashno = 1; 4868 } else { 4869 hashno++; 4870 } 4871 } 4872 4873 sfmmu_hblks_list_purge(&list); 4874 DEMAP_RANGE_FLUSH(&dmr); 4875 cpuset = sfmmup->sfmmu_cpusran; 4876 xt_sync(cpuset); 4877 } 4878 4879 /* 4880 * This function chgattr on a range of addresses in an hmeblk. It returns the 4881 * next addres that needs to be chgattr. 4882 * It should be called with the hash lock held. 4883 * XXX It should be possible to optimize chgattr by not flushing every time but 4884 * on the other hand: 4885 * 1. do one flush crosscall. 4886 * 2. 
only flush if we are increasing permissions (make sure this will work) 4887 */ 4888 static caddr_t 4889 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4890 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4891 { 4892 tte_t tte, tteattr, tteflags, ttemod; 4893 struct sf_hment *sfhmep; 4894 int ttesz; 4895 struct page *pp = NULL; 4896 kmutex_t *pml, *pmtx; 4897 int ret; 4898 int use_demap_range; 4899 #if defined(SF_ERRATA_57) 4900 int check_exec; 4901 #endif 4902 4903 ASSERT(in_hblk_range(hmeblkp, addr)); 4904 ASSERT(hmeblkp->hblk_shw_bit == 0); 4905 ASSERT(!hmeblkp->hblk_shared); 4906 4907 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4908 ttesz = get_hblk_ttesz(hmeblkp); 4909 4910 /* 4911 * Flush the current demap region if addresses have been 4912 * skipped or the page size doesn't match. 4913 */ 4914 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4915 if (use_demap_range) { 4916 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4917 } else { 4918 DEMAP_RANGE_FLUSH(dmrp); 4919 } 4920 4921 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4922 #if defined(SF_ERRATA_57) 4923 check_exec = (sfmmup != ksfmmup) && 4924 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4925 TTE_IS_EXECUTABLE(&tteattr); 4926 #endif 4927 HBLKTOHME(sfhmep, hmeblkp, addr); 4928 while (addr < endaddr) { 4929 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4930 if (TTE_IS_VALID(&tte)) { 4931 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4932 /* 4933 * if the new attr is the same as old 4934 * continue 4935 */ 4936 goto next_addr; 4937 } 4938 if (!TTE_IS_WRITABLE(&tteattr)) { 4939 /* 4940 * make sure we clear hw modify bit if we 4941 * removing write protections 4942 */ 4943 tteflags.tte_intlo |= TTE_HWWR_INT; 4944 } 4945 4946 pml = NULL; 4947 pp = sfhmep->hme_page; 4948 if (pp) { 4949 pml = sfmmu_mlist_enter(pp); 4950 } 4951 4952 if (pp != sfhmep->hme_page) { 4953 /* 4954 * tte must have been unloaded. 4955 */ 4956 ASSERT(pml); 4957 sfmmu_mlist_exit(pml); 4958 continue; 4959 } 4960 4961 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4962 4963 ttemod = tte; 4964 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4965 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4966 4967 #if defined(SF_ERRATA_57) 4968 if (check_exec && addr < errata57_limit) 4969 ttemod.tte_exec_perm = 0; 4970 #endif 4971 ret = sfmmu_modifytte_try(&tte, &ttemod, 4972 &sfhmep->hme_tte); 4973 4974 if (ret < 0) { 4975 /* tte changed underneath us */ 4976 if (pml) { 4977 sfmmu_mlist_exit(pml); 4978 } 4979 continue; 4980 } 4981 4982 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4983 /* 4984 * need to sync if we are clearing modify bit. 4985 */ 4986 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4987 } 4988 4989 if (pp && PP_ISRO(pp)) { 4990 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4991 pmtx = sfmmu_page_enter(pp); 4992 PP_CLRRO(pp); 4993 sfmmu_page_exit(pmtx); 4994 } 4995 } 4996 4997 if (ret > 0 && use_demap_range) { 4998 DEMAP_RANGE_MARKPG(dmrp, addr); 4999 } else if (ret > 0) { 5000 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5001 } 5002 5003 if (pml) { 5004 sfmmu_mlist_exit(pml); 5005 } 5006 } 5007 next_addr: 5008 addr += TTEBYTES(ttesz); 5009 sfhmep++; 5010 DEMAP_RANGE_NEXTPG(dmrp); 5011 } 5012 return (addr); 5013 } 5014 5015 /* 5016 * This routine converts virtual attributes to physical ones. It will 5017 * update the tteflags field with the tte mask corresponding to the attributes 5018 * affected and it returns the new attributes. It will also clear the modify 5019 * bit if we are taking away write permission. 
This is necessary since the 5020 * modify bit is the hardware permission bit and we need to clear it in order 5021 * to detect write faults. 5022 */ 5023 static uint64_t 5024 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 5025 { 5026 tte_t ttevalue; 5027 5028 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5029 5030 switch (mode) { 5031 case SFMMU_CHGATTR: 5032 /* all attributes specified */ 5033 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5034 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5035 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5036 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5037 break; 5038 case SFMMU_SETATTR: 5039 ASSERT(!(attr & ~HAT_PROT_MASK)); 5040 ttemaskp->ll = 0; 5041 ttevalue.ll = 0; 5042 /* 5043 * a valid tte implies exec and read for sfmmu 5044 * so no need to do anything about them. 5045 * since priviledged access implies user access 5046 * PROT_USER doesn't make sense either. 5047 */ 5048 if (attr & PROT_WRITE) { 5049 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5050 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5051 } 5052 break; 5053 case SFMMU_CLRATTR: 5054 /* attributes will be nand with current ones */ 5055 if (attr & ~(PROT_WRITE | PROT_USER)) { 5056 panic("sfmmu: attr %x not supported", attr); 5057 } 5058 ttemaskp->ll = 0; 5059 ttevalue.ll = 0; 5060 if (attr & PROT_WRITE) { 5061 /* clear both writable and modify bit */ 5062 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5063 } 5064 if (attr & PROT_USER) { 5065 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5066 ttevalue.tte_intlo |= TTE_PRIV_INT; 5067 } 5068 break; 5069 default: 5070 panic("sfmmu_vtop_attr: bad mode %x", mode); 5071 } 5072 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5073 return (ttevalue.ll); 5074 } 5075 5076 static uint_t 5077 sfmmu_ptov_attr(tte_t *ttep) 5078 { 5079 uint_t attr; 5080 5081 ASSERT(TTE_IS_VALID(ttep)); 5082 5083 attr = PROT_READ; 5084 5085 if (TTE_IS_WRITABLE(ttep)) { 5086 attr |= PROT_WRITE; 5087 } 5088 if (TTE_IS_EXECUTABLE(ttep)) { 5089 attr |= PROT_EXEC; 5090 } 5091 if (!TTE_IS_PRIVILEGED(ttep)) { 5092 attr |= PROT_USER; 5093 } 5094 if (TTE_IS_NFO(ttep)) { 5095 attr |= HAT_NOFAULT; 5096 } 5097 if (TTE_IS_NOSYNC(ttep)) { 5098 attr |= HAT_NOSYNC; 5099 } 5100 if (TTE_IS_SIDEFFECT(ttep)) { 5101 attr |= SFMMU_SIDEFFECT; 5102 } 5103 if (!TTE_IS_VCACHEABLE(ttep)) { 5104 attr |= SFMMU_UNCACHEVTTE; 5105 } 5106 if (!TTE_IS_PCACHEABLE(ttep)) { 5107 attr |= SFMMU_UNCACHEPTTE; 5108 } 5109 return (attr); 5110 } 5111 5112 /* 5113 * hat_chgprot is a deprecated hat call. New segment drivers 5114 * should store all attributes and use hat_*attr calls. 5115 * 5116 * Change the protections in the virtual address range 5117 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5118 * then remove write permission, leaving the other 5119 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 5120 * 5121 */ 5122 void 5123 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5124 { 5125 struct hmehash_bucket *hmebp; 5126 hmeblk_tag hblktag; 5127 int hmeshift, hashno = 1; 5128 struct hme_blk *hmeblkp, *list = NULL; 5129 caddr_t endaddr; 5130 cpuset_t cpuset; 5131 demap_range_t dmr; 5132 5133 ASSERT((len & MMU_PAGEOFFSET) == 0); 5134 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5135 5136 if (sfmmup->sfmmu_xhat_provider) { 5137 XHAT_CHGPROT(sfmmup, addr, len, vprot); 5138 return; 5139 } else { 5140 /* 5141 * This must be a CPU HAT. 
If the address space has
5142 * XHATs attached, change attributes for all of them,
5143 * just in case
5144 */
5145 ASSERT(sfmmup->sfmmu_as != NULL);
5146 if (sfmmup->sfmmu_as->a_xhat != NULL)
5147 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
5148 }
5149 
5150 CPUSET_ZERO(cpuset);
5151 
5152 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5153 ((addr + len) > (caddr_t)USERLIMIT)) {
5154 panic("user addr %p vprot %x in kernel space",
5155 (void *)addr, vprot);
5156 }
5157 endaddr = addr + len;
5158 hblktag.htag_id = sfmmup;
5159 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5160 DEMAP_RANGE_INIT(sfmmup, &dmr);
5161 
5162 while (addr < endaddr) {
5163 hmeshift = HME_HASH_SHIFT(hashno);
5164 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5165 hblktag.htag_rehash = hashno;
5166 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5167 
5168 SFMMU_HASH_LOCK(hmebp);
5169 
5170 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5171 if (hmeblkp != NULL) {
5172 ASSERT(!hmeblkp->hblk_shared);
5173 /*
5174 * We've encountered a shadow hmeblk so skip the range
5175 * of the next smaller mapping size.
5176 */
5177 if (hmeblkp->hblk_shw_bit) {
5178 ASSERT(sfmmup != ksfmmup);
5179 ASSERT(hashno > 1);
5180 addr = (caddr_t)P2END((uintptr_t)addr,
5181 TTEBYTES(hashno - 1));
5182 } else {
5183 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5184 addr, endaddr, &dmr, vprot);
5185 }
5186 SFMMU_HASH_UNLOCK(hmebp);
5187 hashno = 1;
5188 continue;
5189 }
5190 SFMMU_HASH_UNLOCK(hmebp);
5191 
5192 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5193 /*
5194 * We have traversed the whole list and rehashed
5195 * if necessary without finding the address to chgprot.
5196 * This is ok, so we increment the address by the
5197 * smallest hmeblk range for kernel mappings, or by the
5198 * largest hmeblk range (to account for shadow hmeblks)
5199 * for user mappings, and continue.
5200 */
5201 if (sfmmup == ksfmmup)
5202 addr = (caddr_t)P2END((uintptr_t)addr,
5203 TTEBYTES(1));
5204 else
5205 addr = (caddr_t)P2END((uintptr_t)addr,
5206 TTEBYTES(hashno));
5207 hashno = 1;
5208 } else {
5209 hashno++;
5210 }
5211 }
5212 
5213 sfmmu_hblks_list_purge(&list);
5214 DEMAP_RANGE_FLUSH(&dmr);
5215 cpuset = sfmmup->sfmmu_cpusran;
5216 xt_sync(cpuset);
5217 }
5218 
5219 /*
5220 * This function chgprots a range of addresses in an hmeblk. It returns the
5221 * next address that still needs chgprot.
5222 * It should be called with the hash lock held.
5223 * XXX It should be possible to optimize chgprot by not flushing every time but
5224 * on the other hand:
5225 * 1. do one flush crosscall.
5226 * 2.
only flush if we are increasing permissions (make sure this will work) 5227 */ 5228 static caddr_t 5229 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5230 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5231 { 5232 uint_t pprot; 5233 tte_t tte, ttemod; 5234 struct sf_hment *sfhmep; 5235 uint_t tteflags; 5236 int ttesz; 5237 struct page *pp = NULL; 5238 kmutex_t *pml, *pmtx; 5239 int ret; 5240 int use_demap_range; 5241 #if defined(SF_ERRATA_57) 5242 int check_exec; 5243 #endif 5244 5245 ASSERT(in_hblk_range(hmeblkp, addr)); 5246 ASSERT(hmeblkp->hblk_shw_bit == 0); 5247 ASSERT(!hmeblkp->hblk_shared); 5248 5249 #ifdef DEBUG 5250 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5251 (endaddr < get_hblk_endaddr(hmeblkp))) { 5252 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5253 } 5254 #endif /* DEBUG */ 5255 5256 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5257 ttesz = get_hblk_ttesz(hmeblkp); 5258 5259 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5260 #if defined(SF_ERRATA_57) 5261 check_exec = (sfmmup != ksfmmup) && 5262 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5263 ((vprot & PROT_EXEC) == PROT_EXEC); 5264 #endif 5265 HBLKTOHME(sfhmep, hmeblkp, addr); 5266 5267 /* 5268 * Flush the current demap region if addresses have been 5269 * skipped or the page size doesn't match. 5270 */ 5271 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5272 if (use_demap_range) { 5273 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5274 } else { 5275 DEMAP_RANGE_FLUSH(dmrp); 5276 } 5277 5278 while (addr < endaddr) { 5279 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5280 if (TTE_IS_VALID(&tte)) { 5281 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5282 /* 5283 * if the new protection is the same as old 5284 * continue 5285 */ 5286 goto next_addr; 5287 } 5288 pml = NULL; 5289 pp = sfhmep->hme_page; 5290 if (pp) { 5291 pml = sfmmu_mlist_enter(pp); 5292 } 5293 if (pp != sfhmep->hme_page) { 5294 /* 5295 * tte most have been unloaded 5296 * underneath us. Recheck 5297 */ 5298 ASSERT(pml); 5299 sfmmu_mlist_exit(pml); 5300 continue; 5301 } 5302 5303 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5304 5305 ttemod = tte; 5306 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5307 #if defined(SF_ERRATA_57) 5308 if (check_exec && addr < errata57_limit) 5309 ttemod.tte_exec_perm = 0; 5310 #endif 5311 ret = sfmmu_modifytte_try(&tte, &ttemod, 5312 &sfhmep->hme_tte); 5313 5314 if (ret < 0) { 5315 /* tte changed underneath us */ 5316 if (pml) { 5317 sfmmu_mlist_exit(pml); 5318 } 5319 continue; 5320 } 5321 5322 if (tteflags & TTE_HWWR_INT) { 5323 /* 5324 * need to sync if we are clearing modify bit. 5325 */ 5326 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5327 } 5328 5329 if (pp && PP_ISRO(pp)) { 5330 if (pprot & TTE_WRPRM_INT) { 5331 pmtx = sfmmu_page_enter(pp); 5332 PP_CLRRO(pp); 5333 sfmmu_page_exit(pmtx); 5334 } 5335 } 5336 5337 if (ret > 0 && use_demap_range) { 5338 DEMAP_RANGE_MARKPG(dmrp, addr); 5339 } else if (ret > 0) { 5340 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5341 } 5342 5343 if (pml) { 5344 sfmmu_mlist_exit(pml); 5345 } 5346 } 5347 next_addr: 5348 addr += TTEBYTES(ttesz); 5349 sfhmep++; 5350 DEMAP_RANGE_NEXTPG(dmrp); 5351 } 5352 return (addr); 5353 } 5354 5355 /* 5356 * This routine is deprecated and should only be used by hat_chgprot. 5357 * The correct routine is sfmmu_vtop_attr. 5358 * This routine converts virtual page protections to physical ones. It will 5359 * update the tteflags field with the tte mask corresponding to the protections 5360 * affected and it returns the new protections. 
It will also clear the modify 5361 * bit if we are taking away write permission. This is necessary since the 5362 * modify bit is the hardware permission bit and we need to clear it in order 5363 * to detect write faults. 5364 * It accepts the following special protections: 5365 * ~PROT_WRITE = remove write permissions. 5366 * ~PROT_USER = remove user permissions. 5367 */ 5368 static uint_t 5369 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5370 { 5371 if (vprot == (uint_t)~PROT_WRITE) { 5372 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5373 return (0); /* will cause wrprm to be cleared */ 5374 } 5375 if (vprot == (uint_t)~PROT_USER) { 5376 *tteflagsp = TTE_PRIV_INT; 5377 return (0); /* will cause privprm to be cleared */ 5378 } 5379 if ((vprot == 0) || (vprot == PROT_USER) || 5380 ((vprot & PROT_ALL) != vprot)) { 5381 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5382 } 5383 5384 switch (vprot) { 5385 case (PROT_READ): 5386 case (PROT_EXEC): 5387 case (PROT_EXEC | PROT_READ): 5388 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5389 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5390 case (PROT_WRITE): 5391 case (PROT_WRITE | PROT_READ): 5392 case (PROT_EXEC | PROT_WRITE): 5393 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5394 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5395 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5396 case (PROT_USER | PROT_READ): 5397 case (PROT_USER | PROT_EXEC): 5398 case (PROT_USER | PROT_EXEC | PROT_READ): 5399 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5400 return (0); /* clr prv and wrt */ 5401 case (PROT_USER | PROT_WRITE): 5402 case (PROT_USER | PROT_WRITE | PROT_READ): 5403 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5404 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5405 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5406 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5407 default: 5408 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5409 } 5410 return (0); 5411 } 5412 5413 /* 5414 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5415 * the normal algorithm would take too long for a very large VA range with 5416 * few real mappings. This routine just walks thru all HMEs in the global 5417 * hash table to find and remove mappings. 5418 */ 5419 static void 5420 hat_unload_large_virtual( 5421 struct hat *sfmmup, 5422 caddr_t startaddr, 5423 size_t len, 5424 uint_t flags, 5425 hat_callback_t *callback) 5426 { 5427 struct hmehash_bucket *hmebp; 5428 struct hme_blk *hmeblkp; 5429 struct hme_blk *pr_hblk = NULL; 5430 struct hme_blk *nx_hblk; 5431 struct hme_blk *list = NULL; 5432 int i; 5433 uint64_t hblkpa, prevpa, nx_pa; 5434 demap_range_t dmr, *dmrp; 5435 cpuset_t cpuset; 5436 caddr_t endaddr = startaddr + len; 5437 caddr_t sa; 5438 caddr_t ea; 5439 caddr_t cb_sa[MAX_CB_ADDR]; 5440 caddr_t cb_ea[MAX_CB_ADDR]; 5441 int addr_cnt = 0; 5442 int a = 0; 5443 5444 if (sfmmup->sfmmu_free) { 5445 dmrp = NULL; 5446 } else { 5447 dmrp = &dmr; 5448 DEMAP_RANGE_INIT(sfmmup, dmrp); 5449 } 5450 5451 /* 5452 * Loop through all the hash buckets of HME blocks looking for matches. 
5453 */ 5454 for (i = 0; i <= UHMEHASH_SZ; i++) { 5455 hmebp = &uhme_hash[i]; 5456 SFMMU_HASH_LOCK(hmebp); 5457 hmeblkp = hmebp->hmeblkp; 5458 hblkpa = hmebp->hmeh_nextpa; 5459 prevpa = 0; 5460 pr_hblk = NULL; 5461 while (hmeblkp) { 5462 nx_hblk = hmeblkp->hblk_next; 5463 nx_pa = hmeblkp->hblk_nextpa; 5464 5465 /* 5466 * skip if not this context, if a shadow block or 5467 * if the mapping is not in the requested range 5468 */ 5469 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5470 hmeblkp->hblk_shw_bit || 5471 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5472 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5473 pr_hblk = hmeblkp; 5474 prevpa = hblkpa; 5475 goto next_block; 5476 } 5477 5478 ASSERT(!hmeblkp->hblk_shared); 5479 /* 5480 * unload if there are any current valid mappings 5481 */ 5482 if (hmeblkp->hblk_vcnt != 0 || 5483 hmeblkp->hblk_hmecnt != 0) 5484 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5485 sa, ea, dmrp, flags); 5486 5487 /* 5488 * on unmap we also release the HME block itself, once 5489 * all mappings are gone. 5490 */ 5491 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5492 !hmeblkp->hblk_vcnt && 5493 !hmeblkp->hblk_hmecnt) { 5494 ASSERT(!hmeblkp->hblk_lckcnt); 5495 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 5496 prevpa, pr_hblk); 5497 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5498 } else { 5499 pr_hblk = hmeblkp; 5500 prevpa = hblkpa; 5501 } 5502 5503 if (callback == NULL) 5504 goto next_block; 5505 5506 /* 5507 * HME blocks may span more than one page, but we may be 5508 * unmapping only one page, so check for a smaller range 5509 * for the callback 5510 */ 5511 if (sa < startaddr) 5512 sa = startaddr; 5513 if (--ea > endaddr) 5514 ea = endaddr - 1; 5515 5516 cb_sa[addr_cnt] = sa; 5517 cb_ea[addr_cnt] = ea; 5518 if (++addr_cnt == MAX_CB_ADDR) { 5519 if (dmrp != NULL) { 5520 DEMAP_RANGE_FLUSH(dmrp); 5521 cpuset = sfmmup->sfmmu_cpusran; 5522 xt_sync(cpuset); 5523 } 5524 5525 for (a = 0; a < MAX_CB_ADDR; ++a) { 5526 callback->hcb_start_addr = cb_sa[a]; 5527 callback->hcb_end_addr = cb_ea[a]; 5528 callback->hcb_function(callback); 5529 } 5530 addr_cnt = 0; 5531 } 5532 5533 next_block: 5534 hmeblkp = nx_hblk; 5535 hblkpa = nx_pa; 5536 } 5537 SFMMU_HASH_UNLOCK(hmebp); 5538 } 5539 5540 sfmmu_hblks_list_purge(&list); 5541 if (dmrp != NULL) { 5542 DEMAP_RANGE_FLUSH(dmrp); 5543 cpuset = sfmmup->sfmmu_cpusran; 5544 xt_sync(cpuset); 5545 } 5546 5547 for (a = 0; a < addr_cnt; ++a) { 5548 callback->hcb_start_addr = cb_sa[a]; 5549 callback->hcb_end_addr = cb_ea[a]; 5550 callback->hcb_function(callback); 5551 } 5552 5553 /* 5554 * Check TSB and TLB page sizes if the process isn't exiting. 5555 */ 5556 if (!sfmmup->sfmmu_free) 5557 sfmmu_check_page_sizes(sfmmup, 0); 5558 } 5559 5560 /* 5561 * Unload all the mappings in the range [addr..addr+len). addr and len must 5562 * be MMU_PAGESIZE aligned. 
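 *
 * A hedged sketch of a caller that wants per-range notification
 * (my_unload_notify is a hypothetical function; only the hcb_* fields this
 * file fills in are shown):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 *
 * For each unloaded chunk, hcb_start_addr and hcb_end_addr are filled in
 * before hcb_function(&cb) is invoked; notifications are batched
 * (MAX_CB_ADDR at a time) to limit the number of xt_sync() cross calls.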
5563 */ 5564 5565 extern struct seg *segkmap; 5566 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5567 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5568 5569 5570 void 5571 hat_unload_callback( 5572 struct hat *sfmmup, 5573 caddr_t addr, 5574 size_t len, 5575 uint_t flags, 5576 hat_callback_t *callback) 5577 { 5578 struct hmehash_bucket *hmebp; 5579 hmeblk_tag hblktag; 5580 int hmeshift, hashno, iskernel; 5581 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5582 caddr_t endaddr; 5583 cpuset_t cpuset; 5584 uint64_t hblkpa, prevpa; 5585 int addr_count = 0; 5586 int a; 5587 caddr_t cb_start_addr[MAX_CB_ADDR]; 5588 caddr_t cb_end_addr[MAX_CB_ADDR]; 5589 int issegkmap = ISSEGKMAP(sfmmup, addr); 5590 demap_range_t dmr, *dmrp; 5591 5592 if (sfmmup->sfmmu_xhat_provider) { 5593 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 5594 return; 5595 } else { 5596 /* 5597 * This must be a CPU HAT. If the address space has 5598 * XHATs attached, unload the mappings for all of them, 5599 * just in case 5600 */ 5601 ASSERT(sfmmup->sfmmu_as != NULL); 5602 if (sfmmup->sfmmu_as->a_xhat != NULL) 5603 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 5604 len, flags, callback); 5605 } 5606 5607 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5608 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5609 5610 ASSERT(sfmmup != NULL); 5611 ASSERT((len & MMU_PAGEOFFSET) == 0); 5612 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5613 5614 /* 5615 * Probing through a large VA range (say 63 bits) will be slow, even 5616 * at 4 Meg steps between the probes. So, when the virtual address range 5617 * is very large, search the HME entries for what to unload. 5618 * 5619 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5620 * 5621 * UHMEHASH_SZ is number of hash buckets to examine 5622 * 5623 */ 5624 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5625 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5626 return; 5627 } 5628 5629 CPUSET_ZERO(cpuset); 5630 5631 /* 5632 * If the process is exiting, we can save a lot of fuss since 5633 * we'll flush the TLB when we free the ctx anyway. 5634 */ 5635 if (sfmmup->sfmmu_free) 5636 dmrp = NULL; 5637 else 5638 dmrp = &dmr; 5639 5640 DEMAP_RANGE_INIT(sfmmup, dmrp); 5641 endaddr = addr + len; 5642 hblktag.htag_id = sfmmup; 5643 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5644 5645 /* 5646 * It is likely for the vm to call unload over a wide range of 5647 * addresses that are actually very sparsely populated by 5648 * translations. In order to speed this up the sfmmu hat supports 5649 * the concept of shadow hmeblks. Dummy large page hmeblks that 5650 * correspond to actual small translations are allocated at tteload 5651 * time and are referred to as shadow hmeblks. Now, during unload 5652 * time, we first check if we have a shadow hmeblk for that 5653 * translation. The absence of one means the corresponding address 5654 * range is empty and can be skipped. 5655 * 5656 * The kernel is an exception to above statement and that is why 5657 * we don't use shadow hmeblks and hash starting from the smallest 5658 * page size. 
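 * As an illustration of the resulting probe order: a user hat starts at the
 * largest hash size (TTE256M, or TTE4M when fewer page sizes are supported)
 * and, whenever it hits a shadow hmeblk, decrements the hash size and
 * rehashes until it reaches the real mapping size; the kernel hat instead
 * starts at TTE64K and rehashes upward through the larger sizes.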
5659 */ 5660 if (sfmmup == KHATID) { 5661 iskernel = 1; 5662 hashno = TTE64K; 5663 } else { 5664 iskernel = 0; 5665 if (mmu_page_sizes == max_mmu_page_sizes) { 5666 hashno = TTE256M; 5667 } else { 5668 hashno = TTE4M; 5669 } 5670 } 5671 while (addr < endaddr) { 5672 hmeshift = HME_HASH_SHIFT(hashno); 5673 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5674 hblktag.htag_rehash = hashno; 5675 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5676 5677 SFMMU_HASH_LOCK(hmebp); 5678 5679 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 5680 prevpa, &list); 5681 if (hmeblkp == NULL) { 5682 /* 5683 * didn't find an hmeblk. skip the appropiate 5684 * address range. 5685 */ 5686 SFMMU_HASH_UNLOCK(hmebp); 5687 if (iskernel) { 5688 if (hashno < mmu_hashcnt) { 5689 hashno++; 5690 continue; 5691 } else { 5692 hashno = TTE64K; 5693 addr = (caddr_t)roundup((uintptr_t)addr 5694 + 1, MMU_PAGESIZE64K); 5695 continue; 5696 } 5697 } 5698 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5699 (1 << hmeshift)); 5700 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5701 ASSERT(hashno == TTE64K); 5702 continue; 5703 } 5704 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5705 hashno = TTE512K; 5706 continue; 5707 } 5708 if (mmu_page_sizes == max_mmu_page_sizes) { 5709 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5710 hashno = TTE4M; 5711 continue; 5712 } 5713 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5714 hashno = TTE32M; 5715 continue; 5716 } 5717 hashno = TTE256M; 5718 continue; 5719 } else { 5720 hashno = TTE4M; 5721 continue; 5722 } 5723 } 5724 ASSERT(hmeblkp); 5725 ASSERT(!hmeblkp->hblk_shared); 5726 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5727 /* 5728 * If the valid count is zero we can skip the range 5729 * mapped by this hmeblk. 5730 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5731 * is used by segment drivers as a hint 5732 * that the mapping resource won't be used any longer. 5733 * The best example of this is during exit(). 5734 */ 5735 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5736 get_hblk_span(hmeblkp)); 5737 if ((flags & HAT_UNLOAD_UNMAP) || 5738 (iskernel && !issegkmap)) { 5739 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5740 pr_hblk); 5741 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5742 } 5743 SFMMU_HASH_UNLOCK(hmebp); 5744 5745 if (iskernel) { 5746 hashno = TTE64K; 5747 continue; 5748 } 5749 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5750 ASSERT(hashno == TTE64K); 5751 continue; 5752 } 5753 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5754 hashno = TTE512K; 5755 continue; 5756 } 5757 if (mmu_page_sizes == max_mmu_page_sizes) { 5758 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5759 hashno = TTE4M; 5760 continue; 5761 } 5762 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5763 hashno = TTE32M; 5764 continue; 5765 } 5766 hashno = TTE256M; 5767 continue; 5768 } else { 5769 hashno = TTE4M; 5770 continue; 5771 } 5772 } 5773 if (hmeblkp->hblk_shw_bit) { 5774 /* 5775 * If we encounter a shadow hmeblk we know there is 5776 * smaller sized hmeblks mapping the same address space. 5777 * Decrement the hash size and rehash. 5778 */ 5779 ASSERT(sfmmup != KHATID); 5780 hashno--; 5781 SFMMU_HASH_UNLOCK(hmebp); 5782 continue; 5783 } 5784 5785 /* 5786 * track callback address ranges. 
5787 * only start a new range when it's not contiguous 5788 */ 5789 if (callback != NULL) { 5790 if (addr_count > 0 && 5791 addr == cb_end_addr[addr_count - 1]) 5792 --addr_count; 5793 else 5794 cb_start_addr[addr_count] = addr; 5795 } 5796 5797 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5798 dmrp, flags); 5799 5800 if (callback != NULL) 5801 cb_end_addr[addr_count++] = addr; 5802 5803 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5804 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5805 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5806 pr_hblk); 5807 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5808 } 5809 SFMMU_HASH_UNLOCK(hmebp); 5810 5811 /* 5812 * Notify our caller as to exactly which pages 5813 * have been unloaded. We do these in clumps, 5814 * to minimize the number of xt_sync()s that need to occur. 5815 */ 5816 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5817 DEMAP_RANGE_FLUSH(dmrp); 5818 if (dmrp != NULL) { 5819 cpuset = sfmmup->sfmmu_cpusran; 5820 xt_sync(cpuset); 5821 } 5822 5823 for (a = 0; a < MAX_CB_ADDR; ++a) { 5824 callback->hcb_start_addr = cb_start_addr[a]; 5825 callback->hcb_end_addr = cb_end_addr[a]; 5826 callback->hcb_function(callback); 5827 } 5828 addr_count = 0; 5829 } 5830 if (iskernel) { 5831 hashno = TTE64K; 5832 continue; 5833 } 5834 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5835 ASSERT(hashno == TTE64K); 5836 continue; 5837 } 5838 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5839 hashno = TTE512K; 5840 continue; 5841 } 5842 if (mmu_page_sizes == max_mmu_page_sizes) { 5843 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5844 hashno = TTE4M; 5845 continue; 5846 } 5847 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5848 hashno = TTE32M; 5849 continue; 5850 } 5851 hashno = TTE256M; 5852 } else { 5853 hashno = TTE4M; 5854 } 5855 } 5856 5857 sfmmu_hblks_list_purge(&list); 5858 DEMAP_RANGE_FLUSH(dmrp); 5859 if (dmrp != NULL) { 5860 cpuset = sfmmup->sfmmu_cpusran; 5861 xt_sync(cpuset); 5862 } 5863 if (callback && addr_count != 0) { 5864 for (a = 0; a < addr_count; ++a) { 5865 callback->hcb_start_addr = cb_start_addr[a]; 5866 callback->hcb_end_addr = cb_end_addr[a]; 5867 callback->hcb_function(callback); 5868 } 5869 } 5870 5871 /* 5872 * Check TSB and TLB page sizes if the process isn't exiting. 5873 */ 5874 if (!sfmmup->sfmmu_free) 5875 sfmmu_check_page_sizes(sfmmup, 0); 5876 } 5877 5878 /* 5879 * Unload all the mappings in the range [addr..addr+len). addr and len must 5880 * be MMU_PAGESIZE aligned. 5881 */ 5882 void 5883 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5884 { 5885 if (sfmmup->sfmmu_xhat_provider) { 5886 XHAT_UNLOAD(sfmmup, addr, len, flags); 5887 return; 5888 } 5889 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5890 } 5891 5892 5893 /* 5894 * Find the largest mapping size for this page. 5895 */ 5896 int 5897 fnd_mapping_sz(page_t *pp) 5898 { 5899 int sz; 5900 int p_index; 5901 5902 p_index = PP_MAPINDEX(pp); 5903 5904 sz = 0; 5905 p_index >>= 1; /* don't care about 8K bit */ 5906 for (; p_index; p_index >>= 1) { 5907 sz++; 5908 } 5909 5910 return (sz); 5911 } 5912 5913 /* 5914 * This function unloads a range of addresses for an hmeblk. 5915 * It returns the next address to be unloaded. 5916 * It should be called with the hash lock held. 
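 *
 * Callers drive it in the usual pattern (sketch only):
 *
 *	SFMMU_HASH_LOCK(hmebp);
 *	...
 *	addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
 *	    dmrp, flags);
 *	...
 *	SFMMU_HASH_UNLOCK(hmebp);
 *
 * i.e. the returned address becomes the next starting address, which
 * is how hat_unload_callback() above walks a range spanning several
 * hmeblks.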
5917 */ 5918 static caddr_t 5919 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5920 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5921 { 5922 tte_t tte, ttemod; 5923 struct sf_hment *sfhmep; 5924 int ttesz; 5925 long ttecnt; 5926 page_t *pp; 5927 kmutex_t *pml; 5928 int ret; 5929 int use_demap_range; 5930 5931 ASSERT(in_hblk_range(hmeblkp, addr)); 5932 ASSERT(!hmeblkp->hblk_shw_bit); 5933 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 5934 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 5935 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 5936 5937 #ifdef DEBUG 5938 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5939 (endaddr < get_hblk_endaddr(hmeblkp))) { 5940 panic("sfmmu_hblk_unload: partial unload of large page"); 5941 } 5942 #endif /* DEBUG */ 5943 5944 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5945 ttesz = get_hblk_ttesz(hmeblkp); 5946 5947 use_demap_range = ((dmrp == NULL) || 5948 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5949 5950 if (use_demap_range) { 5951 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5952 } else { 5953 DEMAP_RANGE_FLUSH(dmrp); 5954 } 5955 ttecnt = 0; 5956 HBLKTOHME(sfhmep, hmeblkp, addr); 5957 5958 while (addr < endaddr) { 5959 pml = NULL; 5960 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5961 if (TTE_IS_VALID(&tte)) { 5962 pp = sfhmep->hme_page; 5963 if (pp != NULL) { 5964 pml = sfmmu_mlist_enter(pp); 5965 } 5966 5967 /* 5968 * Verify if hme still points to 'pp' now that 5969 * we have p_mapping lock. 5970 */ 5971 if (sfhmep->hme_page != pp) { 5972 if (pp != NULL && sfhmep->hme_page != NULL) { 5973 ASSERT(pml != NULL); 5974 sfmmu_mlist_exit(pml); 5975 /* Re-start this iteration. */ 5976 continue; 5977 } 5978 ASSERT((pp != NULL) && 5979 (sfhmep->hme_page == NULL)); 5980 goto tte_unloaded; 5981 } 5982 5983 /* 5984 * This point on we have both HASH and p_mapping 5985 * lock. 5986 */ 5987 ASSERT(pp == sfhmep->hme_page); 5988 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5989 5990 /* 5991 * We need to loop on modify tte because it is 5992 * possible for pagesync to come along and 5993 * change the software bits beneath us. 5994 * 5995 * Page_unload can also invalidate the tte after 5996 * we read tte outside of p_mapping lock. 5997 */ 5998 again: 5999 ttemod = tte; 6000 6001 TTE_SET_INVALID(&ttemod); 6002 ret = sfmmu_modifytte_try(&tte, &ttemod, 6003 &sfhmep->hme_tte); 6004 6005 if (ret <= 0) { 6006 if (TTE_IS_VALID(&tte)) { 6007 ASSERT(ret < 0); 6008 goto again; 6009 } 6010 if (pp != NULL) { 6011 panic("sfmmu_hblk_unload: pp = 0x%p " 6012 "tte became invalid under mlist" 6013 " lock = 0x%p", pp, pml); 6014 } 6015 continue; 6016 } 6017 6018 if (!(flags & HAT_UNLOAD_NOSYNC)) { 6019 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6020 } 6021 6022 /* 6023 * Ok- we invalidated the tte. Do the rest of the job. 6024 */ 6025 ttecnt++; 6026 6027 if (flags & HAT_UNLOAD_UNLOCK) { 6028 ASSERT(hmeblkp->hblk_lckcnt > 0); 6029 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 6030 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 6031 } 6032 6033 /* 6034 * Normally we would need to flush the page 6035 * from the virtual cache at this point in 6036 * order to prevent a potential cache alias 6037 * inconsistency. 6038 * The particular scenario we need to worry 6039 * about is: 6040 * Given: va1 and va2 are two virtual address 6041 * that alias and map the same physical 6042 * address. 6043 * 1. mapping exists from va1 to pa and data 6044 * has been read into the cache. 6045 * 2. unload va1. 6046 * 3. load va2 and modify data using va2. 6047 * 4 unload va2. 6048 * 5. 
load va1 and reference data. Unless we 6049 * flush the data cache when we unload we will 6050 * get stale data. 6051 * Fortunately, page coloring eliminates the 6052 * above scenario by remembering the color a 6053 * physical page was last or is currently 6054 * mapped to. Now, we delay the flush until 6055 * the loading of translations. Only when the 6056 * new translation is of a different color 6057 * are we forced to flush. 6058 */ 6059 if (use_demap_range) { 6060 /* 6061 * Mark this page as needing a demap. 6062 */ 6063 DEMAP_RANGE_MARKPG(dmrp, addr); 6064 } else { 6065 ASSERT(sfmmup != NULL); 6066 ASSERT(!hmeblkp->hblk_shared); 6067 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6068 sfmmup->sfmmu_free, 0); 6069 } 6070 6071 if (pp) { 6072 /* 6073 * Remove the hment from the mapping list 6074 */ 6075 ASSERT(hmeblkp->hblk_hmecnt > 0); 6076 6077 /* 6078 * Again, we cannot 6079 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6080 */ 6081 HME_SUB(sfhmep, pp); 6082 membar_stst(); 6083 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6084 } 6085 6086 ASSERT(hmeblkp->hblk_vcnt > 0); 6087 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6088 6089 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6090 !hmeblkp->hblk_lckcnt); 6091 6092 #ifdef VAC 6093 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6094 if (PP_ISTNC(pp)) { 6095 /* 6096 * If page was temporary 6097 * uncached, try to recache 6098 * it. Note that HME_SUB() was 6099 * called above so p_index and 6100 * mlist had been updated. 6101 */ 6102 conv_tnc(pp, ttesz); 6103 } else if (pp->p_mapping == NULL) { 6104 ASSERT(kpm_enable); 6105 /* 6106 * Page is marked to be in VAC conflict 6107 * to an existing kpm mapping and/or is 6108 * kpm mapped using only the regular 6109 * pagesize. 6110 */ 6111 sfmmu_kpm_hme_unload(pp); 6112 } 6113 } 6114 #endif /* VAC */ 6115 } else if ((pp = sfhmep->hme_page) != NULL) { 6116 /* 6117 * TTE is invalid but the hme 6118 * still exists. let pageunload 6119 * complete its job. 6120 */ 6121 ASSERT(pml == NULL); 6122 pml = sfmmu_mlist_enter(pp); 6123 if (sfhmep->hme_page != NULL) { 6124 sfmmu_mlist_exit(pml); 6125 continue; 6126 } 6127 ASSERT(sfhmep->hme_page == NULL); 6128 } else if (hmeblkp->hblk_hmecnt != 0) { 6129 /* 6130 * pageunload may have not finished decrementing 6131 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6132 * wait for pageunload to finish. Rely on pageunload 6133 * to decrement hblk_hmecnt after hblk_vcnt. 6134 */ 6135 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6136 ASSERT(pml == NULL); 6137 if (pf_is_memory(pfn)) { 6138 pp = page_numtopp_nolock(pfn); 6139 if (pp != NULL) { 6140 pml = sfmmu_mlist_enter(pp); 6141 sfmmu_mlist_exit(pml); 6142 pml = NULL; 6143 } 6144 } 6145 } 6146 6147 tte_unloaded: 6148 /* 6149 * At this point, the tte we are looking at 6150 * should be unloaded, and hme has been unlinked 6151 * from page too. This is important because in 6152 * pageunload, it does ttesync() then HME_SUB. 6153 * We need to make sure HME_SUB has been completed 6154 * so we know ttesync() has been completed. Otherwise, 6155 * at exit time, after return from hat layer, VM will 6156 * release as structure which hat_setstat() (called 6157 * by ttesync()) needs. 
6158 */ 6159 #ifdef DEBUG 6160 { 6161 tte_t dtte; 6162 6163 ASSERT(sfhmep->hme_page == NULL); 6164 6165 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6166 ASSERT(!TTE_IS_VALID(&dtte)); 6167 } 6168 #endif 6169 6170 if (pml) { 6171 sfmmu_mlist_exit(pml); 6172 } 6173 6174 addr += TTEBYTES(ttesz); 6175 sfhmep++; 6176 DEMAP_RANGE_NEXTPG(dmrp); 6177 } 6178 /* 6179 * For shared hmeblks this routine is only called when region is freed 6180 * and no longer referenced. So no need to decrement ttecnt 6181 * in the region structure here. 6182 */ 6183 if (ttecnt > 0 && sfmmup != NULL) { 6184 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6185 } 6186 return (addr); 6187 } 6188 6189 /* 6190 * Synchronize all the mappings in the range [addr..addr+len). 6191 * Can be called with clearflag having two states: 6192 * HAT_SYNC_DONTZERO means just return the rm stats 6193 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6194 */ 6195 void 6196 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6197 { 6198 struct hmehash_bucket *hmebp; 6199 hmeblk_tag hblktag; 6200 int hmeshift, hashno = 1; 6201 struct hme_blk *hmeblkp, *list = NULL; 6202 caddr_t endaddr; 6203 cpuset_t cpuset; 6204 6205 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 6206 ASSERT((sfmmup == ksfmmup) || 6207 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 6208 ASSERT((len & MMU_PAGEOFFSET) == 0); 6209 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6210 (clearflag == HAT_SYNC_ZERORM)); 6211 6212 CPUSET_ZERO(cpuset); 6213 6214 endaddr = addr + len; 6215 hblktag.htag_id = sfmmup; 6216 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6217 6218 /* 6219 * Spitfire supports 4 page sizes. 6220 * Most pages are expected to be of the smallest page 6221 * size (8K) and these will not need to be rehashed. 64K 6222 * pages also don't need to be rehashed because the an hmeblk 6223 * spans 64K of address space. 512K pages might need 1 rehash and 6224 * and 4M pages 2 rehashes. 6225 */ 6226 while (addr < endaddr) { 6227 hmeshift = HME_HASH_SHIFT(hashno); 6228 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6229 hblktag.htag_rehash = hashno; 6230 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6231 6232 SFMMU_HASH_LOCK(hmebp); 6233 6234 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6235 if (hmeblkp != NULL) { 6236 ASSERT(!hmeblkp->hblk_shared); 6237 /* 6238 * We've encountered a shadow hmeblk so skip the range 6239 * of the next smaller mapping size. 6240 */ 6241 if (hmeblkp->hblk_shw_bit) { 6242 ASSERT(sfmmup != ksfmmup); 6243 ASSERT(hashno > 1); 6244 addr = (caddr_t)P2END((uintptr_t)addr, 6245 TTEBYTES(hashno - 1)); 6246 } else { 6247 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6248 addr, endaddr, clearflag); 6249 } 6250 SFMMU_HASH_UNLOCK(hmebp); 6251 hashno = 1; 6252 continue; 6253 } 6254 SFMMU_HASH_UNLOCK(hmebp); 6255 6256 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6257 /* 6258 * We have traversed the whole list and rehashed 6259 * if necessary without finding the address to sync. 6260 * This is ok so we increment the address by the 6261 * smallest hmeblk range for kernel mappings and the 6262 * largest hmeblk range, to account for shadow hmeblks, 6263 * for user mappings and continue. 
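 *
 * For example (illustrative): P2END(addr, TTEBYTES(1)) advances addr
 * to the next 64K boundary, the span of the smallest (8K-tte) hmeblk,
 * while P2END(addr, TTEBYTES(hashno)) skips the whole chunk covered by
 * the largest hash level probed; a shadow hmeblk at that level would
 * have been found if any smaller user mappings existed there.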
6264 */ 6265 if (sfmmup == ksfmmup) 6266 addr = (caddr_t)P2END((uintptr_t)addr, 6267 TTEBYTES(1)); 6268 else 6269 addr = (caddr_t)P2END((uintptr_t)addr, 6270 TTEBYTES(hashno)); 6271 hashno = 1; 6272 } else { 6273 hashno++; 6274 } 6275 } 6276 sfmmu_hblks_list_purge(&list); 6277 cpuset = sfmmup->sfmmu_cpusran; 6278 xt_sync(cpuset); 6279 } 6280 6281 static caddr_t 6282 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6283 caddr_t endaddr, int clearflag) 6284 { 6285 tte_t tte, ttemod; 6286 struct sf_hment *sfhmep; 6287 int ttesz; 6288 struct page *pp; 6289 kmutex_t *pml; 6290 int ret; 6291 6292 ASSERT(hmeblkp->hblk_shw_bit == 0); 6293 ASSERT(!hmeblkp->hblk_shared); 6294 6295 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6296 6297 ttesz = get_hblk_ttesz(hmeblkp); 6298 HBLKTOHME(sfhmep, hmeblkp, addr); 6299 6300 while (addr < endaddr) { 6301 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6302 if (TTE_IS_VALID(&tte)) { 6303 pml = NULL; 6304 pp = sfhmep->hme_page; 6305 if (pp) { 6306 pml = sfmmu_mlist_enter(pp); 6307 } 6308 if (pp != sfhmep->hme_page) { 6309 /* 6310 * tte most have been unloaded 6311 * underneath us. Recheck 6312 */ 6313 ASSERT(pml); 6314 sfmmu_mlist_exit(pml); 6315 continue; 6316 } 6317 6318 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6319 6320 if (clearflag == HAT_SYNC_ZERORM) { 6321 ttemod = tte; 6322 TTE_CLR_RM(&ttemod); 6323 ret = sfmmu_modifytte_try(&tte, &ttemod, 6324 &sfhmep->hme_tte); 6325 if (ret < 0) { 6326 if (pml) { 6327 sfmmu_mlist_exit(pml); 6328 } 6329 continue; 6330 } 6331 6332 if (ret > 0) { 6333 sfmmu_tlb_demap(addr, sfmmup, 6334 hmeblkp, 0, 0); 6335 } 6336 } 6337 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6338 if (pml) { 6339 sfmmu_mlist_exit(pml); 6340 } 6341 } 6342 addr += TTEBYTES(ttesz); 6343 sfhmep++; 6344 } 6345 return (addr); 6346 } 6347 6348 /* 6349 * This function will sync a tte to the page struct and it will 6350 * update the hat stats. Currently it allows us to pass a NULL pp 6351 * and we will simply update the stats. We may want to change this 6352 * so we only keep stats for pages backed by pp's. 6353 */ 6354 static void 6355 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6356 { 6357 uint_t rm = 0; 6358 int sz; 6359 pgcnt_t npgs; 6360 6361 ASSERT(TTE_IS_VALID(ttep)); 6362 6363 if (TTE_IS_NOSYNC(ttep)) { 6364 return; 6365 } 6366 6367 if (TTE_IS_REF(ttep)) { 6368 rm = P_REF; 6369 } 6370 if (TTE_IS_MOD(ttep)) { 6371 rm |= P_MOD; 6372 } 6373 6374 if (rm == 0) { 6375 return; 6376 } 6377 6378 sz = TTE_CSZ(ttep); 6379 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6380 int i; 6381 caddr_t vaddr = addr; 6382 6383 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 6384 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 6385 } 6386 6387 } 6388 6389 /* 6390 * XXX I want to use cas to update nrm bits but they 6391 * currently belong in common/vm and not in hat where 6392 * they should be. 6393 * The nrm bits are protected by the same mutex as 6394 * the one that protects the page's mapping list. 6395 */ 6396 if (!pp) 6397 return; 6398 ASSERT(sfmmu_mlist_held(pp)); 6399 /* 6400 * If the tte is for a large page, we need to sync all the 6401 * pages covered by the tte. 6402 */ 6403 if (sz != TTE8K) { 6404 ASSERT(pp->p_szc != 0); 6405 pp = PP_GROUPLEADER(pp, sz); 6406 ASSERT(sfmmu_mlist_held(pp)); 6407 } 6408 6409 /* Get number of pages from tte size. 
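 * For instance, TTEPAGES(TTE4M) is 512, so a single 4M tte causes all
 * 512 constituent 8K page_t's to be synced by the loop below.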
*/ 6410 npgs = TTEPAGES(sz); 6411 6412 do { 6413 ASSERT(pp); 6414 ASSERT(sfmmu_mlist_held(pp)); 6415 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6416 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 6417 hat_page_setattr(pp, rm); 6418 6419 /* 6420 * Are we done? If not, we must have a large mapping. 6421 * For large mappings we need to sync the rest of the pages 6422 * covered by this tte; goto the next page. 6423 */ 6424 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6425 } 6426 6427 /* 6428 * Execute pre-callback handler of each pa_hment linked to pp 6429 * 6430 * Inputs: 6431 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6432 * capture_cpus: pointer to return value (below) 6433 * 6434 * Returns: 6435 * Propagates the subsystem callback return values back to the caller; 6436 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6437 * is zero if all of the pa_hments are of a type that do not require 6438 * capturing CPUs prior to suspending the mapping, else it is 1. 6439 */ 6440 static int 6441 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6442 { 6443 struct sf_hment *sfhmep; 6444 struct pa_hment *pahmep; 6445 int (*f)(caddr_t, uint_t, uint_t, void *); 6446 int ret; 6447 id_t id; 6448 int locked = 0; 6449 kmutex_t *pml; 6450 6451 ASSERT(PAGE_EXCL(pp)); 6452 if (!sfmmu_mlist_held(pp)) { 6453 pml = sfmmu_mlist_enter(pp); 6454 locked = 1; 6455 } 6456 6457 if (capture_cpus) 6458 *capture_cpus = 0; 6459 6460 top: 6461 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6462 /* 6463 * skip sf_hments corresponding to VA<->PA mappings; 6464 * for pa_hment's, hme_tte.ll is zero 6465 */ 6466 if (!IS_PAHME(sfhmep)) 6467 continue; 6468 6469 pahmep = sfhmep->hme_data; 6470 ASSERT(pahmep != NULL); 6471 6472 /* 6473 * skip if pre-handler has been called earlier in this loop 6474 */ 6475 if (pahmep->flags & flag) 6476 continue; 6477 6478 id = pahmep->cb_id; 6479 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6480 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6481 *capture_cpus = 1; 6482 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6483 pahmep->flags |= flag; 6484 continue; 6485 } 6486 6487 /* 6488 * Drop the mapping list lock to avoid locking order issues. 6489 */ 6490 if (locked) 6491 sfmmu_mlist_exit(pml); 6492 6493 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6494 if (ret != 0) 6495 return (ret); /* caller must do the cleanup */ 6496 6497 if (locked) { 6498 pml = sfmmu_mlist_enter(pp); 6499 pahmep->flags |= flag; 6500 goto top; 6501 } 6502 6503 pahmep->flags |= flag; 6504 } 6505 6506 if (locked) 6507 sfmmu_mlist_exit(pml); 6508 6509 return (0); 6510 } 6511 6512 /* 6513 * Execute post-callback handler of each pa_hment linked to pp 6514 * 6515 * Same overall assumptions and restrictions apply as for 6516 * hat_pageprocess_precallbacks(). 
6517 */ 6518 static void 6519 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6520 { 6521 pfn_t pgpfn = pp->p_pagenum; 6522 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6523 pfn_t newpfn; 6524 struct sf_hment *sfhmep; 6525 struct pa_hment *pahmep; 6526 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6527 id_t id; 6528 int locked = 0; 6529 kmutex_t *pml; 6530 6531 ASSERT(PAGE_EXCL(pp)); 6532 if (!sfmmu_mlist_held(pp)) { 6533 pml = sfmmu_mlist_enter(pp); 6534 locked = 1; 6535 } 6536 6537 top: 6538 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6539 /* 6540 * skip sf_hments corresponding to VA<->PA mappings; 6541 * for pa_hment's, hme_tte.ll is zero 6542 */ 6543 if (!IS_PAHME(sfhmep)) 6544 continue; 6545 6546 pahmep = sfhmep->hme_data; 6547 ASSERT(pahmep != NULL); 6548 6549 if ((pahmep->flags & flag) == 0) 6550 continue; 6551 6552 pahmep->flags &= ~flag; 6553 6554 id = pahmep->cb_id; 6555 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6556 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6557 continue; 6558 6559 /* 6560 * Convert the base page PFN into the constituent PFN 6561 * which is needed by the callback handler. 6562 */ 6563 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6564 6565 /* 6566 * Drop the mapping list lock to avoid locking order issues. 6567 */ 6568 if (locked) 6569 sfmmu_mlist_exit(pml); 6570 6571 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6572 != 0) 6573 panic("sfmmu: posthandler failed"); 6574 6575 if (locked) { 6576 pml = sfmmu_mlist_enter(pp); 6577 goto top; 6578 } 6579 } 6580 6581 if (locked) 6582 sfmmu_mlist_exit(pml); 6583 } 6584 6585 /* 6586 * Suspend locked kernel mapping 6587 */ 6588 void 6589 hat_pagesuspend(struct page *pp) 6590 { 6591 struct sf_hment *sfhmep; 6592 sfmmu_t *sfmmup; 6593 tte_t tte, ttemod; 6594 struct hme_blk *hmeblkp; 6595 caddr_t addr; 6596 int index, cons; 6597 cpuset_t cpuset; 6598 6599 ASSERT(PAGE_EXCL(pp)); 6600 ASSERT(sfmmu_mlist_held(pp)); 6601 6602 mutex_enter(&kpr_suspendlock); 6603 6604 /* 6605 * We're about to suspend a kernel mapping so mark this thread as 6606 * non-traceable by DTrace. This prevents us from running into issues 6607 * with probe context trying to touch a suspended page 6608 * in the relocation codepath itself. 6609 */ 6610 curthread->t_flag |= T_DONTDTRACE; 6611 6612 index = PP_MAPINDEX(pp); 6613 cons = TTE8K; 6614 6615 retry: 6616 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6617 6618 if (IS_PAHME(sfhmep)) 6619 continue; 6620 6621 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6622 continue; 6623 6624 /* 6625 * Loop until we successfully set the suspend bit in 6626 * the TTE. 6627 */ 6628 again: 6629 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6630 ASSERT(TTE_IS_VALID(&tte)); 6631 6632 ttemod = tte; 6633 TTE_SET_SUSPEND(&ttemod); 6634 if (sfmmu_modifytte_try(&tte, &ttemod, 6635 &sfhmep->hme_tte) < 0) 6636 goto again; 6637 6638 /* 6639 * Invalidate TSB entry 6640 */ 6641 hmeblkp = sfmmu_hmetohblk(sfhmep); 6642 6643 sfmmup = hblktosfmmu(hmeblkp); 6644 ASSERT(sfmmup == ksfmmup); 6645 ASSERT(!hmeblkp->hblk_shared); 6646 6647 addr = tte_to_vaddr(hmeblkp, tte); 6648 6649 /* 6650 * No need to make sure that the TSB for this sfmmu is 6651 * not being relocated since it is ksfmmup and thus it 6652 * will never be relocated. 
6653 */ 6654 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6655 6656 /* 6657 * Update xcall stats 6658 */ 6659 cpuset = cpu_ready_set; 6660 CPUSET_DEL(cpuset, CPU->cpu_id); 6661 6662 /* LINTED: constant in conditional context */ 6663 SFMMU_XCALL_STATS(ksfmmup); 6664 6665 /* 6666 * Flush TLB entry on remote CPU's 6667 */ 6668 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6669 (uint64_t)ksfmmup); 6670 xt_sync(cpuset); 6671 6672 /* 6673 * Flush TLB entry on local CPU 6674 */ 6675 vtag_flushpage(addr, (uint64_t)ksfmmup); 6676 } 6677 6678 while (index != 0) { 6679 index = index >> 1; 6680 if (index != 0) 6681 cons++; 6682 if (index & 0x1) { 6683 pp = PP_GROUPLEADER(pp, cons); 6684 goto retry; 6685 } 6686 } 6687 } 6688 6689 #ifdef DEBUG 6690 6691 #define N_PRLE 1024 6692 struct prle { 6693 page_t *targ; 6694 page_t *repl; 6695 int status; 6696 int pausecpus; 6697 hrtime_t whence; 6698 }; 6699 6700 static struct prle page_relocate_log[N_PRLE]; 6701 static int prl_entry; 6702 static kmutex_t prl_mutex; 6703 6704 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6705 mutex_enter(&prl_mutex); \ 6706 page_relocate_log[prl_entry].targ = *(t); \ 6707 page_relocate_log[prl_entry].repl = *(r); \ 6708 page_relocate_log[prl_entry].status = (s); \ 6709 page_relocate_log[prl_entry].pausecpus = (p); \ 6710 page_relocate_log[prl_entry].whence = gethrtime(); \ 6711 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6712 mutex_exit(&prl_mutex); 6713 6714 #else /* !DEBUG */ 6715 #define PAGE_RELOCATE_LOG(t, r, s, p) 6716 #endif 6717 6718 /* 6719 * Core Kernel Page Relocation Algorithm 6720 * 6721 * Input: 6722 * 6723 * target : constituent pages are SE_EXCL locked. 6724 * replacement: constituent pages are SE_EXCL locked. 6725 * 6726 * Output: 6727 * 6728 * nrelocp: number of pages relocated 6729 */ 6730 int 6731 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6732 { 6733 page_t *targ, *repl; 6734 page_t *tpp, *rpp; 6735 kmutex_t *low, *high; 6736 spgcnt_t npages, i; 6737 page_t *pl = NULL; 6738 int old_pil; 6739 cpuset_t cpuset; 6740 int cap_cpus; 6741 int ret; 6742 6743 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6744 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6745 return (EAGAIN); 6746 } 6747 6748 mutex_enter(&kpr_mutex); 6749 kreloc_thread = curthread; 6750 6751 targ = *target; 6752 repl = *replacement; 6753 ASSERT(repl != NULL); 6754 ASSERT(targ->p_szc == repl->p_szc); 6755 6756 npages = page_get_pagecnt(targ->p_szc); 6757 6758 /* 6759 * unload VA<->PA mappings that are not locked 6760 */ 6761 tpp = targ; 6762 for (i = 0; i < npages; i++) { 6763 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6764 tpp++; 6765 } 6766 6767 /* 6768 * Do "presuspend" callbacks, in a context from which we can still 6769 * block as needed. Note that we don't hold the mapping list lock 6770 * of "targ" at this point due to potential locking order issues; 6771 * we assume that between the hat_pageunload() above and holding 6772 * the SE_EXCL lock that the mapping list *cannot* change at this 6773 * point. 6774 */ 6775 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6776 if (ret != 0) { 6777 /* 6778 * EIO translates to fatal error, for all others cleanup 6779 * and return EAGAIN. 
6780 */ 6781 ASSERT(ret != EIO); 6782 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6783 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6784 kreloc_thread = NULL; 6785 mutex_exit(&kpr_mutex); 6786 return (EAGAIN); 6787 } 6788 6789 /* 6790 * acquire p_mapping list lock for both the target and replacement 6791 * root pages. 6792 * 6793 * low and high refer to the need to grab the mlist locks in a 6794 * specific order in order to prevent race conditions. Thus the 6795 * lower lock must be grabbed before the higher lock. 6796 * 6797 * This will block hat_unload's accessing p_mapping list. Since 6798 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6799 * blocked. Thus, no one else will be accessing the p_mapping list 6800 * while we suspend and reload the locked mapping below. 6801 */ 6802 tpp = targ; 6803 rpp = repl; 6804 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6805 6806 kpreempt_disable(); 6807 6808 #ifdef VAC 6809 /* 6810 * If the replacement page is of a different virtual color 6811 * than the page it is replacing, we need to handle the VAC 6812 * consistency for it just as we would if we were setting up 6813 * a new mapping to a page. 6814 */ 6815 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) { 6816 if (tpp->p_vcolor != rpp->p_vcolor) { 6817 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6818 rpp->p_pagenum); 6819 } 6820 } 6821 #endif 6822 6823 /* 6824 * We raise our PIL to 13 so that we don't get captured by 6825 * another CPU or pinned by an interrupt thread. We can't go to 6826 * PIL 14 since the nexus driver(s) may need to interrupt at 6827 * that level in the case of IOMMU pseudo mappings. 6828 */ 6829 cpuset = cpu_ready_set; 6830 CPUSET_DEL(cpuset, CPU->cpu_id); 6831 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6832 old_pil = splr(XCALL_PIL); 6833 } else { 6834 old_pil = -1; 6835 xc_attention(cpuset); 6836 } 6837 ASSERT(getpil() == XCALL_PIL); 6838 6839 /* 6840 * Now do suspend callbacks. In the case of an IOMMU mapping 6841 * this will suspend all DMA activity to the page while it is 6842 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6843 * may be captured at this point we should have acquired any needed 6844 * locks in the presuspend callback. 6845 */ 6846 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6847 if (ret != 0) { 6848 repl = targ; 6849 goto suspend_fail; 6850 } 6851 6852 /* 6853 * Raise the PIL yet again, this time to block all high-level 6854 * interrupts on this CPU. This is necessary to prevent an 6855 * interrupt routine from pinning the thread which holds the 6856 * mapping suspended and then touching the suspended page. 6857 * 6858 * Once the page is suspended we also need to be careful to 6859 * avoid calling any functions which touch any seg_kmem memory 6860 * since that memory may be backed by the very page we are 6861 * relocating in here! 6862 */ 6863 hat_pagesuspend(targ); 6864 6865 /* 6866 * Now that we are confident everybody has stopped using this page, 6867 * copy the page contents. Note we use a physical copy to prevent 6868 * locking issues and to avoid fpRAS because we can't handle it in 6869 * this context. 6870 */ 6871 for (i = 0; i < npages; i++, tpp++, rpp++) { 6872 /* 6873 * Copy the contents of the page. 6874 */ 6875 ppcopy_kernel(tpp, rpp); 6876 } 6877 6878 tpp = targ; 6879 rpp = repl; 6880 for (i = 0; i < npages; i++, tpp++, rpp++) { 6881 /* 6882 * Copy attributes. VAC consistency was handled above, 6883 * if required. 
6884 */ 6885 rpp->p_nrm = tpp->p_nrm; 6886 tpp->p_nrm = 0; 6887 rpp->p_index = tpp->p_index; 6888 tpp->p_index = 0; 6889 #ifdef VAC 6890 rpp->p_vcolor = tpp->p_vcolor; 6891 #endif 6892 } 6893 6894 /* 6895 * First, unsuspend the page, if we set the suspend bit, and transfer 6896 * the mapping list from the target page to the replacement page. 6897 * Next process postcallbacks; since pa_hment's are linked only to the 6898 * p_mapping list of root page, we don't iterate over the constituent 6899 * pages. 6900 */ 6901 hat_pagereload(targ, repl); 6902 6903 suspend_fail: 6904 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6905 6906 /* 6907 * Now lower our PIL and release any captured CPUs since we 6908 * are out of the "danger zone". After this it will again be 6909 * safe to acquire adaptive mutex locks, or to drop them... 6910 */ 6911 if (old_pil != -1) { 6912 splx(old_pil); 6913 } else { 6914 xc_dismissed(cpuset); 6915 } 6916 6917 kpreempt_enable(); 6918 6919 sfmmu_mlist_reloc_exit(low, high); 6920 6921 /* 6922 * Postsuspend callbacks should drop any locks held across 6923 * the suspend callbacks. As before, we don't hold the mapping 6924 * list lock at this point.. our assumption is that the mapping 6925 * list still can't change due to our holding SE_EXCL lock and 6926 * there being no unlocked mappings left. Hence the restriction 6927 * on calling context to hat_delete_callback() 6928 */ 6929 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6930 if (ret != 0) { 6931 /* 6932 * The second presuspend call failed: we got here through 6933 * the suspend_fail label above. 6934 */ 6935 ASSERT(ret != EIO); 6936 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6937 kreloc_thread = NULL; 6938 mutex_exit(&kpr_mutex); 6939 return (EAGAIN); 6940 } 6941 6942 /* 6943 * Now that we're out of the performance critical section we can 6944 * take care of updating the hash table, since we still 6945 * hold all the pages locked SE_EXCL at this point we 6946 * needn't worry about things changing out from under us. 6947 */ 6948 tpp = targ; 6949 rpp = repl; 6950 for (i = 0; i < npages; i++, tpp++, rpp++) { 6951 6952 /* 6953 * replace targ with replacement in page_hash table 6954 */ 6955 targ = tpp; 6956 page_relocate_hash(rpp, targ); 6957 6958 /* 6959 * concatenate target; caller of platform_page_relocate() 6960 * expects target to be concatenated after returning. 6961 */ 6962 ASSERT(targ->p_next == targ); 6963 ASSERT(targ->p_prev == targ); 6964 page_list_concat(&pl, &targ); 6965 } 6966 6967 ASSERT(*target == pl); 6968 *nrelocp = npages; 6969 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6970 kreloc_thread = NULL; 6971 mutex_exit(&kpr_mutex); 6972 return (0); 6973 } 6974 6975 /* 6976 * Called when stray pa_hments are found attached to a page which is 6977 * being freed. Notify the subsystem which attached the pa_hment of 6978 * the error if it registered a suitable handler, else panic. 6979 */ 6980 static void 6981 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6982 { 6983 id_t cb_id = pahmep->cb_id; 6984 6985 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6986 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6987 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6988 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6989 return; /* non-fatal */ 6990 } 6991 panic("pa_hment leaked: 0x%p", pahmep); 6992 } 6993 6994 /* 6995 * Remove all mappings to page 'pp'. 
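 *
 * Illustrative usage (not from this file): callers hold the page
 * SE_EXCL, matching the ASSERT(PAGE_EXCL(pp)) in the function:
 *
 *	ASSERT(PAGE_EXCL(pp));
 *	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
 *
 * hat_page_relocate() in this file instead passes SFMMU_KERNEL_RELOC
 * so that locked kernel mappings are left in place to be suspended.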
6996 */ 6997 int 6998 hat_pageunload(struct page *pp, uint_t forceflag) 6999 { 7000 struct page *origpp = pp; 7001 struct sf_hment *sfhme, *tmphme; 7002 struct hme_blk *hmeblkp; 7003 kmutex_t *pml; 7004 #ifdef VAC 7005 kmutex_t *pmtx; 7006 #endif 7007 cpuset_t cpuset, tset; 7008 int index, cons; 7009 int xhme_blks; 7010 int pa_hments; 7011 7012 ASSERT(PAGE_EXCL(pp)); 7013 7014 retry_xhat: 7015 tmphme = NULL; 7016 xhme_blks = 0; 7017 pa_hments = 0; 7018 CPUSET_ZERO(cpuset); 7019 7020 pml = sfmmu_mlist_enter(pp); 7021 7022 #ifdef VAC 7023 if (pp->p_kpmref) 7024 sfmmu_kpm_pageunload(pp); 7025 ASSERT(!PP_ISMAPPED_KPM(pp)); 7026 #endif 7027 7028 index = PP_MAPINDEX(pp); 7029 cons = TTE8K; 7030 retry: 7031 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7032 tmphme = sfhme->hme_next; 7033 7034 if (IS_PAHME(sfhme)) { 7035 ASSERT(sfhme->hme_data != NULL); 7036 pa_hments++; 7037 continue; 7038 } 7039 7040 hmeblkp = sfmmu_hmetohblk(sfhme); 7041 if (hmeblkp->hblk_xhat_bit) { 7042 struct xhat_hme_blk *xblk = 7043 (struct xhat_hme_blk *)hmeblkp; 7044 7045 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 7046 pp, forceflag, XBLK2PROVBLK(xblk)); 7047 7048 xhme_blks = 1; 7049 continue; 7050 } 7051 7052 /* 7053 * If there are kernel mappings don't unload them, they will 7054 * be suspended. 7055 */ 7056 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7057 hmeblkp->hblk_tag.htag_id == ksfmmup) 7058 continue; 7059 7060 tset = sfmmu_pageunload(pp, sfhme, cons); 7061 CPUSET_OR(cpuset, tset); 7062 } 7063 7064 while (index != 0) { 7065 index = index >> 1; 7066 if (index != 0) 7067 cons++; 7068 if (index & 0x1) { 7069 /* Go to leading page */ 7070 pp = PP_GROUPLEADER(pp, cons); 7071 ASSERT(sfmmu_mlist_held(pp)); 7072 goto retry; 7073 } 7074 } 7075 7076 /* 7077 * cpuset may be empty if the page was only mapped by segkpm, 7078 * in which case we won't actually cross-trap. 7079 */ 7080 xt_sync(cpuset); 7081 7082 /* 7083 * The page should have no mappings at this point, unless 7084 * we were called from hat_page_relocate() in which case we 7085 * leave the locked mappings which will be suspended later. 7086 */ 7087 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 7088 (forceflag == SFMMU_KERNEL_RELOC)); 7089 7090 #ifdef VAC 7091 if (PP_ISTNC(pp)) { 7092 if (cons == TTE8K) { 7093 pmtx = sfmmu_page_enter(pp); 7094 PP_CLRTNC(pp); 7095 sfmmu_page_exit(pmtx); 7096 } else { 7097 conv_tnc(pp, cons); 7098 } 7099 } 7100 #endif /* VAC */ 7101 7102 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7103 /* 7104 * Unlink any pa_hments and free them, calling back 7105 * the responsible subsystem to notify it of the error. 7106 * This can occur in situations such as drivers leaking 7107 * DMA handles: naughty, but common enough that we'd like 7108 * to keep the system running rather than bringing it 7109 * down with an obscure error like "pa_hment leaked" 7110 * which doesn't aid the user in debugging their driver. 7111 */ 7112 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7113 tmphme = sfhme->hme_next; 7114 if (IS_PAHME(sfhme)) { 7115 struct pa_hment *pahmep = sfhme->hme_data; 7116 sfmmu_pahment_leaked(pahmep); 7117 HME_SUB(sfhme, pp); 7118 kmem_cache_free(pa_hment_cache, pahmep); 7119 } 7120 } 7121 7122 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 7123 } 7124 7125 sfmmu_mlist_exit(pml); 7126 7127 /* 7128 * XHAT may not have finished unloading pages 7129 * because some other thread was waiting for 7130 * mlist lock and XHAT_PAGEUNLOAD let it do 7131 * the job. 
7132 */ 7133 if (xhme_blks) { 7134 pp = origpp; 7135 goto retry_xhat; 7136 } 7137 7138 return (0); 7139 } 7140 7141 cpuset_t 7142 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7143 { 7144 struct hme_blk *hmeblkp; 7145 sfmmu_t *sfmmup; 7146 tte_t tte, ttemod; 7147 #ifdef DEBUG 7148 tte_t orig_old; 7149 #endif /* DEBUG */ 7150 caddr_t addr; 7151 int ttesz; 7152 int ret; 7153 cpuset_t cpuset; 7154 7155 ASSERT(pp != NULL); 7156 ASSERT(sfmmu_mlist_held(pp)); 7157 ASSERT(!PP_ISKAS(pp)); 7158 7159 CPUSET_ZERO(cpuset); 7160 7161 hmeblkp = sfmmu_hmetohblk(sfhme); 7162 7163 readtte: 7164 sfmmu_copytte(&sfhme->hme_tte, &tte); 7165 if (TTE_IS_VALID(&tte)) { 7166 sfmmup = hblktosfmmu(hmeblkp); 7167 ttesz = get_hblk_ttesz(hmeblkp); 7168 /* 7169 * Only unload mappings of 'cons' size. 7170 */ 7171 if (ttesz != cons) 7172 return (cpuset); 7173 7174 /* 7175 * Note that we have p_mapping lock, but no hash lock here. 7176 * hblk_unload() has to have both hash lock AND p_mapping 7177 * lock before it tries to modify tte. So, the tte could 7178 * not become invalid in the sfmmu_modifytte_try() below. 7179 */ 7180 ttemod = tte; 7181 #ifdef DEBUG 7182 orig_old = tte; 7183 #endif /* DEBUG */ 7184 7185 TTE_SET_INVALID(&ttemod); 7186 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7187 if (ret < 0) { 7188 #ifdef DEBUG 7189 /* only R/M bits can change. */ 7190 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7191 #endif /* DEBUG */ 7192 goto readtte; 7193 } 7194 7195 if (ret == 0) { 7196 panic("pageunload: cas failed?"); 7197 } 7198 7199 addr = tte_to_vaddr(hmeblkp, tte); 7200 7201 if (hmeblkp->hblk_shared) { 7202 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7203 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7204 sf_region_t *rgnp; 7205 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7206 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7207 ASSERT(srdp != NULL); 7208 rgnp = srdp->srd_hmergnp[rid]; 7209 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7210 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7211 sfmmu_ttesync(NULL, addr, &tte, pp); 7212 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7213 atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1); 7214 } else { 7215 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7216 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 7217 7218 /* 7219 * We need to flush the page from the virtual cache 7220 * in order to prevent a virtual cache alias 7221 * inconsistency. The particular scenario we need 7222 * to worry about is: 7223 * Given: va1 and va2 are two virtual address that 7224 * alias and will map the same physical address. 7225 * 1. mapping exists from va1 to pa and data has 7226 * been read into the cache. 7227 * 2. unload va1. 7228 * 3. load va2 and modify data using va2. 7229 * 4 unload va2. 7230 * 5. load va1 and reference data. Unless we flush 7231 * the data cache when we unload we will get 7232 * stale data. 7233 * This scenario is taken care of by using virtual 7234 * page coloring. 7235 */ 7236 if (sfmmup->sfmmu_ismhat) { 7237 /* 7238 * Flush TSBs, TLBs and caches 7239 * of every process 7240 * sharing this ism segment. 
7241 */ 7242 sfmmu_hat_lock_all(); 7243 mutex_enter(&ism_mlist_lock); 7244 kpreempt_disable(); 7245 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7246 pp->p_pagenum, CACHE_NO_FLUSH); 7247 kpreempt_enable(); 7248 mutex_exit(&ism_mlist_lock); 7249 sfmmu_hat_unlock_all(); 7250 cpuset = cpu_ready_set; 7251 } else { 7252 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7253 cpuset = sfmmup->sfmmu_cpusran; 7254 } 7255 } 7256 7257 /* 7258 * Hme_sub has to run after ttesync() and a_rss update. 7259 * See hblk_unload(). 7260 */ 7261 HME_SUB(sfhme, pp); 7262 membar_stst(); 7263 7264 /* 7265 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7266 * since pteload may have done a HME_ADD() right after 7267 * we did the HME_SUB() above. Hmecnt is now maintained 7268 * by cas only. no lock guranteed its value. The only 7269 * gurantee we have is the hmecnt should not be less than 7270 * what it should be so the hblk will not be taken away. 7271 * It's also important that we decremented the hmecnt after 7272 * we are done with hmeblkp so that this hmeblk won't be 7273 * stolen. 7274 */ 7275 ASSERT(hmeblkp->hblk_hmecnt > 0); 7276 ASSERT(hmeblkp->hblk_vcnt > 0); 7277 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 7278 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 7279 /* 7280 * This is bug 4063182. 7281 * XXX: fixme 7282 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7283 * !hmeblkp->hblk_lckcnt); 7284 */ 7285 } else { 7286 panic("invalid tte? pp %p &tte %p", 7287 (void *)pp, (void *)&tte); 7288 } 7289 7290 return (cpuset); 7291 } 7292 7293 /* 7294 * While relocating a kernel page, this function will move the mappings 7295 * from tpp to dpp and modify any associated data with these mappings. 7296 * It also unsuspends the suspended kernel mapping. 7297 */ 7298 static void 7299 hat_pagereload(struct page *tpp, struct page *dpp) 7300 { 7301 struct sf_hment *sfhme; 7302 tte_t tte, ttemod; 7303 int index, cons; 7304 7305 ASSERT(getpil() == PIL_MAX); 7306 ASSERT(sfmmu_mlist_held(tpp)); 7307 ASSERT(sfmmu_mlist_held(dpp)); 7308 7309 index = PP_MAPINDEX(tpp); 7310 cons = TTE8K; 7311 7312 /* Update real mappings to the page */ 7313 retry: 7314 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7315 if (IS_PAHME(sfhme)) 7316 continue; 7317 sfmmu_copytte(&sfhme->hme_tte, &tte); 7318 ttemod = tte; 7319 7320 /* 7321 * replace old pfn with new pfn in TTE 7322 */ 7323 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7324 7325 /* 7326 * clear suspend bit 7327 */ 7328 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7329 TTE_CLR_SUSPEND(&ttemod); 7330 7331 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7332 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7333 7334 /* 7335 * set hme_page point to new page 7336 */ 7337 sfhme->hme_page = dpp; 7338 } 7339 7340 /* 7341 * move p_mapping list from old page to new page 7342 */ 7343 dpp->p_mapping = tpp->p_mapping; 7344 tpp->p_mapping = NULL; 7345 dpp->p_share = tpp->p_share; 7346 tpp->p_share = 0; 7347 7348 while (index != 0) { 7349 index = index >> 1; 7350 if (index != 0) 7351 cons++; 7352 if (index & 0x1) { 7353 tpp = PP_GROUPLEADER(tpp, cons); 7354 dpp = PP_GROUPLEADER(dpp, cons); 7355 goto retry; 7356 } 7357 } 7358 7359 curthread->t_flag &= ~T_DONTDTRACE; 7360 mutex_exit(&kpr_suspendlock); 7361 } 7362 7363 uint_t 7364 hat_pagesync(struct page *pp, uint_t clearflag) 7365 { 7366 struct sf_hment *sfhme, *tmphme = NULL; 7367 struct hme_blk *hmeblkp; 7368 kmutex_t *pml; 7369 cpuset_t cpuset, tset; 7370 int index, cons; 7371 extern ulong_t po_share; 7372 page_t 
*save_pp = pp; 7373 int stop_on_sh = 0; 7374 uint_t shcnt; 7375 7376 CPUSET_ZERO(cpuset); 7377 7378 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7379 return (PP_GENERIC_ATTR(pp)); 7380 } 7381 7382 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7383 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7384 return (PP_GENERIC_ATTR(pp)); 7385 } 7386 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7387 return (PP_GENERIC_ATTR(pp)); 7388 } 7389 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7390 if (pp->p_share > po_share) { 7391 hat_page_setattr(pp, P_REF); 7392 return (PP_GENERIC_ATTR(pp)); 7393 } 7394 stop_on_sh = 1; 7395 shcnt = 0; 7396 } 7397 } 7398 7399 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7400 pml = sfmmu_mlist_enter(pp); 7401 index = PP_MAPINDEX(pp); 7402 cons = TTE8K; 7403 retry: 7404 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7405 /* 7406 * We need to save the next hment on the list since 7407 * it is possible for pagesync to remove an invalid hment 7408 * from the list. 7409 */ 7410 tmphme = sfhme->hme_next; 7411 if (IS_PAHME(sfhme)) 7412 continue; 7413 /* 7414 * If we are looking for large mappings and this hme doesn't 7415 * reach the range we are seeking, just ignore it. 7416 */ 7417 hmeblkp = sfmmu_hmetohblk(sfhme); 7418 if (hmeblkp->hblk_xhat_bit) 7419 continue; 7420 7421 if (hme_size(sfhme) < cons) 7422 continue; 7423 7424 if (stop_on_sh) { 7425 if (hmeblkp->hblk_shared) { 7426 sf_srd_t *srdp = hblktosrd(hmeblkp); 7427 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7428 sf_region_t *rgnp; 7429 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7430 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7431 ASSERT(srdp != NULL); 7432 rgnp = srdp->srd_hmergnp[rid]; 7433 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7434 rgnp, rid); 7435 shcnt += rgnp->rgn_refcnt; 7436 } else { 7437 shcnt++; 7438 } 7439 if (shcnt > po_share) { 7440 /* 7441 * tell the pager to spare the page this time 7442 * around. 7443 */ 7444 hat_page_setattr(save_pp, P_REF); 7445 index = 0; 7446 break; 7447 } 7448 } 7449 tset = sfmmu_pagesync(pp, sfhme, 7450 clearflag & ~HAT_SYNC_STOPON_RM); 7451 CPUSET_OR(cpuset, tset); 7452 7453 /* 7454 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7455 * as the "ref" or "mod" is set or share cnt exceeds po_share. 
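 *
 * This early exit is what keeps the common caller idiom cheap on
 * heavily shared pages, e.g. (illustrative, not from this file):
 *
 *	if (hat_pagesync(pp, HAT_SYNC_DONTZERO |
 *	    HAT_SYNC_STOPON_MOD) & P_MOD)
 *		...page is dirty, push it out...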
7456 */ 7457 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7458 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7459 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7460 index = 0; 7461 break; 7462 } 7463 } 7464 7465 while (index) { 7466 index = index >> 1; 7467 cons++; 7468 if (index & 0x1) { 7469 /* Go to leading page */ 7470 pp = PP_GROUPLEADER(pp, cons); 7471 goto retry; 7472 } 7473 } 7474 7475 xt_sync(cpuset); 7476 sfmmu_mlist_exit(pml); 7477 return (PP_GENERIC_ATTR(save_pp)); 7478 } 7479 7480 /* 7481 * Get all the hardware dependent attributes for a page struct 7482 */ 7483 static cpuset_t 7484 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7485 uint_t clearflag) 7486 { 7487 caddr_t addr; 7488 tte_t tte, ttemod; 7489 struct hme_blk *hmeblkp; 7490 int ret; 7491 sfmmu_t *sfmmup; 7492 cpuset_t cpuset; 7493 7494 ASSERT(pp != NULL); 7495 ASSERT(sfmmu_mlist_held(pp)); 7496 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7497 (clearflag == HAT_SYNC_ZERORM)); 7498 7499 SFMMU_STAT(sf_pagesync); 7500 7501 CPUSET_ZERO(cpuset); 7502 7503 sfmmu_pagesync_retry: 7504 7505 sfmmu_copytte(&sfhme->hme_tte, &tte); 7506 if (TTE_IS_VALID(&tte)) { 7507 hmeblkp = sfmmu_hmetohblk(sfhme); 7508 sfmmup = hblktosfmmu(hmeblkp); 7509 addr = tte_to_vaddr(hmeblkp, tte); 7510 if (clearflag == HAT_SYNC_ZERORM) { 7511 ttemod = tte; 7512 TTE_CLR_RM(&ttemod); 7513 ret = sfmmu_modifytte_try(&tte, &ttemod, 7514 &sfhme->hme_tte); 7515 if (ret < 0) { 7516 /* 7517 * cas failed and the new value is not what 7518 * we want. 7519 */ 7520 goto sfmmu_pagesync_retry; 7521 } 7522 7523 if (ret > 0) { 7524 /* we win the cas */ 7525 if (hmeblkp->hblk_shared) { 7526 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7527 uint_t rid = 7528 hmeblkp->hblk_tag.htag_rid; 7529 sf_region_t *rgnp; 7530 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7531 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7532 ASSERT(srdp != NULL); 7533 rgnp = srdp->srd_hmergnp[rid]; 7534 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7535 srdp, rgnp, rid); 7536 cpuset = sfmmu_rgntlb_demap(addr, 7537 rgnp, hmeblkp, 1); 7538 } else { 7539 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7540 0, 0); 7541 cpuset = sfmmup->sfmmu_cpusran; 7542 } 7543 } 7544 } 7545 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7546 &tte, pp); 7547 } 7548 return (cpuset); 7549 } 7550 7551 /* 7552 * Remove write permission from a mappings to a page, so that 7553 * we can detect the next modification of it. This requires modifying 7554 * the TTE then invalidating (demap) any TLB entry using that TTE. 7555 * This code is similar to sfmmu_pagesync(). 7556 */ 7557 static cpuset_t 7558 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7559 { 7560 caddr_t addr; 7561 tte_t tte; 7562 tte_t ttemod; 7563 struct hme_blk *hmeblkp; 7564 int ret; 7565 sfmmu_t *sfmmup; 7566 cpuset_t cpuset; 7567 7568 ASSERT(pp != NULL); 7569 ASSERT(sfmmu_mlist_held(pp)); 7570 7571 CPUSET_ZERO(cpuset); 7572 SFMMU_STAT(sf_clrwrt); 7573 7574 retry: 7575 7576 sfmmu_copytte(&sfhme->hme_tte, &tte); 7577 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7578 hmeblkp = sfmmu_hmetohblk(sfhme); 7579 7580 /* 7581 * xhat mappings should never be to a VMODSORT page. 
7582 */ 7583 ASSERT(hmeblkp->hblk_xhat_bit == 0); 7584 7585 sfmmup = hblktosfmmu(hmeblkp); 7586 addr = tte_to_vaddr(hmeblkp, tte); 7587 7588 ttemod = tte; 7589 TTE_CLR_WRT(&ttemod); 7590 TTE_CLR_MOD(&ttemod); 7591 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7592 7593 /* 7594 * if cas failed and the new value is not what 7595 * we want retry 7596 */ 7597 if (ret < 0) 7598 goto retry; 7599 7600 /* we win the cas */ 7601 if (ret > 0) { 7602 if (hmeblkp->hblk_shared) { 7603 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7604 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7605 sf_region_t *rgnp; 7606 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7607 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7608 ASSERT(srdp != NULL); 7609 rgnp = srdp->srd_hmergnp[rid]; 7610 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7611 srdp, rgnp, rid); 7612 cpuset = sfmmu_rgntlb_demap(addr, 7613 rgnp, hmeblkp, 1); 7614 } else { 7615 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7616 cpuset = sfmmup->sfmmu_cpusran; 7617 } 7618 } 7619 } 7620 7621 return (cpuset); 7622 } 7623 7624 /* 7625 * Walk all mappings of a page, removing write permission and clearing the 7626 * ref/mod bits. This code is similar to hat_pagesync() 7627 */ 7628 static void 7629 hat_page_clrwrt(page_t *pp) 7630 { 7631 struct sf_hment *sfhme; 7632 struct sf_hment *tmphme = NULL; 7633 kmutex_t *pml; 7634 cpuset_t cpuset; 7635 cpuset_t tset; 7636 int index; 7637 int cons; 7638 7639 CPUSET_ZERO(cpuset); 7640 7641 pml = sfmmu_mlist_enter(pp); 7642 index = PP_MAPINDEX(pp); 7643 cons = TTE8K; 7644 retry: 7645 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7646 tmphme = sfhme->hme_next; 7647 7648 /* 7649 * If we are looking for large mappings and this hme doesn't 7650 * reach the range we are seeking, just ignore its. 7651 */ 7652 7653 if (hme_size(sfhme) < cons) 7654 continue; 7655 7656 tset = sfmmu_pageclrwrt(pp, sfhme); 7657 CPUSET_OR(cpuset, tset); 7658 } 7659 7660 while (index) { 7661 index = index >> 1; 7662 cons++; 7663 if (index & 0x1) { 7664 /* Go to leading page */ 7665 pp = PP_GROUPLEADER(pp, cons); 7666 goto retry; 7667 } 7668 } 7669 7670 xt_sync(cpuset); 7671 sfmmu_mlist_exit(pml); 7672 } 7673 7674 /* 7675 * Set the given REF/MOD/RO bits for the given page. 7676 * For a vnode with a sorted v_pages list, we need to change 7677 * the attributes and the v_pages list together under page_vnode_mutex. 7678 */ 7679 void 7680 hat_page_setattr(page_t *pp, uint_t flag) 7681 { 7682 vnode_t *vp = pp->p_vnode; 7683 page_t **listp; 7684 kmutex_t *pmtx; 7685 kmutex_t *vphm = NULL; 7686 int noshuffle; 7687 7688 noshuffle = flag & P_NSH; 7689 flag &= ~P_NSH; 7690 7691 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7692 7693 /* 7694 * nothing to do if attribute already set 7695 */ 7696 if ((pp->p_nrm & flag) == flag) 7697 return; 7698 7699 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7700 !noshuffle) { 7701 vphm = page_vnode_mutex(vp); 7702 mutex_enter(vphm); 7703 } 7704 7705 pmtx = sfmmu_page_enter(pp); 7706 pp->p_nrm |= flag; 7707 sfmmu_page_exit(pmtx); 7708 7709 if (vphm != NULL) { 7710 /* 7711 * Some File Systems examine v_pages for NULL w/o 7712 * grabbing the vphm mutex. Must not let it become NULL when 7713 * pp is the only page on the list. 
7714 */ 7715 if (pp->p_vpnext != pp) { 7716 page_vpsub(&vp->v_pages, pp); 7717 if (vp->v_pages != NULL) 7718 listp = &vp->v_pages->p_vpprev->p_vpnext; 7719 else 7720 listp = &vp->v_pages; 7721 page_vpadd(listp, pp); 7722 } 7723 mutex_exit(vphm); 7724 } 7725 } 7726 7727 void 7728 hat_page_clrattr(page_t *pp, uint_t flag) 7729 { 7730 vnode_t *vp = pp->p_vnode; 7731 kmutex_t *pmtx; 7732 7733 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7734 7735 pmtx = sfmmu_page_enter(pp); 7736 7737 /* 7738 * Caller is expected to hold page's io lock for VMODSORT to work 7739 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7740 * bit is cleared. 7741 * We don't have assert to avoid tripping some existing third party 7742 * code. The dirty page is moved back to top of the v_page list 7743 * after IO is done in pvn_write_done(). 7744 */ 7745 pp->p_nrm &= ~flag; 7746 sfmmu_page_exit(pmtx); 7747 7748 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7749 7750 /* 7751 * VMODSORT works by removing write permissions and getting 7752 * a fault when a page is made dirty. At this point 7753 * we need to remove write permission from all mappings 7754 * to this page. 7755 */ 7756 hat_page_clrwrt(pp); 7757 } 7758 } 7759 7760 uint_t 7761 hat_page_getattr(page_t *pp, uint_t flag) 7762 { 7763 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7764 return ((uint_t)(pp->p_nrm & flag)); 7765 } 7766 7767 /* 7768 * DEBUG kernels: verify that a kernel va<->pa translation 7769 * is safe by checking the underlying page_t is in a page 7770 * relocation-safe state. 7771 */ 7772 #ifdef DEBUG 7773 void 7774 sfmmu_check_kpfn(pfn_t pfn) 7775 { 7776 page_t *pp; 7777 int index, cons; 7778 7779 if (hat_check_vtop == 0) 7780 return; 7781 7782 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7783 return; 7784 7785 pp = page_numtopp_nolock(pfn); 7786 if (!pp) 7787 return; 7788 7789 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7790 return; 7791 7792 /* 7793 * Handed a large kernel page, we dig up the root page since we 7794 * know the root page might have the lock also. 7795 */ 7796 if (pp->p_szc != 0) { 7797 index = PP_MAPINDEX(pp); 7798 cons = TTE8K; 7799 again: 7800 while (index != 0) { 7801 index >>= 1; 7802 if (index != 0) 7803 cons++; 7804 if (index & 0x1) { 7805 pp = PP_GROUPLEADER(pp, cons); 7806 goto again; 7807 } 7808 } 7809 } 7810 7811 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7812 return; 7813 7814 /* 7815 * Pages need to be locked or allocated "permanent" (either from 7816 * static_arena arena or explicitly setting PG_NORELOC when calling 7817 * page_create_va()) for VA->PA translations to be valid. 7818 */ 7819 if (!PP_ISNORELOC(pp)) 7820 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 7821 else 7822 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 7823 } 7824 #endif /* DEBUG */ 7825 7826 /* 7827 * Returns a page frame number for a given virtual address. 7828 * Returns PFN_INVALID to indicate an invalid mapping 7829 */ 7830 pfn_t 7831 hat_getpfnum(struct hat *hat, caddr_t addr) 7832 { 7833 pfn_t pfn; 7834 tte_t tte; 7835 7836 /* 7837 * We would like to 7838 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7839 * but we can't because the iommu driver will call this 7840 * routine at interrupt time and it can't grab the as lock 7841 * or it will deadlock: A thread could have the as lock 7842 * and be waiting for io. The io can't complete 7843 * because the interrupt thread is blocked trying to grab 7844 * the as lock. 
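 *
 * For reference, the supported way for a driver to translate a kernel
 * virtual address is simply (illustrative, kva is a hypothetical
 * variable):
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, (caddr_t)kva);
 *	if (pfn == PFN_INVALID)
 *		...no valid mapping for kva...
 *
 * rather than the obsolete hat_getkpfnum() below.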
7845 */ 7846 7847 ASSERT(hat->sfmmu_xhat_provider == NULL); 7848 7849 if (hat == ksfmmup) { 7850 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7851 ASSERT(segkmem_lpszc > 0); 7852 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7853 if (pfn != PFN_INVALID) { 7854 sfmmu_check_kpfn(pfn); 7855 return (pfn); 7856 } 7857 } else if (segkpm && IS_KPM_ADDR(addr)) { 7858 return (sfmmu_kpm_vatopfn(addr)); 7859 } 7860 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7861 == PFN_SUSPENDED) { 7862 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7863 } 7864 sfmmu_check_kpfn(pfn); 7865 return (pfn); 7866 } else { 7867 return (sfmmu_uvatopfn(addr, hat, NULL)); 7868 } 7869 } 7870 7871 /* 7872 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7873 * Use hat_getpfnum(kas.a_hat, ...) instead. 7874 * 7875 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7876 * but can't right now due to the fact that some software has grown to use 7877 * this interface incorrectly. So for now when the interface is misused, 7878 * return a warning to the user that in the future it won't work in the 7879 * way they're abusing it, and carry on (after disabling page relocation). 7880 */ 7881 pfn_t 7882 hat_getkpfnum(caddr_t addr) 7883 { 7884 pfn_t pfn; 7885 tte_t tte; 7886 int badcaller = 0; 7887 extern int segkmem_reloc; 7888 7889 if (segkpm && IS_KPM_ADDR(addr)) { 7890 badcaller = 1; 7891 pfn = sfmmu_kpm_vatopfn(addr); 7892 } else { 7893 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7894 == PFN_SUSPENDED) { 7895 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7896 } 7897 badcaller = pf_is_memory(pfn); 7898 } 7899 7900 if (badcaller) { 7901 /* 7902 * We can't return PFN_INVALID or the caller may panic 7903 * or corrupt the system. The only alternative is to 7904 * disable page relocation at this point for all kernel 7905 * memory. This will impact any callers of page_relocate() 7906 * such as FMA or DR. 7907 * 7908 * RFE: Add junk here to spit out an ereport so the sysadmin 7909 * can be advised that he should upgrade his device driver 7910 * so that this doesn't happen. 7911 */ 7912 hat_getkpfnum_badcall(caller()); 7913 if (hat_kpr_enabled && segkmem_reloc) { 7914 hat_kpr_enabled = 0; 7915 segkmem_reloc = 0; 7916 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7917 } 7918 } 7919 return (pfn); 7920 } 7921 7922 /* 7923 * This routine will return both pfn and tte for the vaddr. 7924 */ 7925 static pfn_t 7926 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 7927 { 7928 struct hmehash_bucket *hmebp; 7929 hmeblk_tag hblktag; 7930 int hmeshift, hashno = 1; 7931 struct hme_blk *hmeblkp = NULL; 7932 tte_t tte; 7933 7934 struct sf_hment *sfhmep; 7935 pfn_t pfn; 7936 7937 /* support for ISM */ 7938 ism_map_t *ism_map; 7939 ism_blk_t *ism_blkp; 7940 int i; 7941 sfmmu_t *ism_hatid = NULL; 7942 sfmmu_t *locked_hatid = NULL; 7943 sfmmu_t *sv_sfmmup = sfmmup; 7944 caddr_t sv_vaddr = vaddr; 7945 sf_srd_t *srdp; 7946 7947 if (ttep == NULL) { 7948 ttep = &tte; 7949 } else { 7950 ttep->ll = 0; 7951 } 7952 7953 ASSERT(sfmmup != ksfmmup); 7954 SFMMU_STAT(sf_user_vtop); 7955 /* 7956 * Set ism_hatid if vaddr falls in a ISM segment. 
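 * When a match is found, sfmmup is switched to the ISM hat and vaddr is
 * rebased to its offset within the ISM segment before the hash search below.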
7957 */ 7958 ism_blkp = sfmmup->sfmmu_iblk; 7959 if (ism_blkp != NULL) { 7960 sfmmu_ismhat_enter(sfmmup, 0); 7961 locked_hatid = sfmmup; 7962 } 7963 while (ism_blkp != NULL && ism_hatid == NULL) { 7964 ism_map = ism_blkp->iblk_maps; 7965 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7966 if (vaddr >= ism_start(ism_map[i]) && 7967 vaddr < ism_end(ism_map[i])) { 7968 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7969 vaddr = (caddr_t)(vaddr - 7970 ism_start(ism_map[i])); 7971 break; 7972 } 7973 } 7974 ism_blkp = ism_blkp->iblk_next; 7975 } 7976 if (locked_hatid) { 7977 sfmmu_ismhat_exit(locked_hatid, 0); 7978 } 7979 7980 hblktag.htag_id = sfmmup; 7981 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 7982 do { 7983 hmeshift = HME_HASH_SHIFT(hashno); 7984 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7985 hblktag.htag_rehash = hashno; 7986 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7987 7988 SFMMU_HASH_LOCK(hmebp); 7989 7990 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7991 if (hmeblkp != NULL) { 7992 ASSERT(!hmeblkp->hblk_shared); 7993 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7994 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7995 SFMMU_HASH_UNLOCK(hmebp); 7996 if (TTE_IS_VALID(ttep)) { 7997 pfn = TTE_TO_PFN(vaddr, ttep); 7998 return (pfn); 7999 } 8000 break; 8001 } 8002 SFMMU_HASH_UNLOCK(hmebp); 8003 hashno++; 8004 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 8005 8006 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 8007 return (PFN_INVALID); 8008 } 8009 srdp = sv_sfmmup->sfmmu_srdp; 8010 ASSERT(srdp != NULL); 8011 ASSERT(srdp->srd_refcnt != 0); 8012 hblktag.htag_id = srdp; 8013 hashno = 1; 8014 do { 8015 hmeshift = HME_HASH_SHIFT(hashno); 8016 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 8017 hblktag.htag_rehash = hashno; 8018 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 8019 8020 SFMMU_HASH_LOCK(hmebp); 8021 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 8022 hmeblkp = hmeblkp->hblk_next) { 8023 uint_t rid; 8024 sf_region_t *rgnp; 8025 caddr_t rsaddr; 8026 caddr_t readdr; 8027 8028 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 8029 sv_sfmmup->sfmmu_hmeregion_map)) { 8030 continue; 8031 } 8032 ASSERT(hmeblkp->hblk_shared); 8033 rid = hmeblkp->hblk_tag.htag_rid; 8034 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8035 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8036 rgnp = srdp->srd_hmergnp[rid]; 8037 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 8038 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 8039 sfmmu_copytte(&sfhmep->hme_tte, ttep); 8040 rsaddr = rgnp->rgn_saddr; 8041 readdr = rsaddr + rgnp->rgn_size; 8042 #ifdef DEBUG 8043 if (TTE_IS_VALID(ttep) || 8044 get_hblk_ttesz(hmeblkp) > TTE8K) { 8045 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 8046 ASSERT(eva > sv_vaddr); 8047 ASSERT(sv_vaddr >= rsaddr); 8048 ASSERT(sv_vaddr < readdr); 8049 ASSERT(eva <= readdr); 8050 } 8051 #endif /* DEBUG */ 8052 /* 8053 * Continue the search if we 8054 * found an invalid 8K tte outside of the area 8055 * covered by this hmeblk's region. 
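 * A valid tte ends the search with its pfn; an invalid tte in a larger
 * hmeblk, or an invalid 8K tte that lies inside the region, ends it with
 * PFN_INVALID.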
8056 */ 8057 if (TTE_IS_VALID(ttep)) { 8058 SFMMU_HASH_UNLOCK(hmebp); 8059 pfn = TTE_TO_PFN(sv_vaddr, ttep); 8060 return (pfn); 8061 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8062 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8063 SFMMU_HASH_UNLOCK(hmebp); 8064 pfn = PFN_INVALID; 8065 return (pfn); 8066 } 8067 } 8068 SFMMU_HASH_UNLOCK(hmebp); 8069 hashno++; 8070 } while (hashno <= mmu_hashcnt); 8071 return (PFN_INVALID); 8072 } 8073 8074 8075 /* 8076 * For compatibility with AT&T and later optimizations 8077 */ 8078 /* ARGSUSED */ 8079 void 8080 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8081 { 8082 ASSERT(hat != NULL); 8083 ASSERT(hat->sfmmu_xhat_provider == NULL); 8084 } 8085 8086 /* 8087 * Return the number of mappings to a particular page. This number is an 8088 * approximation of the number of people sharing the page. 8089 * 8090 * shared hmeblks or ism hmeblks are counted as 1 mapping here. 8091 * hat_page_checkshare() can be used to compare a threshold against a share 8092 * count that reflects the number of region sharers, albeit at higher cost. 8093 */ 8094 ulong_t 8095 hat_page_getshare(page_t *pp) 8096 { 8097 page_t *spp = pp; /* start page */ 8098 kmutex_t *pml; 8099 ulong_t cnt; 8100 int index, sz = TTE64K; 8101 8102 /* 8103 * We need to grab the mlist lock to make sure any outstanding 8104 * load/unloads complete. Otherwise we could return zero 8105 * even though the unload(s) haven't finished yet. 8106 */ 8107 pml = sfmmu_mlist_enter(spp); 8108 cnt = spp->p_share; 8109 8110 #ifdef VAC 8111 if (kpm_enable) 8112 cnt += spp->p_kpmref; 8113 #endif 8114 8115 /* 8116 * If we have any large mappings, we count the number of 8117 * mappings that this large page is part of. 8118 */ 8119 index = PP_MAPINDEX(spp); 8120 index >>= 1; 8121 while (index) { 8122 pp = PP_GROUPLEADER(spp, sz); 8123 if ((index & 0x1) && pp != spp) { 8124 cnt += pp->p_share; 8125 spp = pp; 8126 } 8127 index >>= 1; 8128 sz++; 8129 } 8130 sfmmu_mlist_exit(pml); 8131 return (cnt); 8132 } 8133 8134 /* 8135 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8136 * otherwise. Count shared hmeblks by region's refcnt.
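 * For example, a mapping that comes from a shared region with an rgn_refcnt
 * of 4 adds 4 to the count rather than 1.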
8137 */ 8138 int 8139 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 8140 { 8141 kmutex_t *pml; 8142 ulong_t cnt = 0; 8143 int index, sz = TTE8K; 8144 struct sf_hment *sfhme, *tmphme = NULL; 8145 struct hme_blk *hmeblkp; 8146 8147 pml = sfmmu_mlist_enter(pp); 8148 8149 if (kpm_enable) 8150 cnt = pp->p_kpmref; 8151 8152 if (pp->p_share + cnt > sh_thresh) { 8153 sfmmu_mlist_exit(pml); 8154 return (1); 8155 } 8156 8157 index = PP_MAPINDEX(pp); 8158 8159 again: 8160 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 8161 tmphme = sfhme->hme_next; 8162 if (IS_PAHME(sfhme)) { 8163 continue; 8164 } 8165 8166 hmeblkp = sfmmu_hmetohblk(sfhme); 8167 if (hmeblkp->hblk_xhat_bit) { 8168 cnt++; 8169 if (cnt > sh_thresh) { 8170 sfmmu_mlist_exit(pml); 8171 return (1); 8172 } 8173 continue; 8174 } 8175 if (hme_size(sfhme) != sz) { 8176 continue; 8177 } 8178 8179 if (hmeblkp->hblk_shared) { 8180 sf_srd_t *srdp = hblktosrd(hmeblkp); 8181 uint_t rid = hmeblkp->hblk_tag.htag_rid; 8182 sf_region_t *rgnp; 8183 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8184 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8185 ASSERT(srdp != NULL); 8186 rgnp = srdp->srd_hmergnp[rid]; 8187 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 8188 rgnp, rid); 8189 cnt += rgnp->rgn_refcnt; 8190 } else { 8191 cnt++; 8192 } 8193 if (cnt > sh_thresh) { 8194 sfmmu_mlist_exit(pml); 8195 return (1); 8196 } 8197 } 8198 8199 index >>= 1; 8200 sz++; 8201 while (index) { 8202 pp = PP_GROUPLEADER(pp, sz); 8203 ASSERT(sfmmu_mlist_held(pp)); 8204 if (index & 0x1) { 8205 goto again; 8206 } 8207 index >>= 1; 8208 sz++; 8209 } 8210 sfmmu_mlist_exit(pml); 8211 return (0); 8212 } 8213 8214 /* 8215 * Unload all large mappings to the pp and reset the p_szc field of every 8216 * constituent page according to the remaining mappings. 8217 * 8218 * pp must be locked SE_EXCL. Even though no other constituent pages are 8219 * locked it's legal to unload the large mappings to the pp because all 8220 * constituent pages of large locked mappings have to be locked SE_SHARED. 8221 * This means if we have SE_EXCL lock on one of constituent pages none of the 8222 * large mappings to pp are locked. 8223 * 8224 * Decrease p_szc field starting from the last constituent page and ending 8225 * with the root page. This method is used because other threads rely on the 8226 * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc 8227 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 8228 * ensures that p_szc changes of the constituent pages appears atomic for all 8229 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 8230 * 8231 * This mechanism is only used for file system pages where it's not always 8232 * possible to get SE_EXCL locks on all constituent pages to demote the size 8233 * code (as is done for anonymous or kernel large pages). 8234 * 8235 * See more comments in front of sfmmu_mlspl_enter(). 
8236 */ 8237 void 8238 hat_page_demote(page_t *pp) 8239 { 8240 int index; 8241 int sz; 8242 cpuset_t cpuset; 8243 int sync = 0; 8244 page_t *rootpp; 8245 struct sf_hment *sfhme; 8246 struct sf_hment *tmphme = NULL; 8247 struct hme_blk *hmeblkp; 8248 uint_t pszc; 8249 page_t *lastpp; 8250 cpuset_t tset; 8251 pgcnt_t npgs; 8252 kmutex_t *pml; 8253 kmutex_t *pmtx = NULL; 8254 8255 ASSERT(PAGE_EXCL(pp)); 8256 ASSERT(!PP_ISFREE(pp)); 8257 ASSERT(!PP_ISKAS(pp)); 8258 ASSERT(page_szc_lock_assert(pp)); 8259 pml = sfmmu_mlist_enter(pp); 8260 8261 pszc = pp->p_szc; 8262 if (pszc == 0) { 8263 goto out; 8264 } 8265 8266 index = PP_MAPINDEX(pp) >> 1; 8267 8268 if (index) { 8269 CPUSET_ZERO(cpuset); 8270 sz = TTE64K; 8271 sync = 1; 8272 } 8273 8274 while (index) { 8275 if (!(index & 0x1)) { 8276 index >>= 1; 8277 sz++; 8278 continue; 8279 } 8280 ASSERT(sz <= pszc); 8281 rootpp = PP_GROUPLEADER(pp, sz); 8282 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8283 tmphme = sfhme->hme_next; 8284 ASSERT(!IS_PAHME(sfhme)); 8285 hmeblkp = sfmmu_hmetohblk(sfhme); 8286 if (hme_size(sfhme) != sz) { 8287 continue; 8288 } 8289 if (hmeblkp->hblk_xhat_bit) { 8290 cmn_err(CE_PANIC, 8291 "hat_page_demote: xhat hmeblk"); 8292 } 8293 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8294 CPUSET_OR(cpuset, tset); 8295 } 8296 if (index >>= 1) { 8297 sz++; 8298 } 8299 } 8300 8301 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8302 8303 if (sync) { 8304 xt_sync(cpuset); 8305 #ifdef VAC 8306 if (PP_ISTNC(pp)) { 8307 conv_tnc(rootpp, sz); 8308 } 8309 #endif /* VAC */ 8310 } 8311 8312 pmtx = sfmmu_page_enter(pp); 8313 8314 ASSERT(pp->p_szc == pszc); 8315 rootpp = PP_PAGEROOT(pp); 8316 ASSERT(rootpp->p_szc == pszc); 8317 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8318 8319 while (lastpp != rootpp) { 8320 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8321 ASSERT(sz < pszc); 8322 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8323 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8324 while (--npgs > 0) { 8325 lastpp->p_szc = (uchar_t)sz; 8326 lastpp = PP_PAGEPREV(lastpp); 8327 } 8328 if (sz) { 8329 /* 8330 * make sure before current root's pszc 8331 * is updated all updates to constituent pages pszc 8332 * fields are globally visible. 8333 */ 8334 membar_producer(); 8335 } 8336 lastpp->p_szc = sz; 8337 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8338 if (lastpp != rootpp) { 8339 lastpp = PP_PAGEPREV(lastpp); 8340 } 8341 } 8342 if (sz == 0) { 8343 /* the loop above doesn't cover this case */ 8344 rootpp->p_szc = 0; 8345 } 8346 out: 8347 ASSERT(pp->p_szc == 0); 8348 if (pmtx != NULL) { 8349 sfmmu_page_exit(pmtx); 8350 } 8351 sfmmu_mlist_exit(pml); 8352 } 8353 8354 /* 8355 * Refresh the HAT ismttecnt[] element for size szc. 8356 * Caller must have set ISM busy flag to prevent mapping 8357 * lists from changing while we're traversing them. 
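 * The totals are split between sfmmu_ismttecnt[] (ISM segments private to
 * the hat) and sfmmu_scdismttecnt[] (ISM segments belonging to the hat's
 * SCD); only the private count is returned.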
8358 */ 8359 pgcnt_t 8360 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8361 { 8362 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8363 ism_map_t *ism_map; 8364 pgcnt_t npgs = 0; 8365 pgcnt_t npgs_scd = 0; 8366 int j; 8367 sf_scd_t *scdp; 8368 uchar_t rid; 8369 8370 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8371 scdp = sfmmup->sfmmu_scdp; 8372 8373 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8374 ism_map = ism_blkp->iblk_maps; 8375 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8376 rid = ism_map[j].imap_rid; 8377 ASSERT(rid == SFMMU_INVALID_ISMRID || 8378 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8379 8380 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8381 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8382 /* ISM is in sfmmup's SCD */ 8383 npgs_scd += 8384 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8385 } else { 8386 /* ISMs is not in SCD */ 8387 npgs += 8388 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8389 } 8390 } 8391 } 8392 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8393 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8394 return (npgs); 8395 } 8396 8397 /* 8398 * Yield the memory claim requirement for an address space. 8399 * 8400 * This is currently implemented as the number of bytes that have active 8401 * hardware translations that have page structures. Therefore, it can 8402 * underestimate the traditional resident set size, eg, if the 8403 * physical page is present and the hardware translation is missing; 8404 * and it can overestimate the rss, eg, if there are active 8405 * translations to a frame buffer with page structs. 8406 * Also, it does not take sharing into account. 8407 * 8408 * Note that we don't acquire locks here since this function is most often 8409 * called from the clock thread. 8410 */ 8411 size_t 8412 hat_get_mapped_size(struct hat *hat) 8413 { 8414 size_t assize = 0; 8415 int i; 8416 8417 if (hat == NULL) 8418 return (0); 8419 8420 ASSERT(hat->sfmmu_xhat_provider == NULL); 8421 8422 for (i = 0; i < mmu_page_sizes; i++) 8423 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8424 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8425 8426 if (hat->sfmmu_iblk == NULL) 8427 return (assize); 8428 8429 for (i = 0; i < mmu_page_sizes; i++) 8430 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8431 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8432 8433 return (assize); 8434 } 8435 8436 int 8437 hat_stats_enable(struct hat *hat) 8438 { 8439 hatlock_t *hatlockp; 8440 8441 ASSERT(hat->sfmmu_xhat_provider == NULL); 8442 8443 hatlockp = sfmmu_hat_enter(hat); 8444 hat->sfmmu_rmstat++; 8445 sfmmu_hat_exit(hatlockp); 8446 return (1); 8447 } 8448 8449 void 8450 hat_stats_disable(struct hat *hat) 8451 { 8452 hatlock_t *hatlockp; 8453 8454 ASSERT(hat->sfmmu_xhat_provider == NULL); 8455 8456 hatlockp = sfmmu_hat_enter(hat); 8457 hat->sfmmu_rmstat--; 8458 sfmmu_hat_exit(hatlockp); 8459 } 8460 8461 /* 8462 * Routines for entering or removing ourselves from the 8463 * ism_hat's mapping list. This is used for both private and 8464 * SCD hats. 
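 * Callers must hold ism_mlist_lock; both routines assert this.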
8465 */ 8466 static void 8467 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8468 { 8469 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8470 8471 iment->iment_prev = NULL; 8472 iment->iment_next = ism_hat->sfmmu_iment; 8473 if (ism_hat->sfmmu_iment) { 8474 ism_hat->sfmmu_iment->iment_prev = iment; 8475 } 8476 ism_hat->sfmmu_iment = iment; 8477 } 8478 8479 static void 8480 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8481 { 8482 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8483 8484 if (ism_hat->sfmmu_iment == NULL) { 8485 panic("ism map entry remove - no entries"); 8486 } 8487 8488 if (iment->iment_prev) { 8489 ASSERT(ism_hat->sfmmu_iment != iment); 8490 iment->iment_prev->iment_next = iment->iment_next; 8491 } else { 8492 ASSERT(ism_hat->sfmmu_iment == iment); 8493 ism_hat->sfmmu_iment = iment->iment_next; 8494 } 8495 8496 if (iment->iment_next) { 8497 iment->iment_next->iment_prev = iment->iment_prev; 8498 } 8499 8500 /* 8501 * zero out the entry 8502 */ 8503 iment->iment_next = NULL; 8504 iment->iment_prev = NULL; 8505 iment->iment_hat = NULL; 8506 } 8507 8508 /* 8509 * Hat_share()/unshare() return an (non-zero) error 8510 * when saddr and daddr are not properly aligned. 8511 * 8512 * The top level mapping element determines the alignment 8513 * requirement for saddr and daddr, depending on different 8514 * architectures. 8515 * 8516 * When hat_share()/unshare() are not supported, 8517 * HATOP_SHARE()/UNSHARE() return 0 8518 */ 8519 int 8520 hat_share(struct hat *sfmmup, caddr_t addr, 8521 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 8522 { 8523 ism_blk_t *ism_blkp; 8524 ism_blk_t *new_iblk; 8525 ism_map_t *ism_map; 8526 ism_ment_t *ism_ment; 8527 int i, added; 8528 hatlock_t *hatlockp; 8529 int reload_mmu = 0; 8530 uint_t ismshift = page_get_shift(ismszc); 8531 size_t ismpgsz = page_get_pagesize(ismszc); 8532 uint_t ismmask = (uint_t)ismpgsz - 1; 8533 size_t sh_size = ISM_SHIFT(ismshift, len); 8534 ushort_t ismhatflag; 8535 hat_region_cookie_t rcookie; 8536 sf_scd_t *old_scdp; 8537 8538 #ifdef DEBUG 8539 caddr_t eaddr = addr + len; 8540 #endif /* DEBUG */ 8541 8542 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8543 ASSERT(sptaddr == ISMID_STARTADDR); 8544 /* 8545 * Check the alignment. 8546 */ 8547 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8548 return (EINVAL); 8549 8550 /* 8551 * Check size alignment. 8552 */ 8553 if (!ISM_ALIGNED(ismshift, len)) 8554 return (EINVAL); 8555 8556 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 8557 8558 /* 8559 * Allocate ism_ment for the ism_hat's mapping list, and an 8560 * ism map blk in case we need one. We must do our 8561 * allocations before acquiring locks to prevent a deadlock 8562 * in the kmem allocator on the mapping list lock. 8563 */ 8564 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8565 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8566 8567 /* 8568 * Serialize ISM mappings with the ISM busy flag, and also the 8569 * trap handlers. 8570 */ 8571 sfmmu_ismhat_enter(sfmmup, 0); 8572 8573 /* 8574 * Allocate an ism map blk if necessary. 8575 */ 8576 if (sfmmup->sfmmu_iblk == NULL) { 8577 sfmmup->sfmmu_iblk = new_iblk; 8578 bzero(new_iblk, sizeof (*new_iblk)); 8579 new_iblk->iblk_nextpa = (uint64_t)-1; 8580 membar_stst(); /* make sure next ptr visible to all CPUs */ 8581 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8582 reload_mmu = 1; 8583 new_iblk = NULL; 8584 } 8585 8586 #ifdef DEBUG 8587 /* 8588 * Make sure mapping does not already exist. 
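 * i.e. panic if [addr, eaddr) overlaps an ism map slot already installed
 * for this hat.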
8589 */ 8590 ism_blkp = sfmmup->sfmmu_iblk; 8591 while (ism_blkp != NULL) { 8592 ism_map = ism_blkp->iblk_maps; 8593 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8594 if ((addr >= ism_start(ism_map[i]) && 8595 addr < ism_end(ism_map[i])) || 8596 eaddr > ism_start(ism_map[i]) && 8597 eaddr <= ism_end(ism_map[i])) { 8598 panic("sfmmu_share: Already mapped!"); 8599 } 8600 } 8601 ism_blkp = ism_blkp->iblk_next; 8602 } 8603 #endif /* DEBUG */ 8604 8605 ASSERT(ismszc >= TTE4M); 8606 if (ismszc == TTE4M) { 8607 ismhatflag = HAT_4M_FLAG; 8608 } else if (ismszc == TTE32M) { 8609 ismhatflag = HAT_32M_FLAG; 8610 } else if (ismszc == TTE256M) { 8611 ismhatflag = HAT_256M_FLAG; 8612 } 8613 /* 8614 * Add mapping to first available mapping slot. 8615 */ 8616 ism_blkp = sfmmup->sfmmu_iblk; 8617 added = 0; 8618 while (!added) { 8619 ism_map = ism_blkp->iblk_maps; 8620 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8621 if (ism_map[i].imap_ismhat == NULL) { 8622 8623 ism_map[i].imap_ismhat = ism_hatid; 8624 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8625 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8626 ism_map[i].imap_hatflags = ismhatflag; 8627 ism_map[i].imap_sz_mask = ismmask; 8628 /* 8629 * imap_seg is checked in ISM_CHECK to see if 8630 * non-NULL, then other info assumed valid. 8631 */ 8632 membar_stst(); 8633 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8634 ism_map[i].imap_ment = ism_ment; 8635 8636 /* 8637 * Now add ourselves to the ism_hat's 8638 * mapping list. 8639 */ 8640 ism_ment->iment_hat = sfmmup; 8641 ism_ment->iment_base_va = addr; 8642 ism_hatid->sfmmu_ismhat = 1; 8643 mutex_enter(&ism_mlist_lock); 8644 iment_add(ism_ment, ism_hatid); 8645 mutex_exit(&ism_mlist_lock); 8646 added = 1; 8647 break; 8648 } 8649 } 8650 if (!added && ism_blkp->iblk_next == NULL) { 8651 ism_blkp->iblk_next = new_iblk; 8652 new_iblk = NULL; 8653 bzero(ism_blkp->iblk_next, 8654 sizeof (*ism_blkp->iblk_next)); 8655 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8656 membar_stst(); 8657 ism_blkp->iblk_nextpa = 8658 va_to_pa((caddr_t)ism_blkp->iblk_next); 8659 } 8660 ism_blkp = ism_blkp->iblk_next; 8661 } 8662 8663 /* 8664 * After calling hat_join_region, sfmmup may join a new SCD or 8665 * move from the old scd to a new scd, in which case, we want to 8666 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8667 * sfmmu_check_page_sizes at the end of this routine. 8668 */ 8669 old_scdp = sfmmup->sfmmu_scdp; 8670 8671 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8672 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8673 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8674 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8675 } 8676 /* 8677 * Update our counters for this sfmmup's ism mappings. 8678 */ 8679 for (i = 0; i <= ismszc; i++) { 8680 if (!(disable_ism_large_pages & (1 << i))) 8681 (void) ism_tsb_entries(sfmmup, i); 8682 } 8683 8684 /* 8685 * For ISM and DISM we do not support 512K pages, so we only only 8686 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8687 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8688 * 8689 * Need to set 32M/256M ISM flags to make sure 8690 * sfmmu_check_page_sizes() enables them on Panther. 
8691 */ 8692 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8693 8694 switch (ismszc) { 8695 case TTE256M: 8696 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8697 hatlockp = sfmmu_hat_enter(sfmmup); 8698 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8699 sfmmu_hat_exit(hatlockp); 8700 } 8701 break; 8702 case TTE32M: 8703 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8704 hatlockp = sfmmu_hat_enter(sfmmup); 8705 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8706 sfmmu_hat_exit(hatlockp); 8707 } 8708 break; 8709 default: 8710 break; 8711 } 8712 8713 /* 8714 * If we updated the ismblkpa for this HAT we must make 8715 * sure all CPUs running this process reload their tsbmiss area. 8716 * Otherwise they will fail to load the mappings in the tsbmiss 8717 * handler and will loop calling pagefault(). 8718 */ 8719 if (reload_mmu) { 8720 hatlockp = sfmmu_hat_enter(sfmmup); 8721 sfmmu_sync_mmustate(sfmmup); 8722 sfmmu_hat_exit(hatlockp); 8723 } 8724 8725 sfmmu_ismhat_exit(sfmmup, 0); 8726 8727 /* 8728 * Free up ismblk if we didn't use it. 8729 */ 8730 if (new_iblk != NULL) 8731 kmem_cache_free(ism_blk_cache, new_iblk); 8732 8733 /* 8734 * Check TSB and TLB page sizes. 8735 */ 8736 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8737 sfmmu_check_page_sizes(sfmmup, 0); 8738 } else { 8739 sfmmu_check_page_sizes(sfmmup, 1); 8740 } 8741 return (0); 8742 } 8743 8744 /* 8745 * hat_unshare removes exactly one ism_map from 8746 * this process's as. It expects multiple calls 8747 * to hat_unshare for multiple shm segments. 8748 */ 8749 void 8750 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8751 { 8752 ism_map_t *ism_map; 8753 ism_ment_t *free_ment = NULL; 8754 ism_blk_t *ism_blkp; 8755 struct hat *ism_hatid; 8756 int found, i; 8757 hatlock_t *hatlockp; 8758 struct tsb_info *tsbinfo; 8759 uint_t ismshift = page_get_shift(ismszc); 8760 size_t sh_size = ISM_SHIFT(ismshift, len); 8761 uchar_t ism_rid; 8762 sf_scd_t *old_scdp; 8763 8764 ASSERT(ISM_ALIGNED(ismshift, addr)); 8765 ASSERT(ISM_ALIGNED(ismshift, len)); 8766 ASSERT(sfmmup != NULL); 8767 ASSERT(sfmmup != ksfmmup); 8768 8769 if (sfmmup->sfmmu_xhat_provider) { 8770 XHAT_UNSHARE(sfmmup, addr, len); 8771 return; 8772 } else { 8773 /* 8774 * This must be a CPU HAT. If the address space has 8775 * XHATs attached, inform all XHATs that ISM segment 8776 * is going away 8777 */ 8778 ASSERT(sfmmup->sfmmu_as != NULL); 8779 if (sfmmup->sfmmu_as->a_xhat != NULL) 8780 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 8781 } 8782 8783 /* 8784 * Make sure that during the entire time ISM mappings are removed, 8785 * the trap handlers serialize behind us, and that no one else 8786 * can be mucking with ISM mappings. This also lets us get away 8787 * with not doing expensive cross calls to flush the TLB -- we 8788 * just discard the context, flush the entire TSB, and call it 8789 * a day. 8790 */ 8791 sfmmu_ismhat_enter(sfmmup, 0); 8792 8793 /* 8794 * Remove the mapping. 8795 * 8796 * We can't have any holes in the ism map. 8797 * The tsb miss code while searching the ism map will 8798 * stop on an empty map slot. So we must move 8799 * everyone past the hole up 1 if any. 8800 * 8801 * Also empty ism map blks are not freed until the 8802 * process exits. This is to prevent a MT race condition 8803 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 
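 * The removal below therefore compacts the array, copying each following
 * slot (and the first slot of the next block, if any) down by one rather
 * than simply clearing the matched slot.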
8804 */ 8805 found = 0; 8806 ism_blkp = sfmmup->sfmmu_iblk; 8807 while (!found && ism_blkp != NULL) { 8808 ism_map = ism_blkp->iblk_maps; 8809 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8810 if (addr == ism_start(ism_map[i]) && 8811 sh_size == (size_t)(ism_size(ism_map[i]))) { 8812 found = 1; 8813 break; 8814 } 8815 } 8816 if (!found) 8817 ism_blkp = ism_blkp->iblk_next; 8818 } 8819 8820 if (found) { 8821 ism_hatid = ism_map[i].imap_ismhat; 8822 ism_rid = ism_map[i].imap_rid; 8823 ASSERT(ism_hatid != NULL); 8824 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8825 8826 /* 8827 * After hat_leave_region, the sfmmup may leave SCD, 8828 * in which case, we want to grow the private tsb size when 8829 * calling sfmmu_check_page_sizes at the end of the routine. 8830 */ 8831 old_scdp = sfmmup->sfmmu_scdp; 8832 /* 8833 * Then remove ourselves from the region. 8834 */ 8835 if (ism_rid != SFMMU_INVALID_ISMRID) { 8836 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8837 HAT_REGION_ISM); 8838 } 8839 8840 /* 8841 * And now guarantee that any other cpu 8842 * that tries to process an ISM miss 8843 * will go to tl=0. 8844 */ 8845 hatlockp = sfmmu_hat_enter(sfmmup); 8846 sfmmu_invalidate_ctx(sfmmup); 8847 sfmmu_hat_exit(hatlockp); 8848 8849 /* 8850 * Remove ourselves from the ism mapping list. 8851 */ 8852 mutex_enter(&ism_mlist_lock); 8853 iment_sub(ism_map[i].imap_ment, ism_hatid); 8854 mutex_exit(&ism_mlist_lock); 8855 free_ment = ism_map[i].imap_ment; 8856 8857 /* 8858 * We delete the ism map by copying 8859 * the next map over the current one. 8860 * We will take the next one in the maps 8861 * array or from the next ism_blk. 8862 */ 8863 while (ism_blkp != NULL) { 8864 ism_map = ism_blkp->iblk_maps; 8865 while (i < (ISM_MAP_SLOTS - 1)) { 8866 ism_map[i] = ism_map[i + 1]; 8867 i++; 8868 } 8869 /* i == (ISM_MAP_SLOTS - 1) */ 8870 ism_blkp = ism_blkp->iblk_next; 8871 if (ism_blkp != NULL) { 8872 ism_map[i] = ism_blkp->iblk_maps[0]; 8873 i = 0; 8874 } else { 8875 ism_map[i].imap_seg = 0; 8876 ism_map[i].imap_vb_shift = 0; 8877 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8878 ism_map[i].imap_hatflags = 0; 8879 ism_map[i].imap_sz_mask = 0; 8880 ism_map[i].imap_ismhat = NULL; 8881 ism_map[i].imap_ment = NULL; 8882 } 8883 } 8884 8885 /* 8886 * Now flush entire TSB for the process, since 8887 * demapping page by page can be too expensive. 8888 * We don't have to flush the TLB here anymore 8889 * since we switch to a new TLB ctx instead. 8890 * Also, there is no need to flush if the process 8891 * is exiting since the TSB will be freed later. 8892 */ 8893 if (!sfmmup->sfmmu_free) { 8894 hatlockp = sfmmu_hat_enter(sfmmup); 8895 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8896 tsbinfo = tsbinfo->tsb_next) { 8897 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8898 continue; 8899 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 8900 tsbinfo->tsb_flags |= 8901 TSB_FLUSH_NEEDED; 8902 continue; 8903 } 8904 8905 sfmmu_inv_tsb(tsbinfo->tsb_va, 8906 TSB_BYTES(tsbinfo->tsb_szc)); 8907 } 8908 sfmmu_hat_exit(hatlockp); 8909 } 8910 } 8911 8912 /* 8913 * Update our counters for this sfmmup's ism mappings. 8914 */ 8915 for (i = 0; i <= ismszc; i++) { 8916 if (!(disable_ism_large_pages & (1 << i))) 8917 (void) ism_tsb_entries(sfmmup, i); 8918 } 8919 8920 sfmmu_ismhat_exit(sfmmup, 0); 8921 8922 /* 8923 * We must do our freeing here after dropping locks 8924 * to prevent a deadlock in the kmem allocator on the 8925 * mapping list lock. 
8926 */ 8927 if (free_ment != NULL) 8928 kmem_cache_free(ism_ment_cache, free_ment); 8929 8930 /* 8931 * Check TSB and TLB page sizes if the process isn't exiting. 8932 */ 8933 if (!sfmmup->sfmmu_free) { 8934 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 8935 sfmmu_check_page_sizes(sfmmup, 1); 8936 } else { 8937 sfmmu_check_page_sizes(sfmmup, 0); 8938 } 8939 } 8940 } 8941 8942 /* ARGSUSED */ 8943 static int 8944 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8945 { 8946 /* void *buf is sfmmu_t pointer */ 8947 bzero(buf, sizeof (sfmmu_t)); 8948 8949 return (0); 8950 } 8951 8952 /* ARGSUSED */ 8953 static void 8954 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8955 { 8956 /* void *buf is sfmmu_t pointer */ 8957 } 8958 8959 /* 8960 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8961 * field to be the pa of this hmeblk 8962 */ 8963 /* ARGSUSED */ 8964 static int 8965 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8966 { 8967 struct hme_blk *hmeblkp; 8968 8969 bzero(buf, (size_t)cdrarg); 8970 hmeblkp = (struct hme_blk *)buf; 8971 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8972 8973 #ifdef HBLK_TRACE 8974 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8975 #endif /* HBLK_TRACE */ 8976 8977 return (0); 8978 } 8979 8980 /* ARGSUSED */ 8981 static void 8982 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8983 { 8984 8985 #ifdef HBLK_TRACE 8986 8987 struct hme_blk *hmeblkp; 8988 8989 hmeblkp = (struct hme_blk *)buf; 8990 mutex_destroy(&hmeblkp->hblk_audit_lock); 8991 8992 #endif /* HBLK_TRACE */ 8993 } 8994 8995 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8996 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8997 /* 8998 * The kmem allocator will callback into our reclaim routine when the system 8999 * is running low in memory. We traverse the hash and free up all unused but 9000 * still cached hme_blks. We also traverse the free list and free them up 9001 * as well. 
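 * Each callback scans only 1/sfmmu_cache_reclaim_scan_ratio of the user and
 * kernel hash tables, resuming from a per-table reclaim hand, so successive
 * callbacks eventually cover the whole hash.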
9002 */ 9003 /*ARGSUSED*/ 9004 static void 9005 sfmmu_hblkcache_reclaim(void *cdrarg) 9006 { 9007 int i; 9008 uint64_t hblkpa, prevpa, nx_pa; 9009 struct hmehash_bucket *hmebp; 9010 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 9011 static struct hmehash_bucket *uhmehash_reclaim_hand; 9012 static struct hmehash_bucket *khmehash_reclaim_hand; 9013 struct hme_blk *list = NULL; 9014 9015 hmebp = uhmehash_reclaim_hand; 9016 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 9017 uhmehash_reclaim_hand = hmebp = uhme_hash; 9018 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9019 9020 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9021 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9022 hmeblkp = hmebp->hmeblkp; 9023 hblkpa = hmebp->hmeh_nextpa; 9024 prevpa = 0; 9025 pr_hblk = NULL; 9026 while (hmeblkp) { 9027 nx_hblk = hmeblkp->hblk_next; 9028 nx_pa = hmeblkp->hblk_nextpa; 9029 if (!hmeblkp->hblk_vcnt && 9030 !hmeblkp->hblk_hmecnt) { 9031 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9032 prevpa, pr_hblk); 9033 sfmmu_hblk_free(hmebp, hmeblkp, 9034 hblkpa, &list); 9035 } else { 9036 pr_hblk = hmeblkp; 9037 prevpa = hblkpa; 9038 } 9039 hmeblkp = nx_hblk; 9040 hblkpa = nx_pa; 9041 } 9042 SFMMU_HASH_UNLOCK(hmebp); 9043 } 9044 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 9045 hmebp = uhme_hash; 9046 } 9047 9048 hmebp = khmehash_reclaim_hand; 9049 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 9050 khmehash_reclaim_hand = hmebp = khme_hash; 9051 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9052 9053 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9054 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9055 hmeblkp = hmebp->hmeblkp; 9056 hblkpa = hmebp->hmeh_nextpa; 9057 prevpa = 0; 9058 pr_hblk = NULL; 9059 while (hmeblkp) { 9060 nx_hblk = hmeblkp->hblk_next; 9061 nx_pa = hmeblkp->hblk_nextpa; 9062 if (!hmeblkp->hblk_vcnt && 9063 !hmeblkp->hblk_hmecnt) { 9064 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9065 prevpa, pr_hblk); 9066 sfmmu_hblk_free(hmebp, hmeblkp, 9067 hblkpa, &list); 9068 } else { 9069 pr_hblk = hmeblkp; 9070 prevpa = hblkpa; 9071 } 9072 hmeblkp = nx_hblk; 9073 hblkpa = nx_pa; 9074 } 9075 SFMMU_HASH_UNLOCK(hmebp); 9076 } 9077 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9078 hmebp = khme_hash; 9079 } 9080 sfmmu_hblks_list_purge(&list); 9081 } 9082 9083 /* 9084 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9085 * same goes for sfmmu_get_addrvcolor(). 9086 * 9087 * This function will return the virtual color for the specified page. The 9088 * virtual color corresponds to this page current mapping or its last mapping. 9089 * It is used by memory allocators to choose addresses with the correct 9090 * alignment so vac consistency is automatically maintained. If the page 9091 * has no color it returns -1. 9092 */ 9093 /*ARGSUSED*/ 9094 int 9095 sfmmu_get_ppvcolor(struct page *pp) 9096 { 9097 #ifdef VAC 9098 int color; 9099 9100 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9101 return (-1); 9102 } 9103 color = PP_GET_VCOLOR(pp); 9104 ASSERT(color < mmu_btop(shm_alignment)); 9105 return (color); 9106 #else 9107 return (-1); 9108 #endif /* VAC */ 9109 } 9110 9111 /* 9112 * This function will return the desired alignment for vac consistency 9113 * (vac color) given a virtual address. If no vac is present it returns -1. 
9114 */ 9115 /*ARGSUSED*/ 9116 int 9117 sfmmu_get_addrvcolor(caddr_t vaddr) 9118 { 9119 #ifdef VAC 9120 if (cache & CACHE_VAC) { 9121 return (addr_to_vcolor(vaddr)); 9122 } else { 9123 return (-1); 9124 } 9125 #else 9126 return (-1); 9127 #endif /* VAC */ 9128 } 9129 9130 #ifdef VAC 9131 /* 9132 * Check for conflicts. 9133 * A conflict exists if the new and existent mappings do not match in 9134 * their "shm_alignment fields. If conflicts exist, the existant mappings 9135 * are flushed unless one of them is locked. If one of them is locked, then 9136 * the mappings are flushed and converted to non-cacheable mappings. 9137 */ 9138 static void 9139 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 9140 { 9141 struct hat *tmphat; 9142 struct sf_hment *sfhmep, *tmphme = NULL; 9143 struct hme_blk *hmeblkp; 9144 int vcolor; 9145 tte_t tte; 9146 9147 ASSERT(sfmmu_mlist_held(pp)); 9148 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 9149 9150 vcolor = addr_to_vcolor(addr); 9151 if (PP_NEWPAGE(pp)) { 9152 PP_SET_VCOLOR(pp, vcolor); 9153 return; 9154 } 9155 9156 if (PP_GET_VCOLOR(pp) == vcolor) { 9157 return; 9158 } 9159 9160 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 9161 /* 9162 * Previous user of page had a different color 9163 * but since there are no current users 9164 * we just flush the cache and change the color. 9165 */ 9166 SFMMU_STAT(sf_pgcolor_conflict); 9167 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9168 PP_SET_VCOLOR(pp, vcolor); 9169 return; 9170 } 9171 9172 /* 9173 * If we get here we have a vac conflict with a current 9174 * mapping. VAC conflict policy is as follows. 9175 * - The default is to unload the other mappings unless: 9176 * - If we have a large mapping we uncache the page. 9177 * We need to uncache the rest of the large page too. 9178 * - If any of the mappings are locked we uncache the page. 9179 * - If the requested mapping is inconsistent 9180 * with another mapping and that mapping 9181 * is in the same address space we have to 9182 * make it non-cached. The default thing 9183 * to do is unload the inconsistent mapping 9184 * but if they are in the same address space 9185 * we run the risk of unmapping the pc or the 9186 * stack which we will use as we return to the user, 9187 * in which case we can then fault on the thing 9188 * we just unloaded and get into an infinite loop. 9189 */ 9190 if (PP_ISMAPPED_LARGE(pp)) { 9191 int sz; 9192 9193 /* 9194 * Existing mapping is for big pages. We don't unload 9195 * existing big mappings to satisfy new mappings. 9196 * Always convert all mappings to TNC. 9197 */ 9198 sz = fnd_mapping_sz(pp); 9199 pp = PP_GROUPLEADER(pp, sz); 9200 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 9201 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 9202 TTEPAGES(sz)); 9203 9204 return; 9205 } 9206 9207 /* 9208 * check if any mapping is in same as or if it is locked 9209 * since in that case we need to uncache. 
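 * ("same as" means the same address space as the mapping being entered,
 * i.e. tmphat == hat; shared hmeblks and locked mappings are uncached as
 * well.)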
9210 */ 9211 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9212 tmphme = sfhmep->hme_next; 9213 if (IS_PAHME(sfhmep)) 9214 continue; 9215 hmeblkp = sfmmu_hmetohblk(sfhmep); 9216 if (hmeblkp->hblk_xhat_bit) 9217 continue; 9218 tmphat = hblktosfmmu(hmeblkp); 9219 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9220 ASSERT(TTE_IS_VALID(&tte)); 9221 if (hmeblkp->hblk_shared || tmphat == hat || 9222 hmeblkp->hblk_lckcnt) { 9223 /* 9224 * We have an uncache conflict 9225 */ 9226 SFMMU_STAT(sf_uncache_conflict); 9227 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9228 return; 9229 } 9230 } 9231 9232 /* 9233 * We have an unload conflict 9234 * We have already checked for LARGE mappings, therefore 9235 * the remaining mapping(s) must be TTE8K. 9236 */ 9237 SFMMU_STAT(sf_unload_conflict); 9238 9239 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9240 tmphme = sfhmep->hme_next; 9241 if (IS_PAHME(sfhmep)) 9242 continue; 9243 hmeblkp = sfmmu_hmetohblk(sfhmep); 9244 if (hmeblkp->hblk_xhat_bit) 9245 continue; 9246 ASSERT(!hmeblkp->hblk_shared); 9247 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9248 } 9249 9250 if (PP_ISMAPPED_KPM(pp)) 9251 sfmmu_kpm_vac_unload(pp, addr); 9252 9253 /* 9254 * Unloads only do TLB flushes so we need to flush the 9255 * cache here. 9256 */ 9257 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9258 PP_SET_VCOLOR(pp, vcolor); 9259 } 9260 9261 /* 9262 * Whenever a mapping is unloaded and the page is in TNC state, 9263 * we see if the page can be made cacheable again. 'pp' is 9264 * the page that we just unloaded a mapping from, the size 9265 * of mapping that was unloaded is 'ottesz'. 9266 * Remark: 9267 * The recache policy for mpss pages can leave a performance problem 9268 * under the following circumstances: 9269 * . A large page in uncached mode has just been unmapped. 9270 * . All constituent pages are TNC due to a conflicting small mapping. 9271 * . There are many other, non conflicting, small mappings around for 9272 * a lot of the constituent pages. 9273 * . We're called w/ the "old" groupleader page and the old ottesz, 9274 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9275 * we end up w/ TTE8K or npages == 1. 9276 * . We call tst_tnc w/ the old groupleader only, and if there is no 9277 * conflict, we re-cache only this page. 9278 * . All other small mappings are not checked and will be left in TNC mode. 9279 * The problem is not very serious because: 9280 * . mpss is actually only defined for heap and stack, so the probability 9281 * is not very high that a large page mapping exists in parallel to a small 9282 * one (this is possible, but seems to be bad programming style in the 9283 * appl). 9284 * . The problem gets a little bit more serious, when those TNC pages 9285 * have to be mapped into kernel space, e.g. for networking. 9286 * . When VAC alias conflicts occur in applications, this is regarded 9287 * as an application bug. So if kstat's show them, the appl should 9288 * be changed anyway. 9289 */ 9290 void 9291 conv_tnc(page_t *pp, int ottesz) 9292 { 9293 int cursz, dosz; 9294 pgcnt_t curnpgs, dopgs; 9295 pgcnt_t pg64k; 9296 page_t *pp2; 9297 9298 /* 9299 * Determine how big a range we check for TNC and find 9300 * leader page. cursz is the size of the biggest 9301 * mapping that still exist on 'pp'. 
9302 */ 9303 if (PP_ISMAPPED_LARGE(pp)) { 9304 cursz = fnd_mapping_sz(pp); 9305 } else { 9306 cursz = TTE8K; 9307 } 9308 9309 if (ottesz >= cursz) { 9310 dosz = ottesz; 9311 pp2 = pp; 9312 } else { 9313 dosz = cursz; 9314 pp2 = PP_GROUPLEADER(pp, dosz); 9315 } 9316 9317 pg64k = TTEPAGES(TTE64K); 9318 dopgs = TTEPAGES(dosz); 9319 9320 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9321 9322 while (dopgs != 0) { 9323 curnpgs = TTEPAGES(cursz); 9324 if (tst_tnc(pp2, curnpgs)) { 9325 SFMMU_STAT_ADD(sf_recache, curnpgs); 9326 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9327 curnpgs); 9328 } 9329 9330 ASSERT(dopgs >= curnpgs); 9331 dopgs -= curnpgs; 9332 9333 if (dopgs == 0) { 9334 break; 9335 } 9336 9337 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9338 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9339 cursz = fnd_mapping_sz(pp2); 9340 } else { 9341 cursz = TTE8K; 9342 } 9343 } 9344 } 9345 9346 /* 9347 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9348 * returns 0 otherwise. Note that oaddr argument is valid for only 9349 * 8k pages. 9350 */ 9351 int 9352 tst_tnc(page_t *pp, pgcnt_t npages) 9353 { 9354 struct sf_hment *sfhme; 9355 struct hme_blk *hmeblkp; 9356 tte_t tte; 9357 caddr_t vaddr; 9358 int clr_valid = 0; 9359 int color, color1, bcolor; 9360 int i, ncolors; 9361 9362 ASSERT(pp != NULL); 9363 ASSERT(!(cache & CACHE_WRITEBACK)); 9364 9365 if (npages > 1) { 9366 ncolors = CACHE_NUM_COLOR; 9367 } 9368 9369 for (i = 0; i < npages; i++) { 9370 ASSERT(sfmmu_mlist_held(pp)); 9371 ASSERT(PP_ISTNC(pp)); 9372 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9373 9374 if (PP_ISPNC(pp)) { 9375 return (0); 9376 } 9377 9378 clr_valid = 0; 9379 if (PP_ISMAPPED_KPM(pp)) { 9380 caddr_t kpmvaddr; 9381 9382 ASSERT(kpm_enable); 9383 kpmvaddr = hat_kpm_page2va(pp, 1); 9384 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9385 color1 = addr_to_vcolor(kpmvaddr); 9386 clr_valid = 1; 9387 } 9388 9389 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9390 if (IS_PAHME(sfhme)) 9391 continue; 9392 hmeblkp = sfmmu_hmetohblk(sfhme); 9393 if (hmeblkp->hblk_xhat_bit) 9394 continue; 9395 9396 sfmmu_copytte(&sfhme->hme_tte, &tte); 9397 ASSERT(TTE_IS_VALID(&tte)); 9398 9399 vaddr = tte_to_vaddr(hmeblkp, tte); 9400 color = addr_to_vcolor(vaddr); 9401 9402 if (npages > 1) { 9403 /* 9404 * If there is a big mapping, make sure 9405 * 8K mapping is consistent with the big 9406 * mapping. 
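 * The base of a large mapping is assumed to be color aligned, so constituent
 * page i is expected to have color i % ncolors; a mismatch means the pages
 * cannot be made cacheable again.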
9407 */ 9408 bcolor = i % ncolors; 9409 if (color != bcolor) { 9410 return (0); 9411 } 9412 } 9413 if (!clr_valid) { 9414 clr_valid = 1; 9415 color1 = color; 9416 } 9417 9418 if (color1 != color) { 9419 return (0); 9420 } 9421 } 9422 9423 pp = PP_PAGENEXT(pp); 9424 } 9425 9426 return (1); 9427 } 9428 9429 void 9430 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 9431 pgcnt_t npages) 9432 { 9433 kmutex_t *pmtx; 9434 int i, ncolors, bcolor; 9435 kpm_hlk_t *kpmp; 9436 cpuset_t cpuset; 9437 9438 ASSERT(pp != NULL); 9439 ASSERT(!(cache & CACHE_WRITEBACK)); 9440 9441 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 9442 pmtx = sfmmu_page_enter(pp); 9443 9444 /* 9445 * Fast path caching single unmapped page 9446 */ 9447 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 9448 flags == HAT_CACHE) { 9449 PP_CLRTNC(pp); 9450 PP_CLRPNC(pp); 9451 sfmmu_page_exit(pmtx); 9452 sfmmu_kpm_kpmp_exit(kpmp); 9453 return; 9454 } 9455 9456 /* 9457 * We need to capture all cpus in order to change cacheability 9458 * because we can't allow one cpu to access the same physical 9459 * page using a cacheable and a non-cacheable mapping at the same 9460 * time. Since we may end up walking the ism mapping list 9461 * we have to grab its lock now since we can't after all the 9462 * cpus have been captured. 9463 */ 9464 sfmmu_hat_lock_all(); 9465 mutex_enter(&ism_mlist_lock); 9466 kpreempt_disable(); 9467 cpuset = cpu_ready_set; 9468 xc_attention(cpuset); 9469 9470 if (npages > 1) { 9471 /* 9472 * Make sure all colors are flushed since 9473 * sfmmu_page_cache() only flushes one color; 9474 * it does not know about big pages. 9475 */ 9476 ncolors = CACHE_NUM_COLOR; 9477 if (flags & HAT_TMPNC) { 9478 for (i = 0; i < ncolors; i++) { 9479 sfmmu_cache_flushcolor(i, pp->p_pagenum); 9480 } 9481 cache_flush_flag = CACHE_NO_FLUSH; 9482 } 9483 } 9484 9485 for (i = 0; i < npages; i++) { 9486 9487 ASSERT(sfmmu_mlist_held(pp)); 9488 9489 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 9490 9491 if (npages > 1) { 9492 bcolor = i % ncolors; 9493 } else { 9494 bcolor = NO_VCOLOR; 9495 } 9496 9497 sfmmu_page_cache(pp, flags, cache_flush_flag, 9498 bcolor); 9499 } 9500 9501 pp = PP_PAGENEXT(pp); 9502 } 9503 9504 xt_sync(cpuset); 9505 xc_dismissed(cpuset); 9506 mutex_exit(&ism_mlist_lock); 9507 sfmmu_hat_unlock_all(); 9508 sfmmu_page_exit(pmtx); 9509 sfmmu_kpm_kpmp_exit(kpmp); 9510 kpreempt_enable(); 9511 } 9512 9513 /* 9514 * This function changes the virtual cacheability of all mappings to a 9515 * particular page. When changing from uncache to cacheable the mappings will 9516 * only be changed if all of them have the same virtual color. 9517 * We need to flush the cache on all cpus. It is possible that 9518 * a process referenced a page as cacheable but has since exited 9519 * and cleared the mapping list. We still need to flush it but have no 9520 * state, so flushing on all cpus is the only alternative.
9521 */ 9522 static void 9523 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9524 { 9525 struct sf_hment *sfhme; 9526 struct hme_blk *hmeblkp; 9527 sfmmu_t *sfmmup; 9528 tte_t tte, ttemod; 9529 caddr_t vaddr; 9530 int ret, color; 9531 pfn_t pfn; 9532 9533 color = bcolor; 9534 pfn = pp->p_pagenum; 9535 9536 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9537 9538 if (IS_PAHME(sfhme)) 9539 continue; 9540 hmeblkp = sfmmu_hmetohblk(sfhme); 9541 9542 if (hmeblkp->hblk_xhat_bit) 9543 continue; 9544 9545 sfmmu_copytte(&sfhme->hme_tte, &tte); 9546 ASSERT(TTE_IS_VALID(&tte)); 9547 vaddr = tte_to_vaddr(hmeblkp, tte); 9548 color = addr_to_vcolor(vaddr); 9549 9550 #ifdef DEBUG 9551 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9552 ASSERT(color == bcolor); 9553 } 9554 #endif 9555 9556 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9557 9558 ttemod = tte; 9559 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9560 TTE_CLR_VCACHEABLE(&ttemod); 9561 } else { /* flags & HAT_CACHE */ 9562 TTE_SET_VCACHEABLE(&ttemod); 9563 } 9564 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9565 if (ret < 0) { 9566 /* 9567 * Since all cpus are captured modifytte should not 9568 * fail. 9569 */ 9570 panic("sfmmu_page_cache: write to tte failed"); 9571 } 9572 9573 sfmmup = hblktosfmmu(hmeblkp); 9574 if (cache_flush_flag == CACHE_FLUSH) { 9575 /* 9576 * Flush TSBs, TLBs and caches 9577 */ 9578 if (hmeblkp->hblk_shared) { 9579 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9580 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9581 sf_region_t *rgnp; 9582 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9583 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9584 ASSERT(srdp != NULL); 9585 rgnp = srdp->srd_hmergnp[rid]; 9586 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9587 srdp, rgnp, rid); 9588 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9589 hmeblkp, 0); 9590 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9591 } else if (sfmmup->sfmmu_ismhat) { 9592 if (flags & HAT_CACHE) { 9593 SFMMU_STAT(sf_ism_recache); 9594 } else { 9595 SFMMU_STAT(sf_ism_uncache); 9596 } 9597 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9598 pfn, CACHE_FLUSH); 9599 } else { 9600 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9601 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9602 } 9603 9604 /* 9605 * all cache entries belonging to this pfn are 9606 * now flushed. 9607 */ 9608 cache_flush_flag = CACHE_NO_FLUSH; 9609 } else { 9610 /* 9611 * Flush only TSBs and TLBs. 
9612 */ 9613 if (hmeblkp->hblk_shared) { 9614 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9615 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9616 sf_region_t *rgnp; 9617 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9618 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9619 ASSERT(srdp != NULL); 9620 rgnp = srdp->srd_hmergnp[rid]; 9621 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9622 srdp, rgnp, rid); 9623 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9624 hmeblkp, 0); 9625 } else if (sfmmup->sfmmu_ismhat) { 9626 if (flags & HAT_CACHE) { 9627 SFMMU_STAT(sf_ism_recache); 9628 } else { 9629 SFMMU_STAT(sf_ism_uncache); 9630 } 9631 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9632 pfn, CACHE_NO_FLUSH); 9633 } else { 9634 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9635 } 9636 } 9637 } 9638 9639 if (PP_ISMAPPED_KPM(pp)) 9640 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9641 9642 switch (flags) { 9643 9644 default: 9645 panic("sfmmu_pagecache: unknown flags"); 9646 break; 9647 9648 case HAT_CACHE: 9649 PP_CLRTNC(pp); 9650 PP_CLRPNC(pp); 9651 PP_SET_VCOLOR(pp, color); 9652 break; 9653 9654 case HAT_TMPNC: 9655 PP_SETTNC(pp); 9656 PP_SET_VCOLOR(pp, NO_VCOLOR); 9657 break; 9658 9659 case HAT_UNCACHE: 9660 PP_SETPNC(pp); 9661 PP_CLRTNC(pp); 9662 PP_SET_VCOLOR(pp, NO_VCOLOR); 9663 break; 9664 } 9665 } 9666 #endif /* VAC */ 9667 9668 9669 /* 9670 * Wrapper routine used to return a context. 9671 * 9672 * It's the responsibility of the caller to guarantee that the 9673 * process serializes on calls here by taking the HAT lock for 9674 * the hat. 9675 * 9676 */ 9677 static void 9678 sfmmu_get_ctx(sfmmu_t *sfmmup) 9679 { 9680 mmu_ctx_t *mmu_ctxp; 9681 uint_t pstate_save; 9682 int ret; 9683 9684 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9685 ASSERT(sfmmup != ksfmmup); 9686 9687 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9688 sfmmu_setup_tsbinfo(sfmmup); 9689 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9690 } 9691 9692 kpreempt_disable(); 9693 9694 mmu_ctxp = CPU_MMU_CTXP(CPU); 9695 ASSERT(mmu_ctxp); 9696 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9697 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9698 9699 /* 9700 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 9701 */ 9702 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9703 sfmmu_ctx_wrap_around(mmu_ctxp); 9704 9705 /* 9706 * Let the MMU set up the page sizes to use for 9707 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9708 */ 9709 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9710 mmu_set_ctx_page_sizes(sfmmup); 9711 } 9712 9713 /* 9714 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9715 * interrupts disabled to prevent race condition with wrap-around 9716 * ctx invalidatation. In sun4v, ctx invalidation also involves 9717 * a HV call to set the number of TSBs to 0. If interrupts are not 9718 * disabled until after sfmmu_load_mmustate is complete TSBs may 9719 * become assigned to INVALID_CONTEXT. This is not allowed. 
9720 */ 9721 pstate_save = sfmmu_disable_intrs(); 9722 9723 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9724 sfmmup->sfmmu_scdp != NULL) { 9725 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9726 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9727 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9728 /* debug purpose only */ 9729 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9730 != INVALID_CONTEXT); 9731 } 9732 sfmmu_load_mmustate(sfmmup); 9733 9734 sfmmu_enable_intrs(pstate_save); 9735 9736 kpreempt_enable(); 9737 } 9738 9739 /* 9740 * When all cnums are used up in a MMU, cnum will wrap around to the 9741 * next generation and start from 2. 9742 */ 9743 static void 9744 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 9745 { 9746 9747 /* caller must have disabled the preemption */ 9748 ASSERT(curthread->t_preempt >= 1); 9749 ASSERT(mmu_ctxp != NULL); 9750 9751 /* acquire Per-MMU (PM) spin lock */ 9752 mutex_enter(&mmu_ctxp->mmu_lock); 9753 9754 /* re-check to see if wrap-around is needed */ 9755 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9756 goto done; 9757 9758 SFMMU_MMU_STAT(mmu_wrap_around); 9759 9760 /* update gnum */ 9761 ASSERT(mmu_ctxp->mmu_gnum != 0); 9762 mmu_ctxp->mmu_gnum++; 9763 if (mmu_ctxp->mmu_gnum == 0 || 9764 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9765 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9766 (void *)mmu_ctxp); 9767 } 9768 9769 if (mmu_ctxp->mmu_ncpus > 1) { 9770 cpuset_t cpuset; 9771 9772 membar_enter(); /* make sure updated gnum visible */ 9773 9774 SFMMU_XCALL_STATS(NULL); 9775 9776 /* xcall to others on the same MMU to invalidate ctx */ 9777 cpuset = mmu_ctxp->mmu_cpuset; 9778 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 9779 CPUSET_DEL(cpuset, CPU->cpu_id); 9780 CPUSET_AND(cpuset, cpu_ready_set); 9781 9782 /* 9783 * Pass in INVALID_CONTEXT as the first parameter to 9784 * sfmmu_raise_tsb_exception, which invalidates the context 9785 * of any process running on the CPUs in the MMU. 9786 */ 9787 xt_some(cpuset, sfmmu_raise_tsb_exception, 9788 INVALID_CONTEXT, INVALID_CONTEXT); 9789 xt_sync(cpuset); 9790 9791 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9792 } 9793 9794 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9795 sfmmu_setctx_sec(INVALID_CONTEXT); 9796 sfmmu_clear_utsbinfo(); 9797 } 9798 9799 /* 9800 * No xcall is needed here. For sun4u systems all CPUs in context 9801 * domain share a single physical MMU therefore it's enough to flush 9802 * TLB on local CPU. On sun4v systems we use 1 global context 9803 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9804 * handler. Note that vtag_flushall_uctxs() is called 9805 * for Ultra II machine, where the equivalent flushall functionality 9806 * is implemented in SW, and only user ctx TLB entries are flushed. 9807 */ 9808 if (&vtag_flushall_uctxs != NULL) { 9809 vtag_flushall_uctxs(); 9810 } else { 9811 vtag_flushall(); 9812 } 9813 9814 /* reset mmu cnum, skips cnum 0 and 1 */ 9815 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9816 9817 done: 9818 mutex_exit(&mmu_ctxp->mmu_lock); 9819 } 9820 9821 9822 /* 9823 * For multi-threaded process, set the process context to INVALID_CONTEXT 9824 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9825 * process, we can just load the MMU state directly without having to 9826 * set context invalid. Caller must hold the hat lock since we don't 9827 * acquire it here. 
9828 */ 9829 static void 9830 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 9831 { 9832 uint_t cnum; 9833 uint_t pstate_save; 9834 9835 ASSERT(sfmmup != ksfmmup); 9836 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9837 9838 kpreempt_disable(); 9839 9840 /* 9841 * We check whether the pass'ed-in sfmmup is the same as the 9842 * current running proc. This is to makes sure the current proc 9843 * stays single-threaded if it already is. 9844 */ 9845 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 9846 (curthread->t_procp->p_lwpcnt == 1)) { 9847 /* single-thread */ 9848 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 9849 if (cnum != INVALID_CONTEXT) { 9850 uint_t curcnum; 9851 /* 9852 * Disable interrupts to prevent race condition 9853 * with sfmmu_ctx_wrap_around ctx invalidation. 9854 * In sun4v, ctx invalidation involves setting 9855 * TSB to NULL, hence, interrupts should be disabled 9856 * untill after sfmmu_load_mmustate is completed. 9857 */ 9858 pstate_save = sfmmu_disable_intrs(); 9859 curcnum = sfmmu_getctx_sec(); 9860 if (curcnum == cnum) 9861 sfmmu_load_mmustate(sfmmup); 9862 sfmmu_enable_intrs(pstate_save); 9863 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 9864 } 9865 } else { 9866 /* 9867 * multi-thread 9868 * or when sfmmup is not the same as the curproc. 9869 */ 9870 sfmmu_invalidate_ctx(sfmmup); 9871 } 9872 9873 kpreempt_enable(); 9874 } 9875 9876 9877 /* 9878 * Replace the specified TSB with a new TSB. This function gets called when 9879 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 9880 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 9881 * (8K). 9882 * 9883 * Caller must hold the HAT lock, but should assume any tsb_info 9884 * pointers it has are no longer valid after calling this function. 9885 * 9886 * Return values: 9887 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 9888 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 9889 * something to this tsbinfo/TSB 9890 * TSB_SUCCESS Operation succeeded 9891 */ 9892 static tsb_replace_rc_t 9893 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 9894 hatlock_t *hatlockp, uint_t flags) 9895 { 9896 struct tsb_info *new_tsbinfo = NULL; 9897 struct tsb_info *curtsb, *prevtsb; 9898 uint_t tte_sz_mask; 9899 int i; 9900 9901 ASSERT(sfmmup != ksfmmup); 9902 ASSERT(sfmmup->sfmmu_ismhat == 0); 9903 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9904 ASSERT(szc <= tsb_max_growsize); 9905 9906 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 9907 return (TSB_LOSTRACE); 9908 9909 /* 9910 * Find the tsb_info ahead of this one in the list, and 9911 * also make sure that the tsb_info passed in really 9912 * exists! 9913 */ 9914 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9915 curtsb != old_tsbinfo && curtsb != NULL; 9916 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9917 ; 9918 ASSERT(curtsb != NULL); 9919 9920 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9921 /* 9922 * The process is swapped out, so just set the new size 9923 * code. When it swaps back in, we'll allocate a new one 9924 * of the new chosen size. 9925 */ 9926 curtsb->tsb_szc = szc; 9927 return (TSB_SUCCESS); 9928 } 9929 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 9930 9931 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 9932 9933 /* 9934 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 9935 * If we fail to allocate a TSB, exit. 9936 * 9937 * If tsb grows with new tsb size > 4M and old tsb size < 4M, 9938 * then try 4M slab after the initial alloc fails. 
9939 * 9940 * If tsb swapin with tsb size > 4M, then try 4M after the 9941 * initial alloc fails. 9942 */ 9943 sfmmu_hat_exit(hatlockp); 9944 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, 9945 tte_sz_mask, flags, sfmmup) && 9946 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) || 9947 (!(flags & TSB_SWAPIN) && 9948 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) || 9949 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE, 9950 tte_sz_mask, flags, sfmmup))) { 9951 (void) sfmmu_hat_enter(sfmmup); 9952 if (!(flags & TSB_SWAPIN)) 9953 SFMMU_STAT(sf_tsb_resize_failures); 9954 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9955 return (TSB_ALLOCFAIL); 9956 } 9957 (void) sfmmu_hat_enter(sfmmup); 9958 9959 /* 9960 * Re-check to make sure somebody else didn't muck with us while we 9961 * didn't hold the HAT lock. If the process swapped out, fine, just 9962 * exit; this can happen if we try to shrink the TSB from the context 9963 * of another process (such as on an ISM unmap), though it is rare. 9964 */ 9965 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9966 SFMMU_STAT(sf_tsb_resize_failures); 9967 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9968 sfmmu_hat_exit(hatlockp); 9969 sfmmu_tsbinfo_free(new_tsbinfo); 9970 (void) sfmmu_hat_enter(sfmmup); 9971 return (TSB_LOSTRACE); 9972 } 9973 9974 #ifdef DEBUG 9975 /* Reverify that the tsb_info still exists.. for debugging only */ 9976 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9977 curtsb != old_tsbinfo && curtsb != NULL; 9978 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9979 ; 9980 ASSERT(curtsb != NULL); 9981 #endif /* DEBUG */ 9982 9983 /* 9984 * Quiesce any CPUs running this process on their next TLB miss 9985 * so they atomically see the new tsb_info. We temporarily set the 9986 * context to invalid context so new threads that come on processor 9987 * after we do the xcall to cpusran will also serialize behind the 9988 * HAT lock on TLB miss and will see the new TSB. Since this short 9989 * race with a new thread coming on processor is relatively rare, 9990 * this synchronization mechanism should be cheaper than always 9991 * pausing all CPUs for the duration of the setup, which is what 9992 * the old implementation did. This is particuarly true if we are 9993 * copying a huge chunk of memory around during that window. 9994 * 9995 * The memory barriers are to make sure things stay consistent 9996 * with resume() since it does not hold the HAT lock while 9997 * walking the list of tsb_info structures. 9998 */ 9999 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 10000 /* The TSB is either growing or shrinking. */ 10001 sfmmu_invalidate_ctx(sfmmup); 10002 } else { 10003 /* 10004 * It is illegal to swap in TSBs from a process other 10005 * than a process being swapped in. This in turn 10006 * implies we do not have a valid MMU context here 10007 * since a process needs one to resolve translation 10008 * misses. 
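 *
 * Whichever path brought us here (resize or swapin), the splice below
 * must publish a fully initialized tsb_info before linking it in front
 * of the old one.  As an annotated sketch of that pattern:
 *
 *	new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
 *	membar_stst();			(initializing stores before the link)
 *	prevtsb->tsb_next = new_tsbinfo;   (or sfmmup->sfmmu_tsb = new_tsbinfo)
 *	membar_enter();			(link globally visible before we go on)
 *
 * which is what allows resume() to walk the tsb_info list without
 * holding the HAT lock.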
10009 */ 10010 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 10011 } 10012 10013 #ifdef DEBUG 10014 ASSERT(max_mmu_ctxdoms > 0); 10015 10016 /* 10017 * Process should have INVALID_CONTEXT on all MMUs 10018 */ 10019 for (i = 0; i < max_mmu_ctxdoms; i++) { 10020 10021 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 10022 } 10023 #endif 10024 10025 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 10026 membar_stst(); /* strict ordering required */ 10027 if (prevtsb) 10028 prevtsb->tsb_next = new_tsbinfo; 10029 else 10030 sfmmup->sfmmu_tsb = new_tsbinfo; 10031 membar_enter(); /* make sure new TSB globally visible */ 10032 10033 /* 10034 * We need to migrate TSB entries from the old TSB to the new TSB 10035 * if tsb_remap_ttes is set and the TSB is growing. 10036 */ 10037 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 10038 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 10039 10040 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10041 10042 /* 10043 * Drop the HAT lock to free our old tsb_info. 10044 */ 10045 sfmmu_hat_exit(hatlockp); 10046 10047 if ((flags & TSB_GROW) == TSB_GROW) { 10048 SFMMU_STAT(sf_tsb_grow); 10049 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 10050 SFMMU_STAT(sf_tsb_shrink); 10051 } 10052 10053 sfmmu_tsbinfo_free(old_tsbinfo); 10054 10055 (void) sfmmu_hat_enter(sfmmup); 10056 return (TSB_SUCCESS); 10057 } 10058 10059 /* 10060 * This function will re-program hat pgsz array, and invalidate the 10061 * process' context, forcing the process to switch to another 10062 * context on the next TLB miss, and therefore start using the 10063 * TLB that is reprogrammed for the new page sizes. 10064 */ 10065 void 10066 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 10067 { 10068 int i; 10069 hatlock_t *hatlockp = NULL; 10070 10071 hatlockp = sfmmu_hat_enter(sfmmup); 10072 /* USIII+-IV+ optimization, requires hat lock */ 10073 if (tmp_pgsz) { 10074 for (i = 0; i < mmu_page_sizes; i++) 10075 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10076 } 10077 SFMMU_STAT(sf_tlb_reprog_pgsz); 10078 10079 sfmmu_invalidate_ctx(sfmmup); 10080 10081 sfmmu_hat_exit(hatlockp); 10082 } 10083 10084 /* 10085 * The scd_rttecnt field in the SCD must be updated to take account of the 10086 * regions which it contains. 10087 */ 10088 static void 10089 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10090 { 10091 uint_t rid; 10092 uint_t i, j; 10093 ulong_t w; 10094 sf_region_t *rgnp; 10095 10096 ASSERT(srdp != NULL); 10097 10098 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10099 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10100 continue; 10101 } 10102 10103 j = 0; 10104 while (w) { 10105 if (!(w & 0x1)) { 10106 j++; 10107 w >>= 1; 10108 continue; 10109 } 10110 rid = (i << BT_ULSHIFT) | j; 10111 j++; 10112 w >>= 1; 10113 10114 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10115 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10116 rgnp = srdp->srd_hmergnp[rid]; 10117 ASSERT(rgnp->rgn_refcnt > 0); 10118 ASSERT(rgnp->rgn_id == rid); 10119 10120 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10121 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10122 10123 /* 10124 * Maintain the tsb0 inflation cnt for the regions 10125 * in the SCD. 
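 *
 * For illustration, with made-up numbers: a 256MB region of 4M pages
 * (whose rid, decoded from bit 3 of bitmap word 2, is
 * (2 << BT_ULSHIFT) | 3) adds 256MB >> TTE_PAGE_SHIFT(TTE4M) = 64
 * to scd_rttecnt[TTE4M] above, and 256MB >> (TTE_PAGE_SHIFT(TTE8K) + 2)
 * = 8192, i.e. one 8K tte slot for every four 8K pages, to the
 * sfmmu_tsb0_4minflcnt maintained below.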
10126 */ 10127 if (rgnp->rgn_pgszc >= TTE4M) { 10128 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10129 rgnp->rgn_size >> 10130 (TTE_PAGE_SHIFT(TTE8K) + 2); 10131 } 10132 } 10133 } 10134 } 10135 10136 /* 10137 * This function assumes that there are either four or six supported page 10138 * sizes and at most two programmable TLBs, so we need to decide which 10139 * page sizes are most important and then tell the MMU layer so it 10140 * can adjust the TLB page sizes accordingly (if supported). 10141 * 10142 * If these assumptions change, this function will need to be 10143 * updated to support whatever the new limits are. 10144 * 10145 * The growing flag is nonzero if we are growing the address space, 10146 * and zero if it is shrinking. This allows us to decide whether 10147 * to grow or shrink our TSB, depending upon available memory 10148 * conditions. 10149 */ 10150 static void 10151 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10152 { 10153 uint64_t ttecnt[MMU_PAGE_SIZES]; 10154 uint64_t tte8k_cnt, tte4m_cnt; 10155 uint8_t i; 10156 int sectsb_thresh; 10157 10158 /* 10159 * Kernel threads, processes with small address spaces not using 10160 * large pages, and dummy ISM HATs need not apply. 10161 */ 10162 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 10163 return; 10164 10165 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10166 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10167 return; 10168 10169 for (i = 0; i < mmu_page_sizes; i++) { 10170 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10171 sfmmup->sfmmu_ismttecnt[i]; 10172 } 10173 10174 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10175 if (&mmu_check_page_sizes) 10176 mmu_check_page_sizes(sfmmup, ttecnt); 10177 10178 /* 10179 * Calculate the number of 8k ttes to represent the span of these 10180 * pages. 10181 */ 10182 tte8k_cnt = ttecnt[TTE8K] + 10183 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10184 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10185 if (mmu_page_sizes == max_mmu_page_sizes) { 10186 tte4m_cnt = ttecnt[TTE4M] + 10187 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10188 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10189 } else { 10190 tte4m_cnt = ttecnt[TTE4M]; 10191 } 10192 10193 /* 10194 * Inflate tte8k_cnt to allow for region large page allocation failure. 10195 */ 10196 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10197 10198 /* 10199 * Inflate TSB sizes by a factor of 2 if this process 10200 * uses 4M text pages to minimize extra conflict misses 10201 * in the first TSB since without counting text pages 10202 * 8K TSB may become too small. 10203 * 10204 * Also double the size of the second TSB to minimize 10205 * extra conflict misses due to competition between 4M text pages 10206 * and data pages. 10207 * 10208 * We need to adjust the second TSB allocation threshold by the 10209 * inflation factor, since there is no point in creating a second 10210 * TSB when we know all the mappings can fit in the I/D TLBs. 10211 */ 10212 sectsb_thresh = tsb_sectsb_threshold; 10213 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10214 tte8k_cnt <<= 1; 10215 tte4m_cnt <<= 1; 10216 sectsb_thresh <<= 1; 10217 } 10218 10219 /* 10220 * Check to see if our TSB is the right size; we may need to 10221 * grow or shrink it. If the process is small, our work is 10222 * finished at this point. 
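 *
 * A worked example with made-up counts: 1000 8K ttes, 16 64K ttes and
 * 4 512K ttes give tte8k_cnt = 1000 + (16 << 3) + (4 << 6) = 1384
 * 8K-equivalents, while 10 4M ttes and 1 32M tte give
 * tte4m_cnt = 10 + (1 << 3) = 18 4M-equivalents.  With HAT_4MTEXT_FLAG
 * set, both counts and sectsb_thresh are doubled before the
 * comparison below.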
10223 */ 10224 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10225 return; 10226 } 10227 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10228 } 10229 10230 static void 10231 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10232 uint64_t tte4m_cnt, int sectsb_thresh) 10233 { 10234 int tsb_bits; 10235 uint_t tsb_szc; 10236 struct tsb_info *tsbinfop; 10237 hatlock_t *hatlockp = NULL; 10238 10239 hatlockp = sfmmu_hat_enter(sfmmup); 10240 ASSERT(hatlockp != NULL); 10241 tsbinfop = sfmmup->sfmmu_tsb; 10242 ASSERT(tsbinfop != NULL); 10243 10244 /* 10245 * If we're growing, select the size based on RSS. If we're 10246 * shrinking, leave some room so we don't have to turn around and 10247 * grow again immediately. 10248 */ 10249 if (growing) 10250 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10251 else 10252 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10253 10254 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10255 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10256 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10257 hatlockp, TSB_SHRINK); 10258 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10259 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10260 hatlockp, TSB_GROW); 10261 } 10262 tsbinfop = sfmmup->sfmmu_tsb; 10263 10264 /* 10265 * With the TLB and first TSB out of the way, we need to see if 10266 * we need a second TSB for 4M pages. If we managed to reprogram 10267 * the TLB page sizes above, the process will start using this new 10268 * TSB right away; otherwise, it will start using it on the next 10269 * context switch. Either way, it's no big deal so there's no 10270 * synchronization with the trap handlers here unless we grow the 10271 * TSB (in which case it's required to prevent using the old one 10272 * after it's freed). Note: second tsb is required for 32M/256M 10273 * page sizes. 10274 */ 10275 if (tte4m_cnt > sectsb_thresh) { 10276 /* 10277 * If we're growing, select the size based on RSS. If we're 10278 * shrinking, leave some room so we don't have to turn 10279 * around and grow again immediately. 10280 */ 10281 if (growing) 10282 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10283 else 10284 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10285 if (tsbinfop->tsb_next == NULL) { 10286 struct tsb_info *newtsb; 10287 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10288 0 : TSB_ALLOC; 10289 10290 sfmmu_hat_exit(hatlockp); 10291 10292 /* 10293 * Try to allocate a TSB for 4[32|256]M pages. If we 10294 * can't get the size we want, retry w/a minimum sized 10295 * TSB. If that still didn't work, give up; we can 10296 * still run without one. 10297 */ 10298 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10299 TSB4M|TSB32M|TSB256M:TSB4M; 10300 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10301 allocflags, sfmmup)) && 10302 (tsb_szc <= TSB_4M_SZCODE || 10303 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10304 tsb_bits, allocflags, sfmmup)) && 10305 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10306 tsb_bits, allocflags, sfmmup)) { 10307 return; 10308 } 10309 10310 hatlockp = sfmmu_hat_enter(sfmmup); 10311 10312 sfmmu_invalidate_ctx(sfmmup); 10313 10314 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10315 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10316 SFMMU_STAT(sf_tsb_sectsb_create); 10317 sfmmu_hat_exit(hatlockp); 10318 return; 10319 } else { 10320 /* 10321 * It's annoying, but possible for us 10322 * to get here.. 
we dropped the HAT lock 10323 * because of locking order in the kmem 10324 * allocator, and while we were off getting 10325 * our memory, some other thread decided to 10326 * do us a favor and won the race to get a 10327 * second TSB for this process. Sigh. 10328 */ 10329 sfmmu_hat_exit(hatlockp); 10330 sfmmu_tsbinfo_free(newtsb); 10331 return; 10332 } 10333 } 10334 10335 /* 10336 * We have a second TSB, see if it's big enough. 10337 */ 10338 tsbinfop = tsbinfop->tsb_next; 10339 10340 /* 10341 * Check to see if our second TSB is the right size; 10342 * we may need to grow or shrink it. 10343 * To prevent thrashing (e.g. growing the TSB on a 10344 * subsequent map operation), only try to shrink if 10345 * the TSB reach exceeds twice the virtual address 10346 * space size. 10347 */ 10348 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10349 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10350 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10351 tsb_szc, hatlockp, TSB_SHRINK); 10352 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10353 TSB_OK_GROW()) { 10354 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10355 tsb_szc, hatlockp, TSB_GROW); 10356 } 10357 } 10358 10359 sfmmu_hat_exit(hatlockp); 10360 } 10361 10362 /* 10363 * Free up a sfmmu 10364 * Since the sfmmu is currently embedded in the hat struct we simply zero 10365 * out our fields and free up the ism map blk list if any. 10366 */ 10367 static void 10368 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10369 { 10370 ism_blk_t *blkp, *nx_blkp; 10371 #ifdef DEBUG 10372 ism_map_t *map; 10373 int i; 10374 #endif 10375 10376 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10377 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10378 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10379 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10380 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10381 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10382 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10383 10384 sfmmup->sfmmu_free = 0; 10385 sfmmup->sfmmu_ismhat = 0; 10386 10387 blkp = sfmmup->sfmmu_iblk; 10388 sfmmup->sfmmu_iblk = NULL; 10389 10390 while (blkp) { 10391 #ifdef DEBUG 10392 map = blkp->iblk_maps; 10393 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10394 ASSERT(map[i].imap_seg == 0); 10395 ASSERT(map[i].imap_ismhat == NULL); 10396 ASSERT(map[i].imap_ment == NULL); 10397 } 10398 #endif 10399 nx_blkp = blkp->iblk_next; 10400 blkp->iblk_next = NULL; 10401 blkp->iblk_nextpa = (uint64_t)-1; 10402 kmem_cache_free(ism_blk_cache, blkp); 10403 blkp = nx_blkp; 10404 } 10405 } 10406 10407 /* 10408 * Locking primitves accessed by HATLOCK macros 10409 */ 10410 10411 #define SFMMU_SPL_MTX (0x0) 10412 #define SFMMU_ML_MTX (0x1) 10413 10414 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10415 SPL_HASH(pg) : MLIST_HASH(pg)) 10416 10417 kmutex_t * 10418 sfmmu_page_enter(struct page *pp) 10419 { 10420 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10421 } 10422 10423 void 10424 sfmmu_page_exit(kmutex_t *spl) 10425 { 10426 mutex_exit(spl); 10427 } 10428 10429 int 10430 sfmmu_page_spl_held(struct page *pp) 10431 { 10432 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10433 } 10434 10435 kmutex_t * 10436 sfmmu_mlist_enter(struct page *pp) 10437 { 10438 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10439 } 10440 10441 void 10442 sfmmu_mlist_exit(kmutex_t *mml) 10443 { 10444 mutex_exit(mml); 10445 } 10446 10447 int 10448 sfmmu_mlist_held(struct page *pp) 10449 { 10450 10451 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10452 } 10453 10454 /* 10455 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). 
For 10456 * sfmmu_mlist_enter() case mml_table lock array is used and for 10457 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10458 * 10459 * The lock is taken on a root page so that it protects an operation on all 10460 * constituent pages of a large page pp belongs to. 10461 * 10462 * The routine takes a lock from the appropriate array. The lock is determined 10463 * by hashing the root page. After taking the lock this routine checks if the 10464 * root page has the same size code that was used to determine the root (i.e 10465 * that root hasn't changed). If root page has the expected p_szc field we 10466 * have the right lock and it's returned to the caller. If root's p_szc 10467 * decreased we release the lock and retry from the beginning. This case can 10468 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10469 * value and taking the lock. The number of retries due to p_szc decrease is 10470 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10471 * determined by hashing pp itself. 10472 * 10473 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10474 * possible that p_szc can increase. To increase p_szc a thread has to lock 10475 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10476 * callers that don't hold a page locked recheck if hmeblk through which pp 10477 * was found still maps this pp. If it doesn't map it anymore returned lock 10478 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10479 * p_szc increase after taking the lock it returns this lock without further 10480 * retries because in this case the caller doesn't care about which lock was 10481 * taken. The caller will drop it right away. 10482 * 10483 * After the routine returns it's guaranteed that hat_page_demote() can't 10484 * change p_szc field of any of constituent pages of a large page pp belongs 10485 * to as long as pp was either locked at least SHARED prior to this call or 10486 * the caller finds that hment that pointed to this pp still references this 10487 * pp (this also assumes that the caller holds hme hash bucket lock so that 10488 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10489 * hat_pageunload()). 10490 */ 10491 static kmutex_t * 10492 sfmmu_mlspl_enter(struct page *pp, int type) 10493 { 10494 kmutex_t *mtx; 10495 uint_t prev_rszc = UINT_MAX; 10496 page_t *rootpp; 10497 uint_t szc; 10498 uint_t rszc; 10499 uint_t pszc = pp->p_szc; 10500 10501 ASSERT(pp != NULL); 10502 10503 again: 10504 if (pszc == 0) { 10505 mtx = SFMMU_MLSPL_MTX(type, pp); 10506 mutex_enter(mtx); 10507 return (mtx); 10508 } 10509 10510 /* The lock lives in the root page */ 10511 rootpp = PP_GROUPLEADER(pp, pszc); 10512 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10513 mutex_enter(mtx); 10514 10515 /* 10516 * Return mml in the following 3 cases: 10517 * 10518 * 1) If pp itself is root since if its p_szc decreased before we took 10519 * the lock pp is still the root of smaller szc page. And if its p_szc 10520 * increased it doesn't matter what lock we return (see comment in 10521 * front of this routine). 10522 * 10523 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10524 * large page we have the right lock since any previous potential 10525 * hat_page_demote() is done demoting from greater than current root's 10526 * p_szc because hat_page_demote() changes root's p_szc last. 
No 10527 * further hat_page_demote() can start or be in progress since it 10528 * would need the same lock we currently hold. 10529 * 10530 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10531 * matter what lock we return (see comment in front of this routine). 10532 */ 10533 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10534 rszc >= prev_rszc) { 10535 return (mtx); 10536 } 10537 10538 /* 10539 * hat_page_demote() could have decreased root's p_szc. 10540 * In this case pp's p_szc must also be smaller than pszc. 10541 * Retry. 10542 */ 10543 if (rszc < pszc) { 10544 szc = pp->p_szc; 10545 if (szc < pszc) { 10546 mutex_exit(mtx); 10547 pszc = szc; 10548 goto again; 10549 } 10550 /* 10551 * pp's p_szc increased after it was decreased. 10552 * page cannot be mapped. Return current lock. The caller 10553 * will drop it right away. 10554 */ 10555 return (mtx); 10556 } 10557 10558 /* 10559 * root's p_szc is greater than pp's p_szc. 10560 * hat_page_demote() is not done with all pages 10561 * yet. Wait for it to complete. 10562 */ 10563 mutex_exit(mtx); 10564 rootpp = PP_GROUPLEADER(rootpp, rszc); 10565 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10566 mutex_enter(mtx); 10567 mutex_exit(mtx); 10568 prev_rszc = rszc; 10569 goto again; 10570 } 10571 10572 static int 10573 sfmmu_mlspl_held(struct page *pp, int type) 10574 { 10575 kmutex_t *mtx; 10576 10577 ASSERT(pp != NULL); 10578 /* The lock lives in the root page */ 10579 pp = PP_PAGEROOT(pp); 10580 ASSERT(pp != NULL); 10581 10582 mtx = SFMMU_MLSPL_MTX(type, pp); 10583 return (MUTEX_HELD(mtx)); 10584 } 10585 10586 static uint_t 10587 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10588 { 10589 struct hme_blk *hblkp; 10590 10591 if (freehblkp != NULL) { 10592 mutex_enter(&freehblkp_lock); 10593 if (freehblkp != NULL) { 10594 /* 10595 * If the current thread is owning hblk_reserve OR 10596 * critical request from sfmmu_hblk_steal() 10597 * let it succeed even if freehblkcnt is really low. 10598 */ 10599 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10600 SFMMU_STAT(sf_get_free_throttle); 10601 mutex_exit(&freehblkp_lock); 10602 return (0); 10603 } 10604 freehblkcnt--; 10605 *hmeblkpp = freehblkp; 10606 hblkp = *hmeblkpp; 10607 freehblkp = hblkp->hblk_next; 10608 mutex_exit(&freehblkp_lock); 10609 hblkp->hblk_next = NULL; 10610 SFMMU_STAT(sf_get_free_success); 10611 return (1); 10612 } 10613 mutex_exit(&freehblkp_lock); 10614 } 10615 SFMMU_STAT(sf_get_free_fail); 10616 return (0); 10617 } 10618 10619 static uint_t 10620 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10621 { 10622 struct hme_blk *hblkp; 10623 10624 /* 10625 * If the current thread is mapping into kernel space, 10626 * let it succede even if freehblkcnt is max 10627 * so that it will avoid freeing it to kmem. 10628 * This will prevent stack overflow due to 10629 * possible recursion since kmem_cache_free() 10630 * might require creation of a slab which 10631 * in turn needs an hmeblk to map that slab; 10632 * let's break this vicious chain at the first 10633 * opportunity. 10634 */ 10635 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10636 mutex_enter(&freehblkp_lock); 10637 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10638 SFMMU_STAT(sf_put_free_success); 10639 freehblkcnt++; 10640 hmeblkp->hblk_next = freehblkp; 10641 freehblkp = hmeblkp; 10642 mutex_exit(&freehblkp_lock); 10643 return (1); 10644 } 10645 mutex_exit(&freehblkp_lock); 10646 } 10647 10648 /* 10649 * Bring down freehblkcnt to HBLK_RESERVE_CNT. 
We are here 10650 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10651 * we are not in the process of mapping into kernel space. 10652 */ 10653 ASSERT(!critical); 10654 while (freehblkcnt > HBLK_RESERVE_CNT) { 10655 mutex_enter(&freehblkp_lock); 10656 if (freehblkcnt > HBLK_RESERVE_CNT) { 10657 freehblkcnt--; 10658 hblkp = freehblkp; 10659 freehblkp = hblkp->hblk_next; 10660 mutex_exit(&freehblkp_lock); 10661 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10662 kmem_cache_free(sfmmu8_cache, hblkp); 10663 continue; 10664 } 10665 mutex_exit(&freehblkp_lock); 10666 } 10667 SFMMU_STAT(sf_put_free_fail); 10668 return (0); 10669 } 10670 10671 static void 10672 sfmmu_hblk_swap(struct hme_blk *new) 10673 { 10674 struct hme_blk *old, *hblkp, *prev; 10675 uint64_t hblkpa, prevpa, newpa; 10676 caddr_t base, vaddr, endaddr; 10677 struct hmehash_bucket *hmebp; 10678 struct sf_hment *osfhme, *nsfhme; 10679 page_t *pp; 10680 kmutex_t *pml; 10681 tte_t tte; 10682 10683 #ifdef DEBUG 10684 hmeblk_tag hblktag; 10685 struct hme_blk *found; 10686 #endif 10687 old = HBLK_RESERVE; 10688 ASSERT(!old->hblk_shared); 10689 10690 /* 10691 * save pa before bcopy clobbers it 10692 */ 10693 newpa = new->hblk_nextpa; 10694 10695 base = (caddr_t)get_hblk_base(old); 10696 endaddr = base + get_hblk_span(old); 10697 10698 /* 10699 * acquire hash bucket lock. 10700 */ 10701 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10702 SFMMU_INVALID_SHMERID); 10703 10704 /* 10705 * copy contents from old to new 10706 */ 10707 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10708 10709 /* 10710 * add new to hash chain 10711 */ 10712 sfmmu_hblk_hash_add(hmebp, new, newpa); 10713 10714 /* 10715 * search hash chain for hblk_reserve; this needs to be performed 10716 * after adding new, otherwise prevpa and prev won't correspond 10717 * to the hblk which is prior to old in hash chain when we call 10718 * sfmmu_hblk_hash_rm to remove old later. 10719 */ 10720 for (prevpa = 0, prev = NULL, 10721 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 10722 hblkp != NULL && hblkp != old; 10723 prevpa = hblkpa, prev = hblkp, 10724 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next) 10725 ; 10726 10727 if (hblkp != old) 10728 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10729 10730 /* 10731 * p_mapping list is still pointing to hments in hblk_reserve; 10732 * fix up p_mapping list so that they point to hments in new. 10733 * 10734 * Since all these mappings are created by hblk_reserve_thread 10735 * on the way and it's using at least one of the buffers from each of 10736 * the newly minted slabs, there is no danger of any of these 10737 * mappings getting unloaded by another thread. 10738 * 10739 * tsbmiss could only modify ref/mod bits of hments in old/new. 10740 * Since all of these hments hold mappings established by segkmem 10741 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10742 * have no meaning for the mappings in hblk_reserve. hments in 10743 * old and new are identical except for ref/mod bits. 
10744 */ 10745 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10746 10747 HBLKTOHME(osfhme, old, vaddr); 10748 sfmmu_copytte(&osfhme->hme_tte, &tte); 10749 10750 if (TTE_IS_VALID(&tte)) { 10751 if ((pp = osfhme->hme_page) == NULL) 10752 panic("sfmmu_hblk_swap: page not mapped"); 10753 10754 pml = sfmmu_mlist_enter(pp); 10755 10756 if (pp != osfhme->hme_page) 10757 panic("sfmmu_hblk_swap: mapping changed"); 10758 10759 HBLKTOHME(nsfhme, new, vaddr); 10760 10761 HME_ADD(nsfhme, pp); 10762 HME_SUB(osfhme, pp); 10763 10764 sfmmu_mlist_exit(pml); 10765 } 10766 } 10767 10768 /* 10769 * remove old from hash chain 10770 */ 10771 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 10772 10773 #ifdef DEBUG 10774 10775 hblktag.htag_id = ksfmmup; 10776 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10777 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10778 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10779 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10780 10781 if (found != new) 10782 panic("sfmmu_hblk_swap: new hblk not found"); 10783 #endif 10784 10785 SFMMU_HASH_UNLOCK(hmebp); 10786 10787 /* 10788 * Reset hblk_reserve 10789 */ 10790 bzero((void *)old, HME8BLK_SZ); 10791 old->hblk_nextpa = va_to_pa((caddr_t)old); 10792 } 10793 10794 /* 10795 * Grab the mlist mutex for both pages passed in. 10796 * 10797 * low and high will be returned as pointers to the mutexes for these pages. 10798 * low refers to the mutex residing in the lower bin of the mlist hash, while 10799 * high refers to the mutex residing in the higher bin of the mlist hash. This 10800 * is due to the locking order restrictions on the same thread grabbing 10801 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10802 * 10803 * If both pages hash to the same mutex, only grab that single mutex, and 10804 * high will be returned as NULL 10805 * If the pages hash to different bins in the hash, grab the lower addressed 10806 * lock first and then the higher addressed lock in order to follow the locking 10807 * rules involved with the same thread grabbing multiple mlist mutexes. 10808 * low and high will both have non-NULL values. 10809 */ 10810 static void 10811 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10812 kmutex_t **low, kmutex_t **high) 10813 { 10814 kmutex_t *mml_targ, *mml_repl; 10815 10816 /* 10817 * no need to do the dance around szc as in sfmmu_mlist_enter() 10818 * because this routine is only called by hat_page_relocate() and all 10819 * targ and repl pages are already locked EXCL so szc can't change. 
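 *
 * The ordering rule described in the block comment above reduces to a
 * pointer comparison; as a sketch of what follows:
 *
 *	if (mml_targ == mml_repl)
 *		take just that one mutex, set *high = NULL;
 *	else
 *		take MIN(mml_targ, mml_repl) first, then the other;
 *
 * so any two threads that need the same pair of mlist mutexes always
 * acquire them lowest address first and cannot deadlock on each other.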
10820 */ 10821 10822 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10823 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10824 10825 if (mml_targ == mml_repl) { 10826 *low = mml_targ; 10827 *high = NULL; 10828 } else { 10829 if (mml_targ < mml_repl) { 10830 *low = mml_targ; 10831 *high = mml_repl; 10832 } else { 10833 *low = mml_repl; 10834 *high = mml_targ; 10835 } 10836 } 10837 10838 mutex_enter(*low); 10839 if (*high) 10840 mutex_enter(*high); 10841 } 10842 10843 static void 10844 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 10845 { 10846 if (high) 10847 mutex_exit(high); 10848 mutex_exit(low); 10849 } 10850 10851 static hatlock_t * 10852 sfmmu_hat_enter(sfmmu_t *sfmmup) 10853 { 10854 hatlock_t *hatlockp; 10855 10856 if (sfmmup != ksfmmup) { 10857 hatlockp = TSB_HASH(sfmmup); 10858 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 10859 return (hatlockp); 10860 } 10861 return (NULL); 10862 } 10863 10864 static hatlock_t * 10865 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 10866 { 10867 hatlock_t *hatlockp; 10868 10869 if (sfmmup != ksfmmup) { 10870 hatlockp = TSB_HASH(sfmmup); 10871 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 10872 return (NULL); 10873 return (hatlockp); 10874 } 10875 return (NULL); 10876 } 10877 10878 static void 10879 sfmmu_hat_exit(hatlock_t *hatlockp) 10880 { 10881 if (hatlockp != NULL) 10882 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 10883 } 10884 10885 static void 10886 sfmmu_hat_lock_all(void) 10887 { 10888 int i; 10889 for (i = 0; i < SFMMU_NUM_LOCK; i++) 10890 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 10891 } 10892 10893 static void 10894 sfmmu_hat_unlock_all(void) 10895 { 10896 int i; 10897 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 10898 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 10899 } 10900 10901 int 10902 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 10903 { 10904 ASSERT(sfmmup != ksfmmup); 10905 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 10906 } 10907 10908 /* 10909 * Locking primitives to provide consistency between ISM unmap 10910 * and other operations. Since ISM unmap can take a long time, we 10911 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 10912 * contention on the hatlock buckets while ISM segments are being 10913 * unmapped. The tradeoff is that the flags don't prevent priority 10914 * inversion from occurring, so we must request kernel priority in 10915 * case we have to sleep to keep from getting buried while holding 10916 * the HAT_ISMBUSY flag set, which in turn could block other kernel 10917 * threads from running (for example, in sfmmu_uvatopfn()). 
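 *
 * A typical caller (the ISM unmap path, for example) brackets the
 * long-running operation roughly like this:
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);	(request kernel priority, sleep
 *					 while HAT_ISMBUSY is set, then
 *					 set it and drop the hatlock)
 *	... do the lengthy unmap work without holding the hatlock ...
 *	sfmmu_ismhat_exit(sfmmup, 0);	(clear HAT_ISMBUSY, cv_broadcast()
 *					 any waiters, release priority)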
10918 */ 10919 static void 10920 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 10921 { 10922 hatlock_t *hatlockp; 10923 10924 THREAD_KPRI_REQUEST(); 10925 if (!hatlock_held) 10926 hatlockp = sfmmu_hat_enter(sfmmup); 10927 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 10928 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10929 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 10930 if (!hatlock_held) 10931 sfmmu_hat_exit(hatlockp); 10932 } 10933 10934 static void 10935 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 10936 { 10937 hatlock_t *hatlockp; 10938 10939 if (!hatlock_held) 10940 hatlockp = sfmmu_hat_enter(sfmmup); 10941 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 10942 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 10943 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10944 if (!hatlock_held) 10945 sfmmu_hat_exit(hatlockp); 10946 THREAD_KPRI_RELEASE(); 10947 } 10948 10949 /* 10950 * 10951 * Algorithm: 10952 * 10953 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 10954 * hblks. 10955 * 10956 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 10957 * 10958 * (a) try to return an hblk from reserve pool of free hblks; 10959 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 10960 * and return hblk_reserve. 10961 * 10962 * (3) call kmem_cache_alloc() to allocate hblk; 10963 * 10964 * (a) if hblk_reserve_lock is held by the current thread, 10965 * atomically replace hblk_reserve by the hblk that is 10966 * returned by kmem_cache_alloc; release hblk_reserve_lock 10967 * and call kmem_cache_alloc() again. 10968 * (b) if reserve pool is not full, add the hblk that is 10969 * returned by kmem_cache_alloc to reserve pool and 10970 * call kmem_cache_alloc again. 10971 * 10972 */ 10973 static struct hme_blk * 10974 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 10975 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 10976 uint_t flags, uint_t rid) 10977 { 10978 struct hme_blk *hmeblkp = NULL; 10979 struct hme_blk *newhblkp; 10980 struct hme_blk *shw_hblkp = NULL; 10981 struct kmem_cache *sfmmu_cache = NULL; 10982 uint64_t hblkpa; 10983 ulong_t index; 10984 uint_t owner; /* set to 1 if using hblk_reserve */ 10985 uint_t forcefree; 10986 int sleep; 10987 sf_srd_t *srdp; 10988 sf_region_t *rgnp; 10989 10990 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10991 ASSERT(hblktag.htag_rid == rid); 10992 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 10993 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 10994 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 10995 10996 /* 10997 * If segkmem is not created yet, allocate from static hmeblks 10998 * created at the end of startup_modules(). See the block comment 10999 * in startup_modules() describing how we estimate the number of 11000 * static hmeblks that will be needed during re-map. 11001 */ 11002 if (!hblk_alloc_dynamic) { 11003 11004 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11005 11006 if (size == TTE8K) { 11007 index = nucleus_hblk8.index; 11008 if (index >= nucleus_hblk8.len) { 11009 /* 11010 * If we panic here, see startup_modules() to 11011 * make sure that we are calculating the 11012 * number of hblk8's that we need correctly. 11013 */ 11014 prom_panic("no nucleus hblk8 to allocate"); 11015 } 11016 hmeblkp = 11017 (struct hme_blk *)&nucleus_hblk8.list[index]; 11018 nucleus_hblk8.index++; 11019 SFMMU_STAT(sf_hblk8_nalloc); 11020 } else { 11021 index = nucleus_hblk1.index; 11022 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 11023 /* 11024 * If we panic here, see startup_modules(). 
11025 * Most likely you need to update the 11026 * calculation of the number of hblk1 elements 11027 * that the kernel needs to boot. 11028 */ 11029 prom_panic("no nucleus hblk1 to allocate"); 11030 } 11031 hmeblkp = 11032 (struct hme_blk *)&nucleus_hblk1.list[index]; 11033 nucleus_hblk1.index++; 11034 SFMMU_STAT(sf_hblk1_nalloc); 11035 } 11036 11037 goto hblk_init; 11038 } 11039 11040 SFMMU_HASH_UNLOCK(hmebp); 11041 11042 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 11043 if (mmu_page_sizes == max_mmu_page_sizes) { 11044 if (size < TTE256M) 11045 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11046 size, flags); 11047 } else { 11048 if (size < TTE4M) 11049 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11050 size, flags); 11051 } 11052 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11053 /* 11054 * Shared hmes use per region bitmaps in rgn_hmeflag 11055 * rather than shadow hmeblks to keep track of the 11056 * mapping sizes which have been allocated for the region. 11057 * Here we cleanup old invalid hmeblks with this rid, 11058 * which may be left around by pageunload(). 11059 */ 11060 int ttesz; 11061 caddr_t va; 11062 caddr_t eva = vaddr + TTEBYTES(size); 11063 11064 ASSERT(sfmmup != KHATID); 11065 11066 srdp = sfmmup->sfmmu_srdp; 11067 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11068 rgnp = srdp->srd_hmergnp[rid]; 11069 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11070 ASSERT(rgnp->rgn_refcnt != 0); 11071 ASSERT(size <= rgnp->rgn_pgszc); 11072 11073 ttesz = HBLK_MIN_TTESZ; 11074 do { 11075 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11076 continue; 11077 } 11078 11079 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11080 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11081 } else if (ttesz < size) { 11082 for (va = vaddr; va < eva; 11083 va += TTEBYTES(ttesz)) { 11084 sfmmu_cleanup_rhblk(srdp, va, rid, 11085 ttesz); 11086 } 11087 } 11088 } while (++ttesz <= rgnp->rgn_pgszc); 11089 } 11090 11091 fill_hblk: 11092 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11093 11094 if (owner && size == TTE8K) { 11095 11096 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11097 /* 11098 * We are really in a tight spot. We already own 11099 * hblk_reserve and we need another hblk. In anticipation 11100 * of this kind of scenario, we specifically set aside 11101 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11102 * by owner of hblk_reserve. 11103 */ 11104 SFMMU_STAT(sf_hblk_recurse_cnt); 11105 11106 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11107 panic("sfmmu_hblk_alloc: reserve list is empty"); 11108 11109 goto hblk_verify; 11110 } 11111 11112 ASSERT(!owner); 11113 11114 if ((flags & HAT_NO_KALLOC) == 0) { 11115 11116 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11117 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11118 11119 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11120 hmeblkp = sfmmu_hblk_steal(size); 11121 } else { 11122 /* 11123 * if we are the owner of hblk_reserve, 11124 * swap hblk_reserve with hmeblkp and 11125 * start a fresh life. Hope things go 11126 * better this time. 
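 *
 * Putting the whole reserve dance together, the sequence is roughly:
 * (a) while mapping a sfmmu8_cache slab with the reserve pool empty,
 * take hblk_reserve_lock and hand out hblk_reserve itself;
 * (b) when a later kmem_cache_alloc() by this thread succeeds,
 * sfmmu_hblk_swap() migrates hblk_reserve's live mappings into the
 * freshly allocated hblk; (c) hblk_reserve_lock is dropped and the
 * allocation is retried from fill_hblk with a clean slate.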
11127 */ 11128 if (hblk_reserve_thread == curthread) { 11129 ASSERT(sfmmu_cache == sfmmu8_cache); 11130 sfmmu_hblk_swap(hmeblkp); 11131 hblk_reserve_thread = NULL; 11132 mutex_exit(&hblk_reserve_lock); 11133 goto fill_hblk; 11134 } 11135 /* 11136 * let's donate this hblk to our reserve list if 11137 * we are not mapping kernel range 11138 */ 11139 if (size == TTE8K && sfmmup != KHATID) 11140 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11141 goto fill_hblk; 11142 } 11143 } else { 11144 /* 11145 * We are here to map the slab in sfmmu8_cache; let's 11146 * check if we could tap our reserve list; if successful, 11147 * this will avoid the pain of going thru sfmmu_hblk_swap 11148 */ 11149 SFMMU_STAT(sf_hblk_slab_cnt); 11150 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11151 /* 11152 * let's start hblk_reserve dance 11153 */ 11154 SFMMU_STAT(sf_hblk_reserve_cnt); 11155 owner = 1; 11156 mutex_enter(&hblk_reserve_lock); 11157 hmeblkp = HBLK_RESERVE; 11158 hblk_reserve_thread = curthread; 11159 } 11160 } 11161 11162 hblk_verify: 11163 ASSERT(hmeblkp != NULL); 11164 set_hblk_sz(hmeblkp, size); 11165 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11166 SFMMU_HASH_LOCK(hmebp); 11167 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11168 if (newhblkp != NULL) { 11169 SFMMU_HASH_UNLOCK(hmebp); 11170 if (hmeblkp != HBLK_RESERVE) { 11171 /* 11172 * This is really tricky! 11173 * 11174 * vmem_alloc(vmem_seg_arena) 11175 * vmem_alloc(vmem_internal_arena) 11176 * segkmem_alloc(heap_arena) 11177 * vmem_alloc(heap_arena) 11178 * page_create() 11179 * hat_memload() 11180 * kmem_cache_free() 11181 * kmem_cache_alloc() 11182 * kmem_slab_create() 11183 * vmem_alloc(kmem_internal_arena) 11184 * segkmem_alloc(heap_arena) 11185 * vmem_alloc(heap_arena) 11186 * page_create() 11187 * hat_memload() 11188 * kmem_cache_free() 11189 * ... 11190 * 11191 * Thus, hat_memload() could call kmem_cache_free 11192 * for enough number of times that we could easily 11193 * hit the bottom of the stack or run out of reserve 11194 * list of vmem_seg structs. So, we must donate 11195 * this hblk to reserve list if it's allocated 11196 * from sfmmu8_cache *and* mapping kernel range. 11197 * We don't need to worry about freeing hmeblk1's 11198 * to kmem since they don't map any kmem slabs. 11199 * 11200 * Note: When segkmem supports largepages, we must 11201 * free hmeblk1's to reserve list as well. 11202 */ 11203 forcefree = (sfmmup == KHATID) ? 1 : 0; 11204 if (size == TTE8K && 11205 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11206 goto re_verify; 11207 } 11208 ASSERT(sfmmup != KHATID); 11209 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11210 } else { 11211 /* 11212 * Hey! we don't need hblk_reserve any more. 11213 */ 11214 ASSERT(owner); 11215 hblk_reserve_thread = NULL; 11216 mutex_exit(&hblk_reserve_lock); 11217 owner = 0; 11218 } 11219 re_verify: 11220 /* 11221 * let's check if the goodies are still present 11222 */ 11223 SFMMU_HASH_LOCK(hmebp); 11224 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11225 if (newhblkp != NULL) { 11226 /* 11227 * return newhblkp if it's not hblk_reserve; 11228 * if newhblkp is hblk_reserve, return it 11229 * _only if_ we are the owner of hblk_reserve. 
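 *
 * The "not the owner" branch below uses hblk_reserve_lock purely as a
 * rendezvous:
 *
 *	SFMMU_HASH_UNLOCK(hmebp);
 *	mutex_enter(&hblk_reserve_lock);   (blocks until the owner has
 *	mutex_exit(&hblk_reserve_lock);	    finished swapping it out)
 *	goto fill_hblk;
 *
 * i.e. we only wait for hblk_reserve_thread to complete the swap and
 * then restart the allocation from scratch.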
11230 */ 11231 if (newhblkp != HBLK_RESERVE || owner) { 11232 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11233 newhblkp->hblk_shared); 11234 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11235 !newhblkp->hblk_shared); 11236 return (newhblkp); 11237 } else { 11238 /* 11239 * we just hit hblk_reserve in the hash and 11240 * we are not the owner of that; 11241 * 11242 * block until hblk_reserve_thread completes 11243 * swapping hblk_reserve and try the dance 11244 * once again. 11245 */ 11246 SFMMU_HASH_UNLOCK(hmebp); 11247 mutex_enter(&hblk_reserve_lock); 11248 mutex_exit(&hblk_reserve_lock); 11249 SFMMU_STAT(sf_hblk_reserve_hit); 11250 goto fill_hblk; 11251 } 11252 } else { 11253 /* 11254 * it's no more! try the dance once again. 11255 */ 11256 SFMMU_HASH_UNLOCK(hmebp); 11257 goto fill_hblk; 11258 } 11259 } 11260 11261 hblk_init: 11262 if (SFMMU_IS_SHMERID_VALID(rid)) { 11263 uint16_t tteflag = 0x1 << 11264 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size); 11265 11266 if (!(rgnp->rgn_hmeflags & tteflag)) { 11267 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11268 } 11269 hmeblkp->hblk_shared = 1; 11270 } else { 11271 hmeblkp->hblk_shared = 0; 11272 } 11273 set_hblk_sz(hmeblkp, size); 11274 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11275 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11276 hmeblkp->hblk_tag = hblktag; 11277 hmeblkp->hblk_shadow = shw_hblkp; 11278 hblkpa = hmeblkp->hblk_nextpa; 11279 hmeblkp->hblk_nextpa = 0; 11280 11281 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11282 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11283 ASSERT(hmeblkp->hblk_hmecnt == 0); 11284 ASSERT(hmeblkp->hblk_vcnt == 0); 11285 ASSERT(hmeblkp->hblk_lckcnt == 0); 11286 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11287 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11288 return (hmeblkp); 11289 } 11290 11291 /* 11292 * This function performs any cleanup required on the hme_blk 11293 * and returns it to the free list. 11294 */ 11295 /* ARGSUSED */ 11296 static void 11297 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11298 uint64_t hblkpa, struct hme_blk **listp) 11299 { 11300 int shw_size, vshift; 11301 struct hme_blk *shw_hblkp; 11302 uint_t shw_mask, newshw_mask; 11303 caddr_t vaddr; 11304 int size; 11305 uint_t critical; 11306 11307 ASSERT(hmeblkp); 11308 ASSERT(!hmeblkp->hblk_hmecnt); 11309 ASSERT(!hmeblkp->hblk_vcnt); 11310 ASSERT(!hmeblkp->hblk_lckcnt); 11311 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11312 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11313 11314 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 11315 11316 size = get_hblk_ttesz(hmeblkp); 11317 shw_hblkp = hmeblkp->hblk_shadow; 11318 if (shw_hblkp) { 11319 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 11320 ASSERT(!hmeblkp->hblk_shared); 11321 if (mmu_page_sizes == max_mmu_page_sizes) { 11322 ASSERT(size < TTE256M); 11323 } else { 11324 ASSERT(size < TTE4M); 11325 } 11326 11327 shw_size = get_hblk_ttesz(shw_hblkp); 11328 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11329 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11330 ASSERT(vshift < 8); 11331 /* 11332 * Atomically clear shadow mask bit 11333 */ 11334 do { 11335 shw_mask = shw_hblkp->hblk_shw_mask; 11336 ASSERT(shw_mask & (1 << vshift)); 11337 newshw_mask = shw_mask & ~(1 << vshift); 11338 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 11339 shw_mask, newshw_mask); 11340 } while (newshw_mask != shw_mask); 11341 hmeblkp->hblk_shadow = NULL; 11342 } 11343 hmeblkp->hblk_next = NULL; 11344 hmeblkp->hblk_nextpa = hblkpa; 11345 hmeblkp->hblk_shw_bit = 0; 11346 11347 if (hmeblkp->hblk_shared) { 11348 sf_srd_t *srdp; 11349 sf_region_t *rgnp; 11350 uint_t rid; 11351 11352 srdp = hblktosrd(hmeblkp); 11353 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11354 rid = hmeblkp->hblk_tag.htag_rid; 11355 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11356 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11357 rgnp = srdp->srd_hmergnp[rid]; 11358 ASSERT(rgnp != NULL); 11359 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11360 hmeblkp->hblk_shared = 0; 11361 } 11362 11363 if (hmeblkp->hblk_nuc_bit == 0) { 11364 11365 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 11366 return; 11367 11368 hmeblkp->hblk_next = *listp; 11369 *listp = hmeblkp; 11370 } 11371 } 11372 11373 static void 11374 sfmmu_hblks_list_purge(struct hme_blk **listp) 11375 { 11376 struct hme_blk *hmeblkp; 11377 11378 while ((hmeblkp = *listp) != NULL) { 11379 *listp = hmeblkp->hblk_next; 11380 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11381 } 11382 } 11383 11384 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11385 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11386 11387 static uint_t sfmmu_hblk_steal_twice; 11388 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11389 11390 /* 11391 * Steal a hmeblk from user or kernel hme hash lists. 11392 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11393 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11394 * tap into critical reserve of freehblkp. 11395 * Note: We remain looping in this routine until we find one. 11396 */ 11397 static struct hme_blk * 11398 sfmmu_hblk_steal(int size) 11399 { 11400 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11401 struct hmehash_bucket *hmebp; 11402 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11403 uint64_t hblkpa, prevpa; 11404 int i; 11405 uint_t loop_cnt = 0, critical; 11406 11407 for (;;) { 11408 if (size == TTE8K) { 11409 critical = 11410 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11411 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11412 return (hmeblkp); 11413 } 11414 11415 hmebp = (uhmehash_steal_hand == NULL) ? 
uhme_hash : 11416 uhmehash_steal_hand; 11417 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11418 11419 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11420 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11421 SFMMU_HASH_LOCK(hmebp); 11422 hmeblkp = hmebp->hmeblkp; 11423 hblkpa = hmebp->hmeh_nextpa; 11424 prevpa = 0; 11425 pr_hblk = NULL; 11426 while (hmeblkp) { 11427 /* 11428 * check if it is a hmeblk that is not locked 11429 * and not shared. skip shadow hmeblks with 11430 * shadow_mask set i.e valid count non zero. 11431 */ 11432 if ((get_hblk_ttesz(hmeblkp) == size) && 11433 (hmeblkp->hblk_shw_bit == 0 || 11434 hmeblkp->hblk_vcnt == 0) && 11435 (hmeblkp->hblk_lckcnt == 0)) { 11436 /* 11437 * there is a high probability that we 11438 * will find a free one. search some 11439 * buckets for a free hmeblk initially 11440 * before unloading a valid hmeblk. 11441 */ 11442 if ((hmeblkp->hblk_vcnt == 0 && 11443 hmeblkp->hblk_hmecnt == 0) || (i >= 11444 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11445 if (sfmmu_steal_this_hblk(hmebp, 11446 hmeblkp, hblkpa, prevpa, 11447 pr_hblk)) { 11448 /* 11449 * Hblk is unloaded 11450 * successfully 11451 */ 11452 break; 11453 } 11454 } 11455 } 11456 pr_hblk = hmeblkp; 11457 prevpa = hblkpa; 11458 hblkpa = hmeblkp->hblk_nextpa; 11459 hmeblkp = hmeblkp->hblk_next; 11460 } 11461 11462 SFMMU_HASH_UNLOCK(hmebp); 11463 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11464 hmebp = uhme_hash; 11465 } 11466 uhmehash_steal_hand = hmebp; 11467 11468 if (hmeblkp != NULL) 11469 break; 11470 11471 /* 11472 * in the worst case, look for a free one in the kernel 11473 * hash table. 11474 */ 11475 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11476 SFMMU_HASH_LOCK(hmebp); 11477 hmeblkp = hmebp->hmeblkp; 11478 hblkpa = hmebp->hmeh_nextpa; 11479 prevpa = 0; 11480 pr_hblk = NULL; 11481 while (hmeblkp) { 11482 /* 11483 * check if it is free hmeblk 11484 */ 11485 if ((get_hblk_ttesz(hmeblkp) == size) && 11486 (hmeblkp->hblk_lckcnt == 0) && 11487 (hmeblkp->hblk_vcnt == 0) && 11488 (hmeblkp->hblk_hmecnt == 0)) { 11489 if (sfmmu_steal_this_hblk(hmebp, 11490 hmeblkp, hblkpa, prevpa, pr_hblk)) { 11491 break; 11492 } else { 11493 /* 11494 * Cannot fail since we have 11495 * hash lock. 11496 */ 11497 panic("fail to steal?"); 11498 } 11499 } 11500 11501 pr_hblk = hmeblkp; 11502 prevpa = hblkpa; 11503 hblkpa = hmeblkp->hblk_nextpa; 11504 hmeblkp = hmeblkp->hblk_next; 11505 } 11506 11507 SFMMU_HASH_UNLOCK(hmebp); 11508 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11509 hmebp = khme_hash; 11510 } 11511 11512 if (hmeblkp != NULL) 11513 break; 11514 sfmmu_hblk_steal_twice++; 11515 } 11516 return (hmeblkp); 11517 } 11518 11519 /* 11520 * This routine does real work to prepare a hblk to be "stolen" by 11521 * unloading the mappings, updating shadow counts .... 11522 * It returns 1 if the block is ready to be reused (stolen), or 0 11523 * means the block cannot be stolen yet- pageunload is still working 11524 * on this hblk. 
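 *
 * When the hblk being stolen sits under a shadow hblk, its bit in the
 * shadow mask is cleared with a compare-and-swap retry loop (the same
 * pattern appears in sfmmu_hblk_free()).  With hypothetical values
 * shw_mask = 0x0a and vshift = 1:
 *
 *	newshw_mask = 0x0a & ~(1 << 1) = 0x08
 *	cas32(&shw_hblkp->hblk_shw_mask, 0x0a, 0x08)
 *
 * If another CPU changed the mask in the meantime, cas32() returns the
 * value it observed rather than 0x0a and the loop simply recomputes
 * and retries.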
11525 */ 11526 static int 11527 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11528 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 11529 { 11530 int shw_size, vshift; 11531 struct hme_blk *shw_hblkp; 11532 caddr_t vaddr; 11533 uint_t shw_mask, newshw_mask; 11534 11535 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11536 11537 /* 11538 * check if the hmeblk is free, unload if necessary 11539 */ 11540 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11541 sfmmu_t *sfmmup; 11542 demap_range_t dmr; 11543 11544 sfmmup = hblktosfmmu(hmeblkp); 11545 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11546 return (0); 11547 } 11548 DEMAP_RANGE_INIT(sfmmup, &dmr); 11549 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11550 (caddr_t)get_hblk_base(hmeblkp), 11551 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11552 DEMAP_RANGE_FLUSH(&dmr); 11553 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11554 /* 11555 * Pageunload is working on the same hblk. 11556 */ 11557 return (0); 11558 } 11559 11560 sfmmu_hblk_steal_unload_count++; 11561 } 11562 11563 ASSERT(hmeblkp->hblk_lckcnt == 0); 11564 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11565 11566 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 11567 hmeblkp->hblk_nextpa = hblkpa; 11568 11569 shw_hblkp = hmeblkp->hblk_shadow; 11570 if (shw_hblkp) { 11571 ASSERT(!hmeblkp->hblk_shared); 11572 shw_size = get_hblk_ttesz(shw_hblkp); 11573 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11574 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11575 ASSERT(vshift < 8); 11576 /* 11577 * Atomically clear shadow mask bit 11578 */ 11579 do { 11580 shw_mask = shw_hblkp->hblk_shw_mask; 11581 ASSERT(shw_mask & (1 << vshift)); 11582 newshw_mask = shw_mask & ~(1 << vshift); 11583 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 11584 shw_mask, newshw_mask); 11585 } while (newshw_mask != shw_mask); 11586 hmeblkp->hblk_shadow = NULL; 11587 } 11588 11589 /* 11590 * remove shadow bit if we are stealing an unused shadow hmeblk. 11591 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11592 * we are indeed allocating a shadow hmeblk. 11593 */ 11594 hmeblkp->hblk_shw_bit = 0; 11595 11596 if (hmeblkp->hblk_shared) { 11597 sf_srd_t *srdp; 11598 sf_region_t *rgnp; 11599 uint_t rid; 11600 11601 srdp = hblktosrd(hmeblkp); 11602 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11603 rid = hmeblkp->hblk_tag.htag_rid; 11604 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11605 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11606 rgnp = srdp->srd_hmergnp[rid]; 11607 ASSERT(rgnp != NULL); 11608 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11609 hmeblkp->hblk_shared = 0; 11610 } 11611 11612 sfmmu_hblk_steal_count++; 11613 SFMMU_STAT(sf_steal_count); 11614 11615 return (1); 11616 } 11617 11618 struct hme_blk * 11619 sfmmu_hmetohblk(struct sf_hment *sfhme) 11620 { 11621 struct hme_blk *hmeblkp; 11622 struct sf_hment *sfhme0; 11623 struct hme_blk *hblk_dummy = 0; 11624 11625 /* 11626 * No dummy sf_hments, please. 11627 */ 11628 ASSERT(sfhme->hme_tte.ll != 0); 11629 11630 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11631 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11632 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11633 11634 return (hmeblkp); 11635 } 11636 11637 /* 11638 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11639 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11640 * KM_SLEEP allocation. 11641 * 11642 * Return 0 on success, -1 otherwise. 
11643 */ 11644 static void 11645 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11646 { 11647 struct tsb_info *tsbinfop, *next; 11648 tsb_replace_rc_t rc; 11649 boolean_t gotfirst = B_FALSE; 11650 11651 ASSERT(sfmmup != ksfmmup); 11652 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11653 11654 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11655 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11656 } 11657 11658 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11659 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11660 } else { 11661 return; 11662 } 11663 11664 ASSERT(sfmmup->sfmmu_tsb != NULL); 11665 11666 /* 11667 * Loop over all tsbinfo's replacing them with ones that actually have 11668 * a TSB. If any of the replacements ever fail, bail out of the loop. 11669 */ 11670 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11671 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11672 next = tsbinfop->tsb_next; 11673 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11674 hatlockp, TSB_SWAPIN); 11675 if (rc != TSB_SUCCESS) { 11676 break; 11677 } 11678 gotfirst = B_TRUE; 11679 } 11680 11681 switch (rc) { 11682 case TSB_SUCCESS: 11683 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11684 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11685 return; 11686 case TSB_LOSTRACE: 11687 break; 11688 case TSB_ALLOCFAIL: 11689 break; 11690 default: 11691 panic("sfmmu_replace_tsb returned unrecognized failure code " 11692 "%d", rc); 11693 } 11694 11695 /* 11696 * In this case, we failed to get one of our TSBs. If we failed to 11697 * get the first TSB, get one of minimum size (8KB). Walk the list 11698 * and throw away the tsbinfos, starting where the allocation failed; 11699 * we can get by with just one TSB as long as we don't leave the 11700 * SWAPPED tsbinfo structures lying around. 11701 */ 11702 tsbinfop = sfmmup->sfmmu_tsb; 11703 next = tsbinfop->tsb_next; 11704 tsbinfop->tsb_next = NULL; 11705 11706 sfmmu_hat_exit(hatlockp); 11707 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 11708 next = tsbinfop->tsb_next; 11709 sfmmu_tsbinfo_free(tsbinfop); 11710 } 11711 hatlockp = sfmmu_hat_enter(sfmmup); 11712 11713 /* 11714 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 11715 * pages. 11716 */ 11717 if (!gotfirst) { 11718 tsbinfop = sfmmup->sfmmu_tsb; 11719 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 11720 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 11721 ASSERT(rc == TSB_SUCCESS); 11722 } 11723 11724 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11725 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11726 } 11727 11728 static int 11729 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw) 11730 { 11731 ulong_t bix = 0; 11732 uint_t rid; 11733 sf_region_t *rgnp; 11734 11735 ASSERT(srdp != NULL); 11736 ASSERT(srdp->srd_refcnt != 0); 11737 11738 w <<= BT_ULSHIFT; 11739 while (bmw) { 11740 if (!(bmw & 0x1)) { 11741 bix++; 11742 bmw >>= 1; 11743 continue; 11744 } 11745 rid = w | bix; 11746 rgnp = srdp->srd_hmergnp[rid]; 11747 ASSERT(rgnp->rgn_refcnt > 0); 11748 ASSERT(rgnp->rgn_id == rid); 11749 if (addr < rgnp->rgn_saddr || 11750 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) { 11751 bix++; 11752 bmw >>= 1; 11753 } else { 11754 return (1); 11755 } 11756 } 11757 return (0); 11758 } 11759 11760 /* 11761 * Handle exceptions for low level tsb_handler. 11762 * 11763 * There are many scenarios that could land us here: 11764 * 11765 * If the context is invalid we land here. 
The context can be invalid
11766 * for 3 reasons: 1) we couldn't allocate a new context and now need to
11767 * perform a wrap around operation in order to allocate a new context;
11768 * 2) Context was invalidated to change pagesize programming; 3) ISM or
11769 * TSB configuration is changing for this process and we are forced into
11770 * here to do a synchronization operation. If the context is valid we can
11771 * be here from the window trap handler. In this case just call trap to handle
11772 * the fault.
11773 *
11774 * Note that the process will run in INVALID_CONTEXT before
11775 * faulting into here and subsequently loading the MMU registers
11776 * (including the TSB base register) associated with this process.
11777 * For this reason, the trap handlers must all test for
11778 * INVALID_CONTEXT before attempting to access any registers other
11779 * than the context registers.
11780 */
11781 void
11782 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11783 {
11784 sfmmu_t *sfmmup, *shsfmmup;
11785 uint_t ctxtype;
11786 klwp_id_t lwp;
11787 char lwp_save_state;
11788 hatlock_t *hatlockp, *shatlockp;
11789 struct tsb_info *tsbinfop;
11790 struct tsbmiss *tsbmp;
11791 sf_scd_t *scdp;
11792
11793 SFMMU_STAT(sf_tsb_exceptions);
11794 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11795 sfmmup = astosfmmu(curthread->t_procp->p_as);
11796 /*
11797 * note that in sun4u, the tagaccess register contains ctxnum
11798 * while sun4v passes ctxtype in the tagaccess register.
11799 */
11800 ctxtype = tagaccess & TAGACC_CTX_MASK;
11801
11802 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11803 ASSERT(sfmmup->sfmmu_ismhat == 0);
11804 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11805 ctxtype == INVALID_CONTEXT);
11806
11807 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11808 /*
11809 * We may land here because shme bitmap and pagesize
11810 * flags are updated lazily in tsbmiss area on other cpus.
11811 * If we detect here that the tsbmiss area is out of sync with
11812 * sfmmu, update it and retry the trapped instruction.
11813 * Otherwise call trap().
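 * In other words, ret is set to 1 below only when this cpu's tsbmiss
 * area had to be refreshed (stale tte flag copies, or a stale shme
 * region bitmap word that covers the faulting address); in that case
 * we simply return and let the trapped instruction retry.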
11814 */ 11815 int ret = 0; 11816 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11817 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11818 11819 /* 11820 * Must set lwp state to LWP_SYS before 11821 * trying to acquire any adaptive lock 11822 */ 11823 lwp = ttolwp(curthread); 11824 ASSERT(lwp); 11825 lwp_save_state = lwp->lwp_state; 11826 lwp->lwp_state = LWP_SYS; 11827 11828 hatlockp = sfmmu_hat_enter(sfmmup); 11829 kpreempt_disable(); 11830 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11831 ASSERT(sfmmup == tsbmp->usfmmup); 11832 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11833 ~tteflag_mask) || 11834 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11835 ~tteflag_mask)) { 11836 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11837 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11838 ret = 1; 11839 } 11840 if (sfmmup->sfmmu_srdp != NULL) { 11841 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11842 ulong_t *tm = tsbmp->shmermap; 11843 ulong_t i; 11844 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11845 ulong_t d = tm[i] ^ sm[i]; 11846 if (d) { 11847 if (d & sm[i]) { 11848 if (!ret && sfmmu_is_rgnva( 11849 sfmmup->sfmmu_srdp, 11850 addr, i, d & sm[i])) { 11851 ret = 1; 11852 } 11853 } 11854 tm[i] = sm[i]; 11855 } 11856 } 11857 } 11858 kpreempt_enable(); 11859 sfmmu_hat_exit(hatlockp); 11860 lwp->lwp_state = lwp_save_state; 11861 if (ret) { 11862 return; 11863 } 11864 } else if (ctxtype == INVALID_CONTEXT) { 11865 /* 11866 * First, make sure we come out of here with a valid ctx, 11867 * since if we don't get one we'll simply loop on the 11868 * faulting instruction. 11869 * 11870 * If the ISM mappings are changing, the TSB is relocated, 11871 * the process is swapped, the process is joining SCD or 11872 * leaving SCD or shared regions we serialize behind the 11873 * controlling thread with hat lock, sfmmu_flags and 11874 * sfmmu_tsb_cv condition variable. 11875 */ 11876 11877 /* 11878 * Must set lwp state to LWP_SYS before 11879 * trying to acquire any adaptive lock 11880 */ 11881 lwp = ttolwp(curthread); 11882 ASSERT(lwp); 11883 lwp_save_state = lwp->lwp_state; 11884 lwp->lwp_state = LWP_SYS; 11885 11886 hatlockp = sfmmu_hat_enter(sfmmup); 11887 retry: 11888 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 11889 shsfmmup = scdp->scd_sfmmup; 11890 ASSERT(shsfmmup != NULL); 11891 11892 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 11893 tsbinfop = tsbinfop->tsb_next) { 11894 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11895 /* drop the private hat lock */ 11896 sfmmu_hat_exit(hatlockp); 11897 /* acquire the shared hat lock */ 11898 shatlockp = sfmmu_hat_enter(shsfmmup); 11899 /* 11900 * recheck to see if anything changed 11901 * after we drop the private hat lock. 11902 */ 11903 if (sfmmup->sfmmu_scdp == scdp && 11904 shsfmmup == scdp->scd_sfmmup) { 11905 sfmmu_tsb_chk_reloc(shsfmmup, 11906 shatlockp); 11907 } 11908 sfmmu_hat_exit(shatlockp); 11909 hatlockp = sfmmu_hat_enter(sfmmup); 11910 goto retry; 11911 } 11912 } 11913 } 11914 11915 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 11916 tsbinfop = tsbinfop->tsb_next) { 11917 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11918 cv_wait(&sfmmup->sfmmu_tsb_cv, 11919 HATLOCK_MUTEXP(hatlockp)); 11920 goto retry; 11921 } 11922 } 11923 11924 /* 11925 * Wait for ISM maps to be updated. 11926 */ 11927 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 11928 cv_wait(&sfmmup->sfmmu_tsb_cv, 11929 HATLOCK_MUTEXP(hatlockp)); 11930 goto retry; 11931 } 11932 11933 /* Is this process joining an SCD? 
*/ 11934 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 11935 /* 11936 * Flush private TSB and setup shared TSB. 11937 * sfmmu_finish_join_scd() does not drop the 11938 * hat lock. 11939 */ 11940 sfmmu_finish_join_scd(sfmmup); 11941 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 11942 } 11943 11944 /* 11945 * If we're swapping in, get TSB(s). Note that we must do 11946 * this before we get a ctx or load the MMU state. Once 11947 * we swap in we have to recheck to make sure the TSB(s) and 11948 * ISM mappings didn't change while we slept. 11949 */ 11950 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11951 sfmmu_tsb_swapin(sfmmup, hatlockp); 11952 goto retry; 11953 } 11954 11955 sfmmu_get_ctx(sfmmup); 11956 11957 sfmmu_hat_exit(hatlockp); 11958 /* 11959 * Must restore lwp_state if not calling 11960 * trap() for further processing. Restore 11961 * it anyway. 11962 */ 11963 lwp->lwp_state = lwp_save_state; 11964 return; 11965 } 11966 trap(rp, (caddr_t)tagaccess, traptype, 0); 11967 } 11968 11969 static void 11970 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11971 { 11972 struct tsb_info *tp; 11973 11974 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11975 11976 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) { 11977 if (tp->tsb_flags & TSB_RELOC_FLAG) { 11978 cv_wait(&sfmmup->sfmmu_tsb_cv, 11979 HATLOCK_MUTEXP(hatlockp)); 11980 break; 11981 } 11982 } 11983 } 11984 11985 /* 11986 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 11987 * TTE_SUSPENDED bit set in tte we block on aquiring a page lock 11988 * rather than spinning to avoid send mondo timeouts with 11989 * interrupts enabled. When the lock is acquired it is immediately 11990 * released and we return back to sfmmu_vatopfn just after 11991 * the GET_TTE call. 11992 */ 11993 void 11994 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 11995 { 11996 struct page **pp; 11997 11998 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 11999 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 12000 } 12001 12002 /* 12003 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 12004 * TTE_SUSPENDED bit set in tte. We do this so that we can handle 12005 * cross traps which cannot be handled while spinning in the 12006 * trap handlers. Simply enter and exit the kpr_suspendlock spin 12007 * mutex, which is held by the holder of the suspend bit, and then 12008 * retry the trapped instruction after unwinding. 12009 */ 12010 /*ARGSUSED*/ 12011 void 12012 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 12013 { 12014 ASSERT(curthread != kreloc_thread); 12015 mutex_enter(&kpr_suspendlock); 12016 mutex_exit(&kpr_suspendlock); 12017 } 12018 12019 /* 12020 * This routine could be optimized to reduce the number of xcalls by flushing 12021 * the entire TLBs if region reference count is above some threshold but the 12022 * tradeoff will depend on the size of the TLB. So for now flush the specific 12023 * page a context at a time. 12024 * 12025 * If uselocks is 0 then it's called after all cpus were captured and all the 12026 * hat locks were taken. In this case don't take the region lock by relying on 12027 * the order of list region update operations in hat_join_region(), 12028 * hat_leave_region() and hat_dup_region(). The ordering in those routines 12029 * guarantees that list is always forward walkable and reaches active sfmmus 12030 * regardless of where xc_attention() captures a cpu. 
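 *
 * A hedged caller sketch (pairing the returned set with xt_sync() is
 * how it is typically consumed; exact call sites vary):
 *
 *	cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
 *	xt_sync(cpuset);
 *
 * The return value is the union of cpus that were cross-called, so the
 * caller can wait for all of the TLB demaps to complete at once.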
12031 */ 12032 cpuset_t 12033 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp, 12034 struct hme_blk *hmeblkp, int uselocks) 12035 { 12036 sfmmu_t *sfmmup; 12037 cpuset_t cpuset; 12038 cpuset_t rcpuset; 12039 hatlock_t *hatlockp; 12040 uint_t rid = rgnp->rgn_id; 12041 sf_rgn_link_t *rlink; 12042 sf_scd_t *scdp; 12043 12044 ASSERT(hmeblkp->hblk_shared); 12045 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 12046 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 12047 12048 CPUSET_ZERO(rcpuset); 12049 if (uselocks) { 12050 mutex_enter(&rgnp->rgn_mutex); 12051 } 12052 sfmmup = rgnp->rgn_sfmmu_head; 12053 while (sfmmup != NULL) { 12054 if (uselocks) { 12055 hatlockp = sfmmu_hat_enter(sfmmup); 12056 } 12057 12058 /* 12059 * When an SCD is created the SCD hat is linked on the sfmmu 12060 * region lists for each hme region which is part of the 12061 * SCD. If we find an SCD hat, when walking these lists, 12062 * then we flush the shared TSBs, if we find a private hat, 12063 * which is part of an SCD, but where the region 12064 * is not part of the SCD then we flush the private TSBs. 12065 */ 12066 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12067 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 12068 scdp = sfmmup->sfmmu_scdp; 12069 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 12070 if (uselocks) { 12071 sfmmu_hat_exit(hatlockp); 12072 } 12073 goto next; 12074 } 12075 } 12076 12077 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12078 12079 kpreempt_disable(); 12080 cpuset = sfmmup->sfmmu_cpusran; 12081 CPUSET_AND(cpuset, cpu_ready_set); 12082 CPUSET_DEL(cpuset, CPU->cpu_id); 12083 SFMMU_XCALL_STATS(sfmmup); 12084 xt_some(cpuset, vtag_flushpage_tl1, 12085 (uint64_t)addr, (uint64_t)sfmmup); 12086 vtag_flushpage(addr, (uint64_t)sfmmup); 12087 if (uselocks) { 12088 sfmmu_hat_exit(hatlockp); 12089 } 12090 kpreempt_enable(); 12091 CPUSET_OR(rcpuset, cpuset); 12092 12093 next: 12094 /* LINTED: constant in conditional context */ 12095 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 12096 ASSERT(rlink != NULL); 12097 sfmmup = rlink->next; 12098 } 12099 if (uselocks) { 12100 mutex_exit(&rgnp->rgn_mutex); 12101 } 12102 return (rcpuset); 12103 } 12104 12105 /* 12106 * This routine takes an sfmmu pointer and the va for an adddress in an 12107 * ISM region as input and returns the corresponding region id in ism_rid. 12108 * The return value of 1 indicates that a region has been found and ism_rid 12109 * is valid, otherwise 0 is returned. 12110 */ 12111 static int 12112 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid) 12113 { 12114 ism_blk_t *ism_blkp; 12115 int i; 12116 ism_map_t *ism_map; 12117 #ifdef DEBUG 12118 struct hat *ism_hatid; 12119 #endif 12120 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12121 12122 ism_blkp = sfmmup->sfmmu_iblk; 12123 while (ism_blkp != NULL) { 12124 ism_map = ism_blkp->iblk_maps; 12125 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 12126 if ((va >= ism_start(ism_map[i])) && 12127 (va < ism_end(ism_map[i]))) { 12128 12129 *ism_rid = ism_map[i].imap_rid; 12130 #ifdef DEBUG 12131 ism_hatid = ism_map[i].imap_ismhat; 12132 ASSERT(ism_hatid == ism_sfmmup); 12133 ASSERT(ism_hatid->sfmmu_ismhat); 12134 #endif 12135 return (1); 12136 } 12137 } 12138 ism_blkp = ism_blkp->iblk_next; 12139 } 12140 return (0); 12141 } 12142 12143 /* 12144 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 12145 * This routine may be called with all cpu's captured. 
Therefore, the 12146 * caller is responsible for holding all locks and disabling kernel 12147 * preemption. 12148 */ 12149 /* ARGSUSED */ 12150 static void 12151 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 12152 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 12153 { 12154 cpuset_t cpuset; 12155 caddr_t va; 12156 ism_ment_t *ment; 12157 sfmmu_t *sfmmup; 12158 #ifdef VAC 12159 int vcolor; 12160 #endif 12161 12162 sf_scd_t *scdp; 12163 uint_t ism_rid; 12164 12165 ASSERT(!hmeblkp->hblk_shared); 12166 /* 12167 * Walk the ism_hat's mapping list and flush the page 12168 * from every hat sharing this ism_hat. This routine 12169 * may be called while all cpu's have been captured. 12170 * Therefore we can't attempt to grab any locks. For now 12171 * this means we will protect the ism mapping list under 12172 * a single lock which will be grabbed by the caller. 12173 * If hat_share/unshare scalibility becomes a performance 12174 * problem then we may need to re-think ism mapping list locking. 12175 */ 12176 ASSERT(ism_sfmmup->sfmmu_ismhat); 12177 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 12178 addr = addr - ISMID_STARTADDR; 12179 12180 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 12181 12182 sfmmup = ment->iment_hat; 12183 12184 va = ment->iment_base_va; 12185 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 12186 12187 /* 12188 * When an SCD is created the SCD hat is linked on the ism 12189 * mapping lists for each ISM segment which is part of the 12190 * SCD. If we find an SCD hat, when walking these lists, 12191 * then we flush the shared TSBs, if we find a private hat, 12192 * which is part of an SCD, but where the region 12193 * corresponding to this va is not part of the SCD then we 12194 * flush the private TSBs. 12195 */ 12196 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12197 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12198 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12199 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12200 &ism_rid)) { 12201 cmn_err(CE_PANIC, 12202 "can't find matching ISM rid!"); 12203 } 12204 12205 scdp = sfmmup->sfmmu_scdp; 12206 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12207 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12208 ism_rid)) { 12209 continue; 12210 } 12211 } 12212 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12213 12214 cpuset = sfmmup->sfmmu_cpusran; 12215 CPUSET_AND(cpuset, cpu_ready_set); 12216 CPUSET_DEL(cpuset, CPU->cpu_id); 12217 SFMMU_XCALL_STATS(sfmmup); 12218 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12219 (uint64_t)sfmmup); 12220 vtag_flushpage(va, (uint64_t)sfmmup); 12221 12222 #ifdef VAC 12223 /* 12224 * Flush D$ 12225 * When flushing D$ we must flush all 12226 * cpu's. See sfmmu_cache_flush(). 12227 */ 12228 if (cache_flush_flag == CACHE_FLUSH) { 12229 cpuset = cpu_ready_set; 12230 CPUSET_DEL(cpuset, CPU->cpu_id); 12231 12232 SFMMU_XCALL_STATS(sfmmup); 12233 vcolor = addr_to_vcolor(va); 12234 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12235 vac_flushpage(pfnum, vcolor); 12236 } 12237 #endif /* VAC */ 12238 } 12239 } 12240 12241 /* 12242 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12243 * a particular virtual address and ctx. If noflush is set we do not 12244 * flush the TLB/TSB. This function may or may not be called with the 12245 * HAT lock held. 
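 *
 * A hedged example of the call shape (the flag values shown are
 * illustrative only):
 *
 *	sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp, pfn,
 *	    0, 0, CACHE_FLUSH, 0);
 *
 * i.e. flush the TLB/TSB (tlb_noflush == 0), leave FLUSH_ALL_CPUS
 * clear in cpu_flag so the D$ cross-call only targets the cpus the
 * process has run on, flush the D$ for this pfn, and take the hat
 * lock here because the caller does not already hold it.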
12246 */ 12247 static void 12248 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12249 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12250 int hat_lock_held) 12251 { 12252 #ifdef VAC 12253 int vcolor; 12254 #endif 12255 cpuset_t cpuset; 12256 hatlock_t *hatlockp; 12257 12258 ASSERT(!hmeblkp->hblk_shared); 12259 12260 #if defined(lint) && !defined(VAC) 12261 pfnum = pfnum; 12262 cpu_flag = cpu_flag; 12263 cache_flush_flag = cache_flush_flag; 12264 #endif 12265 12266 /* 12267 * There is no longer a need to protect against ctx being 12268 * stolen here since we don't store the ctx in the TSB anymore. 12269 */ 12270 #ifdef VAC 12271 vcolor = addr_to_vcolor(addr); 12272 #endif 12273 12274 /* 12275 * We must hold the hat lock during the flush of TLB, 12276 * to avoid a race with sfmmu_invalidate_ctx(), where 12277 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12278 * causing TLB demap routine to skip flush on that MMU. 12279 * If the context on a MMU has already been set to 12280 * INVALID_CONTEXT, we just get an extra flush on 12281 * that MMU. 12282 */ 12283 if (!hat_lock_held && !tlb_noflush) 12284 hatlockp = sfmmu_hat_enter(sfmmup); 12285 12286 kpreempt_disable(); 12287 if (!tlb_noflush) { 12288 /* 12289 * Flush the TSB and TLB. 12290 */ 12291 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12292 12293 cpuset = sfmmup->sfmmu_cpusran; 12294 CPUSET_AND(cpuset, cpu_ready_set); 12295 CPUSET_DEL(cpuset, CPU->cpu_id); 12296 12297 SFMMU_XCALL_STATS(sfmmup); 12298 12299 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12300 (uint64_t)sfmmup); 12301 12302 vtag_flushpage(addr, (uint64_t)sfmmup); 12303 } 12304 12305 if (!hat_lock_held && !tlb_noflush) 12306 sfmmu_hat_exit(hatlockp); 12307 12308 #ifdef VAC 12309 /* 12310 * Flush the D$ 12311 * 12312 * Even if the ctx is stolen, we need to flush the 12313 * cache. Our ctx stealer only flushes the TLBs. 12314 */ 12315 if (cache_flush_flag == CACHE_FLUSH) { 12316 if (cpu_flag & FLUSH_ALL_CPUS) { 12317 cpuset = cpu_ready_set; 12318 } else { 12319 cpuset = sfmmup->sfmmu_cpusran; 12320 CPUSET_AND(cpuset, cpu_ready_set); 12321 } 12322 CPUSET_DEL(cpuset, CPU->cpu_id); 12323 SFMMU_XCALL_STATS(sfmmup); 12324 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12325 vac_flushpage(pfnum, vcolor); 12326 } 12327 #endif /* VAC */ 12328 kpreempt_enable(); 12329 } 12330 12331 /* 12332 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12333 * address and ctx. If noflush is set we do not currently do anything. 12334 * This function may or may not be called with the HAT lock held. 12335 */ 12336 static void 12337 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12338 int tlb_noflush, int hat_lock_held) 12339 { 12340 cpuset_t cpuset; 12341 hatlock_t *hatlockp; 12342 12343 ASSERT(!hmeblkp->hblk_shared); 12344 12345 /* 12346 * If the process is exiting we have nothing to do. 12347 */ 12348 if (tlb_noflush) 12349 return; 12350 12351 /* 12352 * Flush TSB. 
12353 */ 12354 if (!hat_lock_held) 12355 hatlockp = sfmmu_hat_enter(sfmmup); 12356 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12357 12358 kpreempt_disable(); 12359 12360 cpuset = sfmmup->sfmmu_cpusran; 12361 CPUSET_AND(cpuset, cpu_ready_set); 12362 CPUSET_DEL(cpuset, CPU->cpu_id); 12363 12364 SFMMU_XCALL_STATS(sfmmup); 12365 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12366 12367 vtag_flushpage(addr, (uint64_t)sfmmup); 12368 12369 if (!hat_lock_held) 12370 sfmmu_hat_exit(hatlockp); 12371 12372 kpreempt_enable(); 12373 12374 } 12375 12376 /* 12377 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12378 * call handler that can flush a range of pages to save on xcalls. 12379 */ 12380 static int sfmmu_xcall_save; 12381 12382 /* 12383 * this routine is never used for demaping addresses backed by SRD hmeblks. 12384 */ 12385 static void 12386 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12387 { 12388 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12389 hatlock_t *hatlockp; 12390 cpuset_t cpuset; 12391 uint64_t sfmmu_pgcnt; 12392 pgcnt_t pgcnt = 0; 12393 int pgunload = 0; 12394 int dirtypg = 0; 12395 caddr_t addr = dmrp->dmr_addr; 12396 caddr_t eaddr; 12397 uint64_t bitvec = dmrp->dmr_bitvec; 12398 12399 ASSERT(bitvec & 1); 12400 12401 /* 12402 * Flush TSB and calculate number of pages to flush. 12403 */ 12404 while (bitvec != 0) { 12405 dirtypg = 0; 12406 /* 12407 * Find the first page to flush and then count how many 12408 * pages there are after it that also need to be flushed. 12409 * This way the number of TSB flushes is minimized. 12410 */ 12411 while ((bitvec & 1) == 0) { 12412 pgcnt++; 12413 addr += MMU_PAGESIZE; 12414 bitvec >>= 1; 12415 } 12416 while (bitvec & 1) { 12417 dirtypg++; 12418 bitvec >>= 1; 12419 } 12420 eaddr = addr + ptob(dirtypg); 12421 hatlockp = sfmmu_hat_enter(sfmmup); 12422 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12423 sfmmu_hat_exit(hatlockp); 12424 pgunload += dirtypg; 12425 addr = eaddr; 12426 pgcnt += dirtypg; 12427 } 12428 12429 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12430 if (sfmmup->sfmmu_free == 0) { 12431 addr = dmrp->dmr_addr; 12432 bitvec = dmrp->dmr_bitvec; 12433 12434 /* 12435 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12436 * as it will be used to pack argument for xt_some 12437 */ 12438 ASSERT((pgcnt > 0) && 12439 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12440 12441 /* 12442 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 12443 * the low 6 bits of sfmmup. This is doable since pgcnt 12444 * always >= 1. 12445 */ 12446 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12447 sfmmu_pgcnt = (uint64_t)sfmmup | 12448 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12449 12450 /* 12451 * We must hold the hat lock during the flush of TLB, 12452 * to avoid a race with sfmmu_invalidate_ctx(), where 12453 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12454 * causing TLB demap routine to skip flush on that MMU. 12455 * If the context on a MMU has already been set to 12456 * INVALID_CONTEXT, we just get an extra flush on 12457 * that MMU. 
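 *
 * As a worked example of the argument packing above (numbers are
 * illustrative): with pgcnt == 4 the low bits of sfmmu_pgcnt carry
 * (4 - 1) == 3, and the TL1 handler is expected to recover the sfmmu
 * pointer by masking off SFMMU_PGCNT_MASK and then flush
 * (sfmmu_pgcnt & SFMMU_PGCNT_MASK) + 1 == 4 consecutive 8K pages
 * starting at addr.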
12458 */ 12459 hatlockp = sfmmu_hat_enter(sfmmup); 12460 kpreempt_disable(); 12461 12462 cpuset = sfmmup->sfmmu_cpusran; 12463 CPUSET_AND(cpuset, cpu_ready_set); 12464 CPUSET_DEL(cpuset, CPU->cpu_id); 12465 12466 SFMMU_XCALL_STATS(sfmmup); 12467 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12468 sfmmu_pgcnt); 12469 12470 for (; bitvec != 0; bitvec >>= 1) { 12471 if (bitvec & 1) 12472 vtag_flushpage(addr, (uint64_t)sfmmup); 12473 addr += MMU_PAGESIZE; 12474 } 12475 kpreempt_enable(); 12476 sfmmu_hat_exit(hatlockp); 12477 12478 sfmmu_xcall_save += (pgunload-1); 12479 } 12480 dmrp->dmr_bitvec = 0; 12481 } 12482 12483 /* 12484 * In cases where we need to synchronize with TLB/TSB miss trap 12485 * handlers, _and_ need to flush the TLB, it's a lot easier to 12486 * throw away the context from the process than to do a 12487 * special song and dance to keep things consistent for the 12488 * handlers. 12489 * 12490 * Since the process suddenly ends up without a context and our caller 12491 * holds the hat lock, threads that fault after this function is called 12492 * will pile up on the lock. We can then do whatever we need to 12493 * atomically from the context of the caller. The first blocked thread 12494 * to resume executing will get the process a new context, and the 12495 * process will resume executing. 12496 * 12497 * One added advantage of this approach is that on MMUs that 12498 * support a "flush all" operation, we will delay the flush until 12499 * cnum wrap-around, and then flush the TLB one time. This 12500 * is rather rare, so it's a lot less expensive than making 8000 12501 * x-calls to flush the TLB 8000 times. 12502 * 12503 * A per-process (PP) lock is used to synchronize ctx allocations in 12504 * resume() and ctx invalidations here. 12505 */ 12506 static void 12507 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 12508 { 12509 cpuset_t cpuset; 12510 int cnum, currcnum; 12511 mmu_ctx_t *mmu_ctxp; 12512 int i; 12513 uint_t pstate_save; 12514 12515 SFMMU_STAT(sf_ctx_inv); 12516 12517 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12518 ASSERT(sfmmup != ksfmmup); 12519 12520 kpreempt_disable(); 12521 12522 mmu_ctxp = CPU_MMU_CTXP(CPU); 12523 ASSERT(mmu_ctxp); 12524 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 12525 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 12526 12527 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 12528 12529 pstate_save = sfmmu_disable_intrs(); 12530 12531 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 12532 /* set HAT cnum invalid across all context domains. */ 12533 for (i = 0; i < max_mmu_ctxdoms; i++) { 12534 12535 cnum = sfmmup->sfmmu_ctxs[i].cnum; 12536 if (cnum == INVALID_CONTEXT) { 12537 continue; 12538 } 12539 12540 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 12541 } 12542 membar_enter(); /* make sure globally visible to all CPUs */ 12543 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 12544 12545 sfmmu_enable_intrs(pstate_save); 12546 12547 cpuset = sfmmup->sfmmu_cpusran; 12548 CPUSET_DEL(cpuset, CPU->cpu_id); 12549 CPUSET_AND(cpuset, cpu_ready_set); 12550 if (!CPUSET_ISNULL(cpuset)) { 12551 SFMMU_XCALL_STATS(sfmmup); 12552 xt_some(cpuset, sfmmu_raise_tsb_exception, 12553 (uint64_t)sfmmup, INVALID_CONTEXT); 12554 xt_sync(cpuset); 12555 SFMMU_STAT(sf_tsb_raise_exception); 12556 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 12557 } 12558 12559 /* 12560 * If the hat to-be-invalidated is the same as the current 12561 * process on local CPU we need to invalidate 12562 * this CPU context as well. 
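 *
 * (sfmmu_getctx_sec() reads this cpu's secondary context register. If
 * it still holds the cnum we just invalidated we set it to
 * INVALID_CONTEXT and clear the user TSB info, so the next user access
 * on this cpu faults into sfmmu_tsbmiss_exception(); the remote cpus
 * get the same effect via the sfmmu_raise_tsb_exception cross-call
 * above.)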
12563 */ 12564 if ((sfmmu_getctx_sec() == currcnum) && 12565 (currcnum != INVALID_CONTEXT)) { 12566 /* sets shared context to INVALID too */ 12567 sfmmu_setctx_sec(INVALID_CONTEXT); 12568 sfmmu_clear_utsbinfo(); 12569 } 12570 12571 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID); 12572 12573 kpreempt_enable(); 12574 12575 /* 12576 * we hold the hat lock, so nobody should allocate a context 12577 * for us yet 12578 */ 12579 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 12580 } 12581 12582 #ifdef VAC 12583 /* 12584 * We need to flush the cache in all cpus. It is possible that 12585 * a process referenced a page as cacheable but has sinced exited 12586 * and cleared the mapping list. We still to flush it but have no 12587 * state so all cpus is the only alternative. 12588 */ 12589 void 12590 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 12591 { 12592 cpuset_t cpuset; 12593 12594 kpreempt_disable(); 12595 cpuset = cpu_ready_set; 12596 CPUSET_DEL(cpuset, CPU->cpu_id); 12597 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12598 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12599 xt_sync(cpuset); 12600 vac_flushpage(pfnum, vcolor); 12601 kpreempt_enable(); 12602 } 12603 12604 void 12605 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 12606 { 12607 cpuset_t cpuset; 12608 12609 ASSERT(vcolor >= 0); 12610 12611 kpreempt_disable(); 12612 cpuset = cpu_ready_set; 12613 CPUSET_DEL(cpuset, CPU->cpu_id); 12614 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12615 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 12616 xt_sync(cpuset); 12617 vac_flushcolor(vcolor, pfnum); 12618 kpreempt_enable(); 12619 } 12620 #endif /* VAC */ 12621 12622 /* 12623 * We need to prevent processes from accessing the TSB using a cached physical 12624 * address. It's alright if they try to access the TSB via virtual address 12625 * since they will just fault on that virtual address once the mapping has 12626 * been suspended. 12627 */ 12628 #pragma weak sendmondo_in_recover 12629 12630 /* ARGSUSED */ 12631 static int 12632 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 12633 { 12634 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12635 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12636 hatlock_t *hatlockp; 12637 sf_scd_t *scdp; 12638 12639 if (flags != HAT_PRESUSPEND) 12640 return (0); 12641 12642 /* 12643 * If tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must 12644 * be a shared hat, then set SCD's tsbinfo's flag. 12645 * If tsb is not shared, sfmmup is a private hat, then set 12646 * its private tsbinfo's flag. 12647 */ 12648 hatlockp = sfmmu_hat_enter(sfmmup); 12649 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 12650 12651 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) { 12652 sfmmu_tsb_inv_ctx(sfmmup); 12653 sfmmu_hat_exit(hatlockp); 12654 } else { 12655 /* release lock on the shared hat */ 12656 sfmmu_hat_exit(hatlockp); 12657 /* sfmmup is a shared hat */ 12658 ASSERT(sfmmup->sfmmu_scdhat); 12659 scdp = sfmmup->sfmmu_scdp; 12660 ASSERT(scdp != NULL); 12661 /* get private hat from the scd list */ 12662 mutex_enter(&scdp->scd_mutex); 12663 sfmmup = scdp->scd_sf_list; 12664 while (sfmmup != NULL) { 12665 hatlockp = sfmmu_hat_enter(sfmmup); 12666 /* 12667 * We do not call sfmmu_tsb_inv_ctx here because 12668 * sendmondo_in_recover check is only needed for 12669 * sun4u. 
12670 */ 12671 sfmmu_invalidate_ctx(sfmmup); 12672 sfmmu_hat_exit(hatlockp); 12673 sfmmup = sfmmup->sfmmu_scd_link.next; 12674 12675 } 12676 mutex_exit(&scdp->scd_mutex); 12677 } 12678 return (0); 12679 } 12680 12681 static void 12682 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12683 { 12684 extern uint32_t sendmondo_in_recover; 12685 12686 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12687 12688 /* 12689 * For Cheetah+ Erratum 25: 12690 * Wait for any active recovery to finish. We can't risk 12691 * relocating the TSB of the thread running mondo_recover_proc() 12692 * since, if we did that, we would deadlock. The scenario we are 12693 * trying to avoid is as follows: 12694 * 12695 * THIS CPU RECOVER CPU 12696 * -------- ----------- 12697 * Begins recovery, walking through TSB 12698 * hat_pagesuspend() TSB TTE 12699 * TLB miss on TSB TTE, spins at TL1 12700 * xt_sync() 12701 * send_mondo_timeout() 12702 * mondo_recover_proc() 12703 * ((deadlocked)) 12704 * 12705 * The second half of the workaround is that mondo_recover_proc() 12706 * checks to see if the tsb_info has the RELOC flag set, and if it 12707 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12708 * and hence avoiding the TLB miss that could result in a deadlock. 12709 */ 12710 if (&sendmondo_in_recover) { 12711 membar_enter(); /* make sure RELOC flag visible */ 12712 while (sendmondo_in_recover) { 12713 drv_usecwait(1); 12714 membar_consumer(); 12715 } 12716 } 12717 12718 sfmmu_invalidate_ctx(sfmmup); 12719 } 12720 12721 /* ARGSUSED */ 12722 static int 12723 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12724 void *tsbinfo, pfn_t newpfn) 12725 { 12726 hatlock_t *hatlockp; 12727 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12728 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12729 12730 if (flags != HAT_POSTUNSUSPEND) 12731 return (0); 12732 12733 hatlockp = sfmmu_hat_enter(sfmmup); 12734 12735 SFMMU_STAT(sf_tsb_reloc); 12736 12737 /* 12738 * The process may have swapped out while we were relocating one 12739 * of its TSBs. If so, don't bother doing the setup since the 12740 * process can't be using the memory anymore. 12741 */ 12742 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12743 ASSERT(va == tsbinfop->tsb_va); 12744 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12745 12746 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12747 sfmmu_inv_tsb(tsbinfop->tsb_va, 12748 TSB_BYTES(tsbinfop->tsb_szc)); 12749 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12750 } 12751 } 12752 12753 membar_exit(); 12754 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12755 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12756 12757 sfmmu_hat_exit(hatlockp); 12758 12759 return (0); 12760 } 12761 12762 /* 12763 * Allocate and initialize a tsb_info structure. Note that we may or may not 12764 * allocate a TSB here, depending on the flags passed in. 12765 */ 12766 static int 12767 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12768 uint_t flags, sfmmu_t *sfmmup) 12769 { 12770 int err; 12771 12772 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12773 sfmmu_tsbinfo_cache, KM_SLEEP); 12774 12775 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12776 tsb_szc, flags, sfmmup)) != 0) { 12777 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12778 SFMMU_STAT(sf_tsb_allocfail); 12779 *tsbinfopp = NULL; 12780 return (err); 12781 } 12782 SFMMU_STAT(sf_tsb_alloc); 12783 12784 /* 12785 * Bump the TSB size counters for this TSB size. 
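 * The statement below treats sfmmu_tsbsize_stat as an array of int
 * counters indexed by TSB size code; it is equivalent to
 * ((int *)&sfmmu_tsbsize_stat)[tsb_szc]++ (this assumes, as the
 * existing code already does, that the struct is laid out as
 * consecutive ints).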
12786 */ 12787 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12788 return (0); 12789 } 12790 12791 static void 12792 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12793 { 12794 caddr_t tsbva = tsbinfo->tsb_va; 12795 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12796 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12797 vmem_t *vmp = tsbinfo->tsb_vmp; 12798 12799 /* 12800 * If we allocated this TSB from relocatable kernel memory, then we 12801 * need to uninstall the callback handler. 12802 */ 12803 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12804 uintptr_t slab_mask; 12805 caddr_t slab_vaddr; 12806 page_t **ppl; 12807 int ret; 12808 12809 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12810 if (tsb_size > MMU_PAGESIZE4M) 12811 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12812 else 12813 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12814 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12815 12816 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12817 ASSERT(ret == 0); 12818 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12819 0, NULL); 12820 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12821 } 12822 12823 if (kmem_cachep != NULL) { 12824 kmem_cache_free(kmem_cachep, tsbva); 12825 } else { 12826 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12827 } 12828 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12829 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12830 } 12831 12832 static void 12833 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12834 { 12835 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12836 sfmmu_tsb_free(tsbinfo); 12837 } 12838 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12839 12840 } 12841 12842 /* 12843 * Setup all the references to physical memory for this tsbinfo. 12844 * The underlying page(s) must be locked. 12845 */ 12846 static void 12847 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12848 { 12849 ASSERT(pfn != PFN_INVALID); 12850 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12851 12852 #ifndef sun4v 12853 if (tsbinfo->tsb_szc == 0) { 12854 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12855 PROT_WRITE|PROT_READ, TTE8K); 12856 } else { 12857 /* 12858 * Round down PA and use a large mapping; the handlers will 12859 * compute the TSB pointer at the correct offset into the 12860 * big virtual page. NOTE: this assumes all TSBs larger 12861 * than 8K must come from physically contiguous slabs of 12862 * size tsb_slab_size. 12863 */ 12864 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12865 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12866 } 12867 tsbinfo->tsb_pa = ptob(pfn); 12868 12869 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12870 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12871 12872 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 12873 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 12874 #else /* sun4v */ 12875 tsbinfo->tsb_pa = ptob(pfn); 12876 #endif /* sun4v */ 12877 } 12878 12879 12880 /* 12881 * Returns zero on success, ENOMEM if over the high water mark, 12882 * or EAGAIN if the caller needs to retry with a smaller TSB 12883 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 12884 * 12885 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 12886 * is specified and the TSB requested is PAGESIZE, though it 12887 * may sleep waiting for memory if sufficient memory is not 12888 * available. 
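 *
 * A hedged caller sketch of the retry policy these return values
 * support (the loop shown is illustrative and not lifted from a
 * specific caller):
 *
 *	while ((err = sfmmu_init_tsbinfo(tsbinfo, TSB8K|TSB64K|TSB512K,
 *	    tsbcode, flags, sfmmup)) == EAGAIN && tsbcode > TSB_MIN_SZCODE)
 *		tsbcode--;
 *	if (err == EAGAIN)
 *		err = sfmmu_init_tsbinfo(tsbinfo, TSB8K|TSB64K|TSB512K,
 *		    TSB_MIN_SZCODE, flags | TSB_FORCEALLOC, sfmmup);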
12889 */ 12890 static int 12891 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 12892 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 12893 { 12894 caddr_t vaddr = NULL; 12895 caddr_t slab_vaddr; 12896 uintptr_t slab_mask; 12897 int tsbbytes = TSB_BYTES(tsbcode); 12898 int lowmem = 0; 12899 struct kmem_cache *kmem_cachep = NULL; 12900 vmem_t *vmp = NULL; 12901 lgrp_id_t lgrpid = LGRP_NONE; 12902 pfn_t pfn; 12903 uint_t cbflags = HAC_SLEEP; 12904 page_t **pplist; 12905 int ret; 12906 12907 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 12908 if (tsbbytes > MMU_PAGESIZE4M) 12909 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12910 else 12911 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12912 12913 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 12914 flags |= TSB_ALLOC; 12915 12916 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 12917 12918 tsbinfo->tsb_sfmmu = sfmmup; 12919 12920 /* 12921 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 12922 * return. 12923 */ 12924 if ((flags & TSB_ALLOC) == 0) { 12925 tsbinfo->tsb_szc = tsbcode; 12926 tsbinfo->tsb_ttesz_mask = tteszmask; 12927 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 12928 tsbinfo->tsb_pa = -1; 12929 tsbinfo->tsb_tte.ll = 0; 12930 tsbinfo->tsb_next = NULL; 12931 tsbinfo->tsb_flags = TSB_SWAPPED; 12932 tsbinfo->tsb_cache = NULL; 12933 tsbinfo->tsb_vmp = NULL; 12934 return (0); 12935 } 12936 12937 #ifdef DEBUG 12938 /* 12939 * For debugging: 12940 * Randomly force allocation failures every tsb_alloc_mtbf 12941 * tries if TSB_FORCEALLOC is not specified. This will 12942 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 12943 * it is even, to allow testing of both failure paths... 12944 */ 12945 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 12946 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 12947 tsb_alloc_count = 0; 12948 tsb_alloc_fail_mtbf++; 12949 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 12950 } 12951 #endif /* DEBUG */ 12952 12953 /* 12954 * Enforce high water mark if we are not doing a forced allocation 12955 * and are not shrinking a process' TSB. 12956 */ 12957 if ((flags & TSB_SHRINK) == 0 && 12958 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 12959 if ((flags & TSB_FORCEALLOC) == 0) 12960 return (ENOMEM); 12961 lowmem = 1; 12962 } 12963 12964 /* 12965 * Allocate from the correct location based upon the size of the TSB 12966 * compared to the base page size, and what memory conditions dictate. 12967 * Note we always do nonblocking allocations from the TSB arena since 12968 * we don't want memory fragmentation to cause processes to block 12969 * indefinitely waiting for memory; until the kernel algorithms that 12970 * coalesce large pages are improved this is our best option. 
12971 * 12972 * Algorithm: 12973 * If allocating a "large" TSB (>8K), allocate from the 12974 * appropriate kmem_tsb_default_arena vmem arena 12975 * else if low on memory or the TSB_FORCEALLOC flag is set or 12976 * tsb_forceheap is set 12977 * Allocate from kernel heap via sfmmu_tsb8k_cache with 12978 * KM_SLEEP (never fails) 12979 * else 12980 * Allocate from appropriate sfmmu_tsb_cache with 12981 * KM_NOSLEEP 12982 * endif 12983 */ 12984 if (tsb_lgrp_affinity) 12985 lgrpid = lgrp_home_id(curthread); 12986 if (lgrpid == LGRP_NONE) 12987 lgrpid = 0; /* use lgrp of boot CPU */ 12988 12989 if (tsbbytes > MMU_PAGESIZE) { 12990 if (tsbbytes > MMU_PAGESIZE4M) { 12991 vmp = kmem_bigtsb_default_arena[lgrpid]; 12992 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12993 0, 0, NULL, NULL, VM_NOSLEEP); 12994 } else { 12995 vmp = kmem_tsb_default_arena[lgrpid]; 12996 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12997 0, 0, NULL, NULL, VM_NOSLEEP); 12998 } 12999 #ifdef DEBUG 13000 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 13001 #else /* !DEBUG */ 13002 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 13003 #endif /* DEBUG */ 13004 kmem_cachep = sfmmu_tsb8k_cache; 13005 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 13006 ASSERT(vaddr != NULL); 13007 } else { 13008 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 13009 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 13010 } 13011 13012 tsbinfo->tsb_cache = kmem_cachep; 13013 tsbinfo->tsb_vmp = vmp; 13014 13015 if (vaddr == NULL) { 13016 return (EAGAIN); 13017 } 13018 13019 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 13020 kmem_cachep = tsbinfo->tsb_cache; 13021 13022 /* 13023 * If we are allocating from outside the cage, then we need to 13024 * register a relocation callback handler. Note that for now 13025 * since pseudo mappings always hang off of the slab's root page, 13026 * we need only lock the first 8K of the TSB slab. This is a bit 13027 * hacky but it is good for performance. 13028 */ 13029 if (kmem_cachep != sfmmu_tsb8k_cache) { 13030 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 13031 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 13032 ASSERT(ret == 0); 13033 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 13034 cbflags, (void *)tsbinfo, &pfn, NULL); 13035 13036 /* 13037 * Need to free up resources if we could not successfully 13038 * add the callback function and return an error condition. 13039 */ 13040 if (ret != 0) { 13041 if (kmem_cachep) { 13042 kmem_cache_free(kmem_cachep, vaddr); 13043 } else { 13044 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 13045 } 13046 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 13047 S_WRITE); 13048 return (EAGAIN); 13049 } 13050 } else { 13051 /* 13052 * Since allocation of 8K TSBs from heap is rare and occurs 13053 * during memory pressure we allocate them from permanent 13054 * memory rather than using callbacks to get the PFN. 
13055 */ 13056 pfn = hat_getpfnum(kas.a_hat, vaddr); 13057 } 13058 13059 tsbinfo->tsb_va = vaddr; 13060 tsbinfo->tsb_szc = tsbcode; 13061 tsbinfo->tsb_ttesz_mask = tteszmask; 13062 tsbinfo->tsb_next = NULL; 13063 tsbinfo->tsb_flags = 0; 13064 13065 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 13066 13067 sfmmu_inv_tsb(vaddr, tsbbytes); 13068 13069 if (kmem_cachep != sfmmu_tsb8k_cache) { 13070 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 13071 } 13072 13073 return (0); 13074 } 13075 13076 /* 13077 * Initialize per cpu tsb and per cpu tsbmiss_area 13078 */ 13079 void 13080 sfmmu_init_tsbs(void) 13081 { 13082 int i; 13083 struct tsbmiss *tsbmissp; 13084 struct kpmtsbm *kpmtsbmp; 13085 #ifndef sun4v 13086 extern int dcache_line_mask; 13087 #endif /* sun4v */ 13088 extern uint_t vac_colors; 13089 13090 /* 13091 * Init. tsb miss area. 13092 */ 13093 tsbmissp = tsbmiss_area; 13094 13095 for (i = 0; i < NCPU; tsbmissp++, i++) { 13096 /* 13097 * initialize the tsbmiss area. 13098 * Do this for all possible CPUs as some may be added 13099 * while the system is running. There is no cost to this. 13100 */ 13101 tsbmissp->ksfmmup = ksfmmup; 13102 #ifndef sun4v 13103 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13104 #endif /* sun4v */ 13105 tsbmissp->khashstart = 13106 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13107 tsbmissp->uhashstart = 13108 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13109 tsbmissp->khashsz = khmehash_num; 13110 tsbmissp->uhashsz = uhmehash_num; 13111 } 13112 13113 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13114 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13115 13116 if (kpm_enable == 0) 13117 return; 13118 13119 /* -- Begin KPM specific init -- */ 13120 13121 if (kpm_smallpages) { 13122 /* 13123 * If we're using base pagesize pages for seg_kpm 13124 * mappings, we use the kernel TSB since we can't afford 13125 * to allocate a second huge TSB for these mappings. 13126 */ 13127 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13128 kpm_tsbsz = ktsb_szcode; 13129 kpmsm_tsbbase = kpm_tsbbase; 13130 kpmsm_tsbsz = kpm_tsbsz; 13131 } else { 13132 /* 13133 * In VAC conflict case, just put the entries in the 13134 * kernel 8K indexed TSB for now so we can find them. 13135 * This could really be changed in the future if we feel 13136 * the need... 13137 */ 13138 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13139 kpmsm_tsbsz = ktsb_szcode; 13140 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13141 kpm_tsbsz = ktsb4m_szcode; 13142 } 13143 13144 kpmtsbmp = kpmtsbm_area; 13145 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13146 /* 13147 * Initialize the kpmtsbm area. 13148 * Do this for all possible CPUs as some may be added 13149 * while the system is running. There is no cost to this. 13150 */ 13151 kpmtsbmp->vbase = kpm_vbase; 13152 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13153 kpmtsbmp->sz_shift = kpm_size_shift; 13154 kpmtsbmp->kpmp_shift = kpmp_shift; 13155 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13156 if (kpm_smallpages == 0) { 13157 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13158 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13159 } else { 13160 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13161 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13162 } 13163 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13164 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13165 #ifdef DEBUG 13166 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 13167 #endif /* DEBUG */ 13168 if (ktsb_phys) 13169 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13170 } 13171 13172 /* -- End KPM specific init -- */ 13173 } 13174 13175 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13176 struct tsb_info ktsb_info[2]; 13177 13178 /* 13179 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13180 */ 13181 void 13182 sfmmu_init_ktsbinfo() 13183 { 13184 ASSERT(ksfmmup != NULL); 13185 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13186 /* 13187 * Allocate tsbinfos for kernel and copy in data 13188 * to make debug easier and sun4v setup easier. 13189 */ 13190 ktsb_info[0].tsb_sfmmu = ksfmmup; 13191 ktsb_info[0].tsb_szc = ktsb_szcode; 13192 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13193 ktsb_info[0].tsb_va = ktsb_base; 13194 ktsb_info[0].tsb_pa = ktsb_pbase; 13195 ktsb_info[0].tsb_flags = 0; 13196 ktsb_info[0].tsb_tte.ll = 0; 13197 ktsb_info[0].tsb_cache = NULL; 13198 13199 ktsb_info[1].tsb_sfmmu = ksfmmup; 13200 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13201 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13202 ktsb_info[1].tsb_va = ktsb4m_base; 13203 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13204 ktsb_info[1].tsb_flags = 0; 13205 ktsb_info[1].tsb_tte.ll = 0; 13206 ktsb_info[1].tsb_cache = NULL; 13207 13208 /* Link them into ksfmmup. */ 13209 ktsb_info[0].tsb_next = &ktsb_info[1]; 13210 ktsb_info[1].tsb_next = NULL; 13211 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13212 13213 sfmmu_setup_tsbinfo(ksfmmup); 13214 } 13215 13216 /* 13217 * Cache the last value returned from va_to_pa(). If the VA specified 13218 * in the current call to cached_va_to_pa() maps to the same Page (as the 13219 * previous call to cached_va_to_pa()), then compute the PA using 13220 * cached info, else call va_to_pa(). 13221 * 13222 * Note: this function is neither MT-safe nor consistent in the presence 13223 * of multiple, interleaved threads. This function was created to enable 13224 * an optimization used during boot (at a point when there's only one thread 13225 * executing on the "boot CPU", and before startup_vm() has been called). 13226 */ 13227 static uint64_t 13228 cached_va_to_pa(void *vaddr) 13229 { 13230 static uint64_t prev_vaddr_base = 0; 13231 static uint64_t prev_pfn = 0; 13232 13233 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13234 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13235 } else { 13236 uint64_t pa = va_to_pa(vaddr); 13237 13238 if (pa != ((uint64_t)-1)) { 13239 /* 13240 * Computed physical address is valid. Cache its 13241 * related info for the next cached_va_to_pa() call. 13242 */ 13243 prev_pfn = pa & MMU_PAGEMASK; 13244 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13245 } 13246 13247 return (pa); 13248 } 13249 } 13250 13251 /* 13252 * Carve up our nucleus hblk region. We may allocate more hblks than 13253 * asked due to rounding errors but we are guaranteed to have at least 13254 * enough space to allocate the requested number of hblk8's and hblk1's. 
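 *
 * Concretely (kept symbolic, since the hblk sizes are platform
 * derived): hblk8_bound below is size - nhblk1 * hme1blk_sz -
 * hme8blk_sz, so the hblk8 carving loop leaves enough room at the tail
 * of the region for the nhblk1 hblk1's carved afterwards, while any
 * rounding slack becomes extra hblk8's (hence ASSERT(j >= nhblk8)).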
13255 */ 13256 void 13257 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 13258 { 13259 struct hme_blk *hmeblkp; 13260 size_t hme8blk_sz, hme1blk_sz; 13261 size_t i; 13262 size_t hblk8_bound; 13263 ulong_t j = 0, k = 0; 13264 13265 ASSERT(addr != NULL && size != 0); 13266 13267 /* Need to use proper structure alignment */ 13268 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 13269 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 13270 13271 nucleus_hblk8.list = (void *)addr; 13272 nucleus_hblk8.index = 0; 13273 13274 /* 13275 * Use as much memory as possible for hblk8's since we 13276 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 13277 * We need to hold back enough space for the hblk1's which 13278 * we'll allocate next. 13279 */ 13280 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 13281 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 13282 hmeblkp = (struct hme_blk *)addr; 13283 addr += hme8blk_sz; 13284 hmeblkp->hblk_nuc_bit = 1; 13285 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13286 } 13287 nucleus_hblk8.len = j; 13288 ASSERT(j >= nhblk8); 13289 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 13290 13291 nucleus_hblk1.list = (void *)addr; 13292 nucleus_hblk1.index = 0; 13293 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 13294 hmeblkp = (struct hme_blk *)addr; 13295 addr += hme1blk_sz; 13296 hmeblkp->hblk_nuc_bit = 1; 13297 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13298 } 13299 ASSERT(k >= nhblk1); 13300 nucleus_hblk1.len = k; 13301 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 13302 } 13303 13304 /* 13305 * This function is currently not supported on this platform. For what 13306 * it's supposed to do, see hat.c and hat_srmmu.c 13307 */ 13308 /* ARGSUSED */ 13309 faultcode_t 13310 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 13311 uint_t flags) 13312 { 13313 ASSERT(hat->sfmmu_xhat_provider == NULL); 13314 return (FC_NOSUPPORT); 13315 } 13316 13317 /* 13318 * Searchs the mapping list of the page for a mapping of the same size. If not 13319 * found the corresponding bit is cleared in the p_index field. When large 13320 * pages are more prevalent in the system, we can maintain the mapping list 13321 * in order and we don't have to traverse the list each time. Just check the 13322 * next and prev entries, and if both are of different size, we clear the bit. 13323 */ 13324 static void 13325 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 13326 { 13327 struct sf_hment *sfhmep; 13328 struct hme_blk *hmeblkp; 13329 int index; 13330 pgcnt_t npgs; 13331 13332 ASSERT(ttesz > TTE8K); 13333 13334 ASSERT(sfmmu_mlist_held(pp)); 13335 13336 ASSERT(PP_ISMAPPED_LARGE(pp)); 13337 13338 /* 13339 * Traverse mapping list looking for another mapping of same size. 13340 * since we only want to clear index field if all mappings of 13341 * that size are gone. 13342 */ 13343 13344 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 13345 if (IS_PAHME(sfhmep)) 13346 continue; 13347 hmeblkp = sfmmu_hmetohblk(sfhmep); 13348 if (hmeblkp->hblk_xhat_bit) 13349 continue; 13350 if (hme_size(sfhmep) == ttesz) { 13351 /* 13352 * another mapping of the same size. don't clear index. 13353 */ 13354 return; 13355 } 13356 } 13357 13358 /* 13359 * Clear the p_index bit for large page. 
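 * For example, with ttesz == TTE4M and 8K base pages,
 * PAGESZ_TO_INDEX(TTE4M) selects the 4M bit and TTEPAGES(TTE4M) is
 * 512, so the loop below clears that bit in each of the 512
 * constituent page_t's.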
13360 */ 13361 index = PAGESZ_TO_INDEX(ttesz); 13362 npgs = TTEPAGES(ttesz); 13363 while (npgs-- > 0) { 13364 ASSERT(pp->p_index & index); 13365 pp->p_index &= ~index; 13366 pp = PP_PAGENEXT(pp); 13367 } 13368 } 13369 13370 /* 13371 * return supported features 13372 */ 13373 /* ARGSUSED */ 13374 int 13375 hat_supported(enum hat_features feature, void *arg) 13376 { 13377 switch (feature) { 13378 case HAT_SHARED_PT: 13379 case HAT_DYNAMIC_ISM_UNMAP: 13380 case HAT_VMODSORT: 13381 return (1); 13382 case HAT_SHARED_REGIONS: 13383 if (shctx_on) 13384 return (1); 13385 else 13386 return (0); 13387 default: 13388 return (0); 13389 } 13390 } 13391 13392 void 13393 hat_enter(struct hat *hat) 13394 { 13395 hatlock_t *hatlockp; 13396 13397 if (hat != ksfmmup) { 13398 hatlockp = TSB_HASH(hat); 13399 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13400 } 13401 } 13402 13403 void 13404 hat_exit(struct hat *hat) 13405 { 13406 hatlock_t *hatlockp; 13407 13408 if (hat != ksfmmup) { 13409 hatlockp = TSB_HASH(hat); 13410 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13411 } 13412 } 13413 13414 /*ARGSUSED*/ 13415 void 13416 hat_reserve(struct as *as, caddr_t addr, size_t len) 13417 { 13418 } 13419 13420 static void 13421 hat_kstat_init(void) 13422 { 13423 kstat_t *ksp; 13424 13425 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13426 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13427 KSTAT_FLAG_VIRTUAL); 13428 if (ksp) { 13429 ksp->ks_data = (void *) &sfmmu_global_stat; 13430 kstat_install(ksp); 13431 } 13432 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13433 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13434 KSTAT_FLAG_VIRTUAL); 13435 if (ksp) { 13436 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13437 kstat_install(ksp); 13438 } 13439 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13440 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13441 KSTAT_FLAG_WRITABLE); 13442 if (ksp) { 13443 ksp->ks_update = sfmmu_kstat_percpu_update; 13444 kstat_install(ksp); 13445 } 13446 } 13447 13448 /* ARGSUSED */ 13449 static int 13450 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13451 { 13452 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13453 struct tsbmiss *tsbm = tsbmiss_area; 13454 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13455 int i; 13456 13457 ASSERT(cpu_kstat); 13458 if (rw == KSTAT_READ) { 13459 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13460 cpu_kstat->sf_itlb_misses = 0; 13461 cpu_kstat->sf_dtlb_misses = 0; 13462 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13463 tsbm->uprot_traps; 13464 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13465 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13466 cpu_kstat->sf_tsb_hits = 0; 13467 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13468 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13469 } 13470 } else { 13471 /* KSTAT_WRITE is used to clear stats */ 13472 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13473 tsbm->utsb_misses = 0; 13474 tsbm->ktsb_misses = 0; 13475 tsbm->uprot_traps = 0; 13476 tsbm->kprot_traps = 0; 13477 kpmtsbm->kpm_dtlb_misses = 0; 13478 kpmtsbm->kpm_tsb_misses = 0; 13479 } 13480 } 13481 return (0); 13482 } 13483 13484 #ifdef DEBUG 13485 13486 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13487 13488 /* 13489 * A tte checker. *orig_old is the value we read before cas. 13490 * *cur is the value returned by cas. 13491 * *new is the desired value when we do the cas. 13492 * 13493 * *hmeblkp is currently unused. 
13494 */ 13495 13496 /* ARGSUSED */ 13497 void 13498 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13499 { 13500 pfn_t i, j, k; 13501 int cpuid = CPU->cpu_id; 13502 13503 gorig[cpuid] = orig_old; 13504 gcur[cpuid] = cur; 13505 gnew[cpuid] = new; 13506 13507 #ifdef lint 13508 hmeblkp = hmeblkp; 13509 #endif 13510 13511 if (TTE_IS_VALID(orig_old)) { 13512 if (TTE_IS_VALID(cur)) { 13513 i = TTE_TO_TTEPFN(orig_old); 13514 j = TTE_TO_TTEPFN(cur); 13515 k = TTE_TO_TTEPFN(new); 13516 if (i != j) { 13517 /* remap error? */ 13518 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13519 } 13520 13521 if (i != k) { 13522 /* remap error? */ 13523 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13524 } 13525 } else { 13526 if (TTE_IS_VALID(new)) { 13527 panic("chk_tte: invalid cur? "); 13528 } 13529 13530 i = TTE_TO_TTEPFN(orig_old); 13531 k = TTE_TO_TTEPFN(new); 13532 if (i != k) { 13533 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13534 } 13535 } 13536 } else { 13537 if (TTE_IS_VALID(cur)) { 13538 j = TTE_TO_TTEPFN(cur); 13539 if (TTE_IS_VALID(new)) { 13540 k = TTE_TO_TTEPFN(new); 13541 if (j != k) { 13542 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13543 j, k); 13544 } 13545 } else { 13546 panic("chk_tte: why here?"); 13547 } 13548 } else { 13549 if (!TTE_IS_VALID(new)) { 13550 panic("chk_tte: why here2 ?"); 13551 } 13552 } 13553 } 13554 } 13555 13556 #endif /* DEBUG */ 13557 13558 extern void prefetch_tsbe_read(struct tsbe *); 13559 extern void prefetch_tsbe_write(struct tsbe *); 13560 13561 13562 /* 13563 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13564 * us optimal performance on Cheetah+. You can only have 8 outstanding 13565 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13566 * prefetch to make the most utilization of the prefetch capability. 13567 */ 13568 #define TSBE_PREFETCH_STRIDE (7) 13569 13570 void 13571 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13572 { 13573 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13574 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13575 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13576 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13577 struct tsbe *old; 13578 struct tsbe *new; 13579 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13580 uint64_t va; 13581 int new_offset; 13582 int i; 13583 int vpshift; 13584 int last_prefetch; 13585 13586 if (old_bytes == new_bytes) { 13587 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13588 } else { 13589 13590 /* 13591 * A TSBE is 16 bytes which means there are four TSBE's per 13592 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13593 */ 13594 old = (struct tsbe *)old_tsbinfo->tsb_va; 13595 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13596 for (i = 0; i < old_entries; i++, old++) { 13597 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13598 prefetch_tsbe_read(old); 13599 if (!old->tte_tag.tag_invalid) { 13600 /* 13601 * We have a valid TTE to remap. Check the 13602 * size. We won't remap 64K or 512K TTEs 13603 * because they span more than one TSB entry 13604 * and are indexed using an 8K virt. page. 13605 * Ditto for 32M and 256M TTEs. 
13606 */ 13607 if (TTE_CSZ(&old->tte_data) == TTE64K || 13608 TTE_CSZ(&old->tte_data) == TTE512K) 13609 continue; 13610 if (mmu_page_sizes == max_mmu_page_sizes) { 13611 if (TTE_CSZ(&old->tte_data) == TTE32M || 13612 TTE_CSZ(&old->tte_data) == TTE256M) 13613 continue; 13614 } 13615 13616 /* clear the lower 22 bits of the va */ 13617 va = *(uint64_t *)old << 22; 13618 /* turn va into a virtual pfn */ 13619 va >>= 22 - TSB_START_SIZE; 13620 /* 13621 * or in bits from the offset in the tsb 13622 * to get the real virtual pfn. These 13623 * correspond to bits [21:13] in the va 13624 */ 13625 vpshift = 13626 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13627 0x1ff; 13628 va |= (i << vpshift); 13629 va >>= vpshift; 13630 new_offset = va & (new_entries - 1); 13631 new = new_base + new_offset; 13632 prefetch_tsbe_write(new); 13633 *new = *old; 13634 } 13635 } 13636 } 13637 } 13638 13639 /* 13640 * unused in sfmmu 13641 */ 13642 void 13643 hat_dump(void) 13644 { 13645 } 13646 13647 /* 13648 * Called when a thread is exiting and we have switched to the kernel address 13649 * space. Perform the same VM initialization resume() uses when switching 13650 * processes. 13651 * 13652 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13653 * we call it anyway in case the semantics change in the future. 13654 */ 13655 /*ARGSUSED*/ 13656 void 13657 hat_thread_exit(kthread_t *thd) 13658 { 13659 uint_t pgsz_cnum; 13660 uint_t pstate_save; 13661 13662 ASSERT(thd->t_procp->p_as == &kas); 13663 13664 pgsz_cnum = KCONTEXT; 13665 #ifdef sun4u 13666 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13667 #endif 13668 13669 /* 13670 * Note that sfmmu_load_mmustate() is currently a no-op for 13671 * kernel threads. We need to disable interrupts here, 13672 * simply because otherwise sfmmu_load_mmustate() would panic 13673 * if the caller does not disable interrupts. 13674 */ 13675 pstate_save = sfmmu_disable_intrs(); 13676 13677 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13678 sfmmu_setctx_sec(pgsz_cnum); 13679 sfmmu_load_mmustate(ksfmmup); 13680 sfmmu_enable_intrs(pstate_save); 13681 } 13682 13683 13684 /* 13685 * SRD support 13686 */ 13687 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13688 (((uintptr_t)(vp)) >> 11)) & \ 13689 srd_hashmask) 13690 13691 /* 13692 * Attach the process to the srd struct associated with the exec vnode 13693 * from which the process is started. 
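 * The srd is located by hashing the vnode pointer; if an srd for this
 * vnode already exists its reference count is bumped, otherwise a new
 * srd is allocated and inserted into the hash bucket. A hold is taken
 * on the vnode in either case. Nothing is done if shared contexts are
 * disabled (shctx_on == 0).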
13694 */ 13695 void 13696 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13697 { 13698 uint_t hash = SRD_HASH_FUNCTION(evp); 13699 sf_srd_t *srdp; 13700 sf_srd_t *newsrdp; 13701 13702 ASSERT(sfmmup != ksfmmup); 13703 ASSERT(sfmmup->sfmmu_srdp == NULL); 13704 13705 if (!shctx_on) { 13706 return; 13707 } 13708 13709 VN_HOLD(evp); 13710 13711 if (srd_buckets[hash].srdb_srdp != NULL) { 13712 mutex_enter(&srd_buckets[hash].srdb_lock); 13713 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13714 srdp = srdp->srd_hash) { 13715 if (srdp->srd_evp == evp) { 13716 ASSERT(srdp->srd_refcnt >= 0); 13717 sfmmup->sfmmu_srdp = srdp; 13718 atomic_add_32( 13719 (volatile uint_t *)&srdp->srd_refcnt, 1); 13720 mutex_exit(&srd_buckets[hash].srdb_lock); 13721 return; 13722 } 13723 } 13724 mutex_exit(&srd_buckets[hash].srdb_lock); 13725 } 13726 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13727 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13728 13729 newsrdp->srd_evp = evp; 13730 newsrdp->srd_refcnt = 1; 13731 newsrdp->srd_hmergnfree = NULL; 13732 newsrdp->srd_ismrgnfree = NULL; 13733 13734 mutex_enter(&srd_buckets[hash].srdb_lock); 13735 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13736 srdp = srdp->srd_hash) { 13737 if (srdp->srd_evp == evp) { 13738 ASSERT(srdp->srd_refcnt >= 0); 13739 sfmmup->sfmmu_srdp = srdp; 13740 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 13741 mutex_exit(&srd_buckets[hash].srdb_lock); 13742 kmem_cache_free(srd_cache, newsrdp); 13743 return; 13744 } 13745 } 13746 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13747 srd_buckets[hash].srdb_srdp = newsrdp; 13748 sfmmup->sfmmu_srdp = newsrdp; 13749 13750 mutex_exit(&srd_buckets[hash].srdb_lock); 13751 13752 } 13753 13754 static void 13755 sfmmu_leave_srd(sfmmu_t *sfmmup) 13756 { 13757 vnode_t *evp; 13758 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13759 uint_t hash; 13760 sf_srd_t **prev_srdpp; 13761 sf_region_t *rgnp; 13762 sf_region_t *nrgnp; 13763 #ifdef DEBUG 13764 int rgns = 0; 13765 #endif 13766 int i; 13767 13768 ASSERT(sfmmup != ksfmmup); 13769 ASSERT(srdp != NULL); 13770 ASSERT(srdp->srd_refcnt > 0); 13771 ASSERT(sfmmup->sfmmu_scdp == NULL); 13772 ASSERT(sfmmup->sfmmu_free == 1); 13773 13774 sfmmup->sfmmu_srdp = NULL; 13775 evp = srdp->srd_evp; 13776 ASSERT(evp != NULL); 13777 if (atomic_add_32_nv( 13778 (volatile uint_t *)&srdp->srd_refcnt, -1)) { 13779 VN_RELE(evp); 13780 return; 13781 } 13782 13783 hash = SRD_HASH_FUNCTION(evp); 13784 mutex_enter(&srd_buckets[hash].srdb_lock); 13785 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13786 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13787 if (srdp->srd_evp == evp) { 13788 break; 13789 } 13790 } 13791 if (srdp == NULL || srdp->srd_refcnt) { 13792 mutex_exit(&srd_buckets[hash].srdb_lock); 13793 VN_RELE(evp); 13794 return; 13795 } 13796 *prev_srdpp = srdp->srd_hash; 13797 mutex_exit(&srd_buckets[hash].srdb_lock); 13798 13799 ASSERT(srdp->srd_refcnt == 0); 13800 VN_RELE(evp); 13801 13802 #ifdef DEBUG 13803 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13804 ASSERT(srdp->srd_rgnhash[i] == NULL); 13805 } 13806 #endif /* DEBUG */ 13807 13808 /* free each hme regions in the srd */ 13809 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13810 nrgnp = rgnp->rgn_next; 13811 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13812 ASSERT(rgnp->rgn_refcnt == 0); 13813 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13814 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13815 ASSERT(rgnp->rgn_hmeflags == 0); 13816 
ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13817 #ifdef DEBUG 13818 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13819 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13820 } 13821 rgns++; 13822 #endif /* DEBUG */ 13823 kmem_cache_free(region_cache, rgnp); 13824 } 13825 ASSERT(rgns == srdp->srd_next_hmerid); 13826 13827 #ifdef DEBUG 13828 rgns = 0; 13829 #endif 13830 /* free each ism rgns in the srd */ 13831 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13832 nrgnp = rgnp->rgn_next; 13833 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13834 ASSERT(rgnp->rgn_refcnt == 0); 13835 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13836 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13837 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13838 #ifdef DEBUG 13839 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13840 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13841 } 13842 rgns++; 13843 #endif /* DEBUG */ 13844 kmem_cache_free(region_cache, rgnp); 13845 } 13846 ASSERT(rgns == srdp->srd_next_ismrid); 13847 ASSERT(srdp->srd_ismbusyrgns == 0); 13848 ASSERT(srdp->srd_hmebusyrgns == 0); 13849 13850 srdp->srd_next_ismrid = 0; 13851 srdp->srd_next_hmerid = 0; 13852 13853 bzero((void *)srdp->srd_ismrgnp, 13854 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13855 bzero((void *)srdp->srd_hmergnp, 13856 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13857 13858 ASSERT(srdp->srd_scdp == NULL); 13859 kmem_cache_free(srd_cache, srdp); 13860 } 13861 13862 /* ARGSUSED */ 13863 static int 13864 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13865 { 13866 sf_srd_t *srdp = (sf_srd_t *)buf; 13867 bzero(buf, sizeof (*srdp)); 13868 13869 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13870 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13871 return (0); 13872 } 13873 13874 /* ARGSUSED */ 13875 static void 13876 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 13877 { 13878 sf_srd_t *srdp = (sf_srd_t *)buf; 13879 13880 mutex_destroy(&srdp->srd_mutex); 13881 mutex_destroy(&srdp->srd_scd_mutex); 13882 } 13883 13884 /* 13885 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 13886 * at the same time for the same process and address range. This is ensured by 13887 * the fact that address space is locked as writer when a process joins the 13888 * regions. Therefore there's no need to hold an srd lock during the entire 13889 * execution of hat_join_region()/hat_leave_region(). 13890 */ 13891 13892 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 13893 (((uintptr_t)(obj)) >> 11)) & \ 13894 srd_rgn_hashmask) 13895 /* 13896 * This routine implements the shared context functionality required when 13897 * attaching a segment to an address space. It must be called from 13898 * hat_share() for D(ISM) segments and from segvn_create() for segments 13899 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 13900 * which is saved in the private segment data for hme segments and 13901 * the ism_map structure for ism segments. 
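 * HAT_INVALID_REGION_COOKIE is returned when region sharing cannot be
 * used for the segment: no srd is attached, the size is zero, an hme
 * region is writable or does not belong to the exec vnode, or the srd
 * has run out of region ids.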
13902 */ 13903 hat_region_cookie_t 13904 hat_join_region(struct hat *sfmmup, 13905 caddr_t r_saddr, 13906 size_t r_size, 13907 void *r_obj, 13908 u_offset_t r_objoff, 13909 uchar_t r_perm, 13910 uchar_t r_pgszc, 13911 hat_rgn_cb_func_t r_cb_function, 13912 uint_t flags) 13913 { 13914 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13915 uint_t rhash; 13916 uint_t rid; 13917 hatlock_t *hatlockp; 13918 sf_region_t *rgnp; 13919 sf_region_t *new_rgnp = NULL; 13920 int i; 13921 uint16_t *nextidp; 13922 sf_region_t **freelistp; 13923 int maxids; 13924 sf_region_t **rarrp; 13925 uint16_t *busyrgnsp; 13926 ulong_t rttecnt; 13927 uchar_t tteflag; 13928 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 13929 int text = (r_type == HAT_REGION_TEXT); 13930 13931 if (srdp == NULL || r_size == 0) { 13932 return (HAT_INVALID_REGION_COOKIE); 13933 } 13934 13935 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 13936 ASSERT(sfmmup != ksfmmup); 13937 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 13938 ASSERT(srdp->srd_refcnt > 0); 13939 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 13940 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 13941 ASSERT(r_pgszc < mmu_page_sizes); 13942 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 13943 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 13944 panic("hat_join_region: region addr or size is not aligned\n"); 13945 } 13946 13947 13948 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 13949 SFMMU_REGION_HME; 13950 /* 13951 * Currently only support shared hmes for the read only main text 13952 * region. 13953 */ 13954 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 13955 (r_perm & PROT_WRITE))) { 13956 return (HAT_INVALID_REGION_COOKIE); 13957 } 13958 13959 rhash = RGN_HASH_FUNCTION(r_obj); 13960 13961 if (r_type == SFMMU_REGION_ISM) { 13962 nextidp = &srdp->srd_next_ismrid; 13963 freelistp = &srdp->srd_ismrgnfree; 13964 maxids = SFMMU_MAX_ISM_REGIONS; 13965 rarrp = srdp->srd_ismrgnp; 13966 busyrgnsp = &srdp->srd_ismbusyrgns; 13967 } else { 13968 nextidp = &srdp->srd_next_hmerid; 13969 freelistp = &srdp->srd_hmergnfree; 13970 maxids = SFMMU_MAX_HME_REGIONS; 13971 rarrp = srdp->srd_hmergnp; 13972 busyrgnsp = &srdp->srd_hmebusyrgns; 13973 } 13974 13975 mutex_enter(&srdp->srd_mutex); 13976 13977 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 13978 rgnp = rgnp->rgn_hash) { 13979 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 13980 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 13981 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 13982 break; 13983 } 13984 } 13985 13986 rfound: 13987 if (rgnp != NULL) { 13988 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 13989 ASSERT(rgnp->rgn_cb_function == r_cb_function); 13990 ASSERT(rgnp->rgn_refcnt >= 0); 13991 rid = rgnp->rgn_id; 13992 ASSERT(rid < maxids); 13993 ASSERT(rarrp[rid] == rgnp); 13994 ASSERT(rid < *nextidp); 13995 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 13996 mutex_exit(&srdp->srd_mutex); 13997 if (new_rgnp != NULL) { 13998 kmem_cache_free(region_cache, new_rgnp); 13999 } 14000 if (r_type == SFMMU_REGION_HME) { 14001 int myjoin = 14002 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 14003 14004 sfmmu_link_to_hmeregion(sfmmup, rgnp); 14005 /* 14006 * bitmap should be updated after linking sfmmu on 14007 * region list so that pageunload() doesn't skip 14008 * TSB/TLB flush. As soon as bitmap is updated another 14009 * thread in this process can already start accessing 14010 * this region. 
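 * For that reason SF_RGNMAP_ADD() is deferred until the ttecnt
 * accounting and TLB/TSB setup below have completed.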
14011 */ 14012 /* 14013 * Normally ttecnt accounting is done as part of 14014 * pagefault handling. But a process may not take any 14015 * pagefaults on shared hmeblks created by some other 14016 * process. To compensate for this assume that the 14017 * entire region will end up faulted in using 14018 * the region's pagesize. 14019 * 14020 */ 14021 if (r_pgszc > TTE8K) { 14022 tteflag = 1 << r_pgszc; 14023 if (disable_large_pages & tteflag) { 14024 tteflag = 0; 14025 } 14026 } else { 14027 tteflag = 0; 14028 } 14029 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 14030 hatlockp = sfmmu_hat_enter(sfmmup); 14031 sfmmup->sfmmu_rtteflags |= tteflag; 14032 sfmmu_hat_exit(hatlockp); 14033 } 14034 hatlockp = sfmmu_hat_enter(sfmmup); 14035 14036 /* 14037 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 14038 * region to allow for large page allocation failure. 14039 */ 14040 if (r_pgszc >= TTE4M) { 14041 sfmmup->sfmmu_tsb0_4minflcnt += 14042 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14043 } 14044 14045 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14046 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14047 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14048 rttecnt); 14049 14050 if (text && r_pgszc >= TTE4M && 14051 (tteflag || ((disable_large_pages >> TTE4M) & 14052 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 14053 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 14054 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 14055 } 14056 14057 sfmmu_hat_exit(hatlockp); 14058 /* 14059 * On Panther we need to make sure TLB is programmed 14060 * to accept 32M/256M pages. Call 14061 * sfmmu_check_page_sizes() now to make sure TLB is 14062 * setup before making hmeregions visible to other 14063 * threads. 14064 */ 14065 sfmmu_check_page_sizes(sfmmup, 1); 14066 hatlockp = sfmmu_hat_enter(sfmmup); 14067 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14068 14069 /* 14070 * if context is invalid tsb miss exception code will 14071 * call sfmmu_check_page_sizes() and update tsbmiss 14072 * area later. 14073 */ 14074 kpreempt_disable(); 14075 if (myjoin && 14076 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 14077 != INVALID_CONTEXT)) { 14078 struct tsbmiss *tsbmp; 14079 14080 tsbmp = &tsbmiss_area[CPU->cpu_id]; 14081 ASSERT(sfmmup == tsbmp->usfmmup); 14082 BT_SET(tsbmp->shmermap, rid); 14083 if (r_pgszc > TTE64K) { 14084 tsbmp->uhat_rtteflags |= tteflag; 14085 } 14086 14087 } 14088 kpreempt_enable(); 14089 14090 sfmmu_hat_exit(hatlockp); 14091 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 14092 HAT_INVALID_REGION_COOKIE); 14093 } else { 14094 hatlockp = sfmmu_hat_enter(sfmmup); 14095 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 14096 sfmmu_hat_exit(hatlockp); 14097 } 14098 ASSERT(rid < maxids); 14099 14100 if (r_type == SFMMU_REGION_ISM) { 14101 sfmmu_find_scd(sfmmup); 14102 } 14103 return ((hat_region_cookie_t)((uint64_t)rid)); 14104 } 14105 14106 ASSERT(new_rgnp == NULL); 14107 14108 if (*busyrgnsp >= maxids) { 14109 mutex_exit(&srdp->srd_mutex); 14110 return (HAT_INVALID_REGION_COOKIE); 14111 } 14112 14113 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14114 if (*freelistp != NULL) { 14115 rgnp = *freelistp; 14116 *freelistp = rgnp->rgn_next; 14117 ASSERT(rgnp->rgn_id < *nextidp); 14118 ASSERT(rgnp->rgn_id < maxids); 14119 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14120 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14121 == r_type); 14122 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14123 ASSERT(rgnp->rgn_hmeflags == 0); 14124 } else { 14125 /* 14126 * release local locks before memory allocation. 
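 * While srd_mutex is dropped another thread may insert an identical
 * region, so the hash chain is rescanned after the allocation and the
 * freshly allocated region is freed if a match is found.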
14127 */ 14128 mutex_exit(&srdp->srd_mutex); 14129 14130 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14131 14132 mutex_enter(&srdp->srd_mutex); 14133 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14134 rgnp = rgnp->rgn_hash) { 14135 if (rgnp->rgn_saddr == r_saddr && 14136 rgnp->rgn_size == r_size && 14137 rgnp->rgn_obj == r_obj && 14138 rgnp->rgn_objoff == r_objoff && 14139 rgnp->rgn_perm == r_perm && 14140 rgnp->rgn_pgszc == r_pgszc) { 14141 break; 14142 } 14143 } 14144 if (rgnp != NULL) { 14145 goto rfound; 14146 } 14147 14148 if (*nextidp >= maxids) { 14149 mutex_exit(&srdp->srd_mutex); 14150 goto fail; 14151 } 14152 rgnp = new_rgnp; 14153 new_rgnp = NULL; 14154 rgnp->rgn_id = (*nextidp)++; 14155 ASSERT(rgnp->rgn_id < maxids); 14156 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14157 rarrp[rgnp->rgn_id] = rgnp; 14158 } 14159 14160 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14161 ASSERT(rgnp->rgn_hmeflags == 0); 14162 #ifdef DEBUG 14163 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14164 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14165 } 14166 #endif 14167 rgnp->rgn_saddr = r_saddr; 14168 rgnp->rgn_size = r_size; 14169 rgnp->rgn_obj = r_obj; 14170 rgnp->rgn_objoff = r_objoff; 14171 rgnp->rgn_perm = r_perm; 14172 rgnp->rgn_pgszc = r_pgszc; 14173 rgnp->rgn_flags = r_type; 14174 rgnp->rgn_refcnt = 0; 14175 rgnp->rgn_cb_function = r_cb_function; 14176 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14177 srdp->srd_rgnhash[rhash] = rgnp; 14178 (*busyrgnsp)++; 14179 ASSERT(*busyrgnsp <= maxids); 14180 goto rfound; 14181 14182 fail: 14183 ASSERT(new_rgnp != NULL); 14184 kmem_cache_free(region_cache, new_rgnp); 14185 return (HAT_INVALID_REGION_COOKIE); 14186 } 14187 14188 /* 14189 * This function implements the shared context functionality required 14190 * when detaching a segment from an address space. It must be called 14191 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14192 * for segments with a valid region_cookie. 14193 * It will also be called from all seg_vn routines which change a 14194 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14195 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14196 * from segvn_fault(). 14197 */ 14198 void 14199 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14200 { 14201 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14202 sf_scd_t *scdp; 14203 uint_t rhash; 14204 uint_t rid = (uint_t)((uint64_t)rcookie); 14205 hatlock_t *hatlockp = NULL; 14206 sf_region_t *rgnp; 14207 sf_region_t **prev_rgnpp; 14208 sf_region_t *cur_rgnp; 14209 void *r_obj; 14210 int i; 14211 caddr_t r_saddr; 14212 caddr_t r_eaddr; 14213 size_t r_size; 14214 uchar_t r_pgszc; 14215 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14216 14217 ASSERT(sfmmup != ksfmmup); 14218 ASSERT(srdp != NULL); 14219 ASSERT(srdp->srd_refcnt > 0); 14220 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14221 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14222 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14223 14224 r_type = (r_type == HAT_REGION_ISM) ? 
SFMMU_REGION_ISM : 14225 SFMMU_REGION_HME; 14226 14227 if (r_type == SFMMU_REGION_ISM) { 14228 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14229 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14230 rgnp = srdp->srd_ismrgnp[rid]; 14231 } else { 14232 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14233 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14234 rgnp = srdp->srd_hmergnp[rid]; 14235 } 14236 ASSERT(rgnp != NULL); 14237 ASSERT(rgnp->rgn_id == rid); 14238 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14239 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14240 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14241 14242 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 14243 if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) { 14244 xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr, 14245 rgnp->rgn_size, 0, NULL); 14246 } 14247 14248 if (sfmmup->sfmmu_free) { 14249 ulong_t rttecnt; 14250 r_pgszc = rgnp->rgn_pgszc; 14251 r_size = rgnp->rgn_size; 14252 14253 ASSERT(sfmmup->sfmmu_scdp == NULL); 14254 if (r_type == SFMMU_REGION_ISM) { 14255 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14256 } else { 14257 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14258 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14259 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14260 14261 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14262 -rttecnt); 14263 14264 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14265 } 14266 } else if (r_type == SFMMU_REGION_ISM) { 14267 hatlockp = sfmmu_hat_enter(sfmmup); 14268 ASSERT(rid < srdp->srd_next_ismrid); 14269 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14270 scdp = sfmmup->sfmmu_scdp; 14271 if (scdp != NULL && 14272 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14273 sfmmu_leave_scd(sfmmup, r_type); 14274 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14275 } 14276 sfmmu_hat_exit(hatlockp); 14277 } else { 14278 ulong_t rttecnt; 14279 r_pgszc = rgnp->rgn_pgszc; 14280 r_saddr = rgnp->rgn_saddr; 14281 r_size = rgnp->rgn_size; 14282 r_eaddr = r_saddr + r_size; 14283 14284 ASSERT(r_type == SFMMU_REGION_HME); 14285 hatlockp = sfmmu_hat_enter(sfmmup); 14286 ASSERT(rid < srdp->srd_next_hmerid); 14287 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14288 14289 /* 14290 * If region is part of an SCD call sfmmu_leave_scd(). 14291 * Otherwise if process is not exiting and has valid context 14292 * just drop the context on the floor to lose stale TLB 14293 * entries and force the update of tsb miss area to reflect 14294 * the new region map. After that clean our TSB entries. 14295 */ 14296 scdp = sfmmup->sfmmu_scdp; 14297 if (scdp != NULL && 14298 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14299 sfmmu_leave_scd(sfmmup, r_type); 14300 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14301 } 14302 sfmmu_invalidate_ctx(sfmmup); 14303 14304 i = TTE8K; 14305 while (i < mmu_page_sizes) { 14306 if (rgnp->rgn_ttecnt[i] != 0) { 14307 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14308 r_eaddr, i); 14309 if (i < TTE4M) { 14310 i = TTE4M; 14311 continue; 14312 } else { 14313 break; 14314 } 14315 } 14316 i++; 14317 } 14318 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. 
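 * This undoes the tsb0 inflation that hat_join_region() and
 * hat_dup_region() added when the hat attached to the region.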
*/ 14319 if (r_pgszc >= TTE4M) { 14320 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14321 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14322 rttecnt); 14323 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14324 } 14325 14326 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14327 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14328 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14329 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14330 14331 sfmmu_hat_exit(hatlockp); 14332 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14333 /* sfmmup left the scd, grow private tsb */ 14334 sfmmu_check_page_sizes(sfmmup, 1); 14335 } else { 14336 sfmmu_check_page_sizes(sfmmup, 0); 14337 } 14338 } 14339 14340 if (r_type == SFMMU_REGION_HME) { 14341 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14342 } 14343 14344 r_obj = rgnp->rgn_obj; 14345 if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) { 14346 return; 14347 } 14348 14349 /* 14350 * looks like nobody uses this region anymore. Free it. 14351 */ 14352 rhash = RGN_HASH_FUNCTION(r_obj); 14353 mutex_enter(&srdp->srd_mutex); 14354 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14355 (cur_rgnp = *prev_rgnpp) != NULL; 14356 prev_rgnpp = &cur_rgnp->rgn_hash) { 14357 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14358 break; 14359 } 14360 } 14361 14362 if (cur_rgnp == NULL) { 14363 mutex_exit(&srdp->srd_mutex); 14364 return; 14365 } 14366 14367 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14368 *prev_rgnpp = rgnp->rgn_hash; 14369 if (r_type == SFMMU_REGION_ISM) { 14370 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14371 ASSERT(rid < srdp->srd_next_ismrid); 14372 rgnp->rgn_next = srdp->srd_ismrgnfree; 14373 srdp->srd_ismrgnfree = rgnp; 14374 ASSERT(srdp->srd_ismbusyrgns > 0); 14375 srdp->srd_ismbusyrgns--; 14376 mutex_exit(&srdp->srd_mutex); 14377 return; 14378 } 14379 mutex_exit(&srdp->srd_mutex); 14380 14381 /* 14382 * Destroy region's hmeblks. 14383 */ 14384 sfmmu_unload_hmeregion(srdp, rgnp); 14385 14386 rgnp->rgn_hmeflags = 0; 14387 14388 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14389 ASSERT(rgnp->rgn_id == rid); 14390 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14391 rgnp->rgn_ttecnt[i] = 0; 14392 } 14393 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14394 mutex_enter(&srdp->srd_mutex); 14395 ASSERT(rid < srdp->srd_next_hmerid); 14396 rgnp->rgn_next = srdp->srd_hmergnfree; 14397 srdp->srd_hmergnfree = rgnp; 14398 ASSERT(srdp->srd_hmebusyrgns > 0); 14399 srdp->srd_hmebusyrgns--; 14400 mutex_exit(&srdp->srd_mutex); 14401 } 14402 14403 /* 14404 * For now only called for hmeblk regions and not for ISM regions. 
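 * Takes an extra reference on the region, links this hat onto the
 * region's sfmmu list and replicates the ttecnt and tsb0 inflation
 * accounting, so the duplicated hat is charged for the shared mappings
 * just as the original hat was in hat_join_region().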
14405 */ 14406 void 14407 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14408 { 14409 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14410 uint_t rid = (uint_t)((uint64_t)rcookie); 14411 sf_region_t *rgnp; 14412 sf_rgn_link_t *rlink; 14413 sf_rgn_link_t *hrlink; 14414 ulong_t rttecnt; 14415 14416 ASSERT(sfmmup != ksfmmup); 14417 ASSERT(srdp != NULL); 14418 ASSERT(srdp->srd_refcnt > 0); 14419 14420 ASSERT(rid < srdp->srd_next_hmerid); 14421 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14422 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14423 14424 rgnp = srdp->srd_hmergnp[rid]; 14425 ASSERT(rgnp->rgn_refcnt > 0); 14426 ASSERT(rgnp->rgn_id == rid); 14427 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14428 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14429 14430 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14431 14432 /* LINTED: constant in conditional context */ 14433 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14434 ASSERT(rlink != NULL); 14435 mutex_enter(&rgnp->rgn_mutex); 14436 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14437 /* LINTED: constant in conditional context */ 14438 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14439 ASSERT(hrlink != NULL); 14440 ASSERT(hrlink->prev == NULL); 14441 rlink->next = rgnp->rgn_sfmmu_head; 14442 rlink->prev = NULL; 14443 hrlink->prev = sfmmup; 14444 /* 14445 * make sure rlink's next field is correct 14446 * before making this link visible. 14447 */ 14448 membar_stst(); 14449 rgnp->rgn_sfmmu_head = sfmmup; 14450 mutex_exit(&rgnp->rgn_mutex); 14451 14452 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14453 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14454 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14455 /* update tsb0 inflation count */ 14456 if (rgnp->rgn_pgszc >= TTE4M) { 14457 sfmmup->sfmmu_tsb0_4minflcnt += 14458 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14459 } 14460 /* 14461 * Update regionid bitmask without hat lock since no other thread 14462 * can update this region bitmask right now. 
14463 */ 14464 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14465 } 14466 14467 /* ARGSUSED */ 14468 static int 14469 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14470 { 14471 sf_region_t *rgnp = (sf_region_t *)buf; 14472 bzero(buf, sizeof (*rgnp)); 14473 14474 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14475 14476 return (0); 14477 } 14478 14479 /* ARGSUSED */ 14480 static void 14481 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14482 { 14483 sf_region_t *rgnp = (sf_region_t *)buf; 14484 mutex_destroy(&rgnp->rgn_mutex); 14485 } 14486 14487 static int 14488 sfrgnmap_isnull(sf_region_map_t *map) 14489 { 14490 int i; 14491 14492 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14493 if (map->bitmap[i] != 0) { 14494 return (0); 14495 } 14496 } 14497 return (1); 14498 } 14499 14500 static int 14501 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14502 { 14503 int i; 14504 14505 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14506 if (map->bitmap[i] != 0) { 14507 return (0); 14508 } 14509 } 14510 return (1); 14511 } 14512 14513 #ifdef DEBUG 14514 static void 14515 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14516 { 14517 sfmmu_t *sp; 14518 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14519 14520 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14521 ASSERT(srdp == sp->sfmmu_srdp); 14522 if (sp == sfmmup) { 14523 if (onlist) { 14524 return; 14525 } else { 14526 panic("shctx: sfmmu 0x%p found on scd" 14527 "list 0x%p", sfmmup, *headp); 14528 } 14529 } 14530 } 14531 if (onlist) { 14532 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14533 sfmmup, *headp); 14534 } else { 14535 return; 14536 } 14537 } 14538 #else /* DEBUG */ 14539 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14540 #endif /* DEBUG */ 14541 14542 /* 14543 * Removes an sfmmu from the SCD sfmmu list. 14544 */ 14545 static void 14546 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14547 { 14548 ASSERT(sfmmup->sfmmu_srdp != NULL); 14549 check_scd_sfmmu_list(headp, sfmmup, 1); 14550 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14551 ASSERT(*headp != sfmmup); 14552 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14553 sfmmup->sfmmu_scd_link.next; 14554 } else { 14555 ASSERT(*headp == sfmmup); 14556 *headp = sfmmup->sfmmu_scd_link.next; 14557 } 14558 if (sfmmup->sfmmu_scd_link.next != NULL) { 14559 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14560 sfmmup->sfmmu_scd_link.prev; 14561 } 14562 } 14563 14564 14565 /* 14566 * Adds an sfmmu to the start of the queue. 14567 */ 14568 static void 14569 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14570 { 14571 check_scd_sfmmu_list(headp, sfmmup, 0); 14572 sfmmup->sfmmu_scd_link.prev = NULL; 14573 sfmmup->sfmmu_scd_link.next = *headp; 14574 if (*headp != NULL) 14575 (*headp)->sfmmu_scd_link.prev = sfmmup; 14576 *headp = sfmmup; 14577 } 14578 14579 /* 14580 * Remove an scd from the start of the queue. 14581 */ 14582 static void 14583 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14584 { 14585 if (scdp->scd_prev != NULL) { 14586 ASSERT(*headp != scdp); 14587 scdp->scd_prev->scd_next = scdp->scd_next; 14588 } else { 14589 ASSERT(*headp == scdp); 14590 *headp = scdp->scd_next; 14591 } 14592 14593 if (scdp->scd_next != NULL) { 14594 scdp->scd_next->scd_prev = scdp->scd_prev; 14595 } 14596 } 14597 14598 /* 14599 * Add an scd to the start of the queue. 
14600 */ 14601 static void 14602 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14603 { 14604 scdp->scd_prev = NULL; 14605 scdp->scd_next = *headp; 14606 if (*headp != NULL) { 14607 (*headp)->scd_prev = scdp; 14608 } 14609 *headp = scdp; 14610 } 14611 14612 static int 14613 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14614 { 14615 uint_t rid; 14616 uint_t i; 14617 uint_t j; 14618 ulong_t w; 14619 sf_region_t *rgnp; 14620 ulong_t tte8k_cnt = 0; 14621 ulong_t tte4m_cnt = 0; 14622 uint_t tsb_szc; 14623 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14624 sfmmu_t *ism_hatid; 14625 struct tsb_info *newtsb; 14626 int szc; 14627 14628 ASSERT(srdp != NULL); 14629 14630 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14631 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14632 continue; 14633 } 14634 j = 0; 14635 while (w) { 14636 if (!(w & 0x1)) { 14637 j++; 14638 w >>= 1; 14639 continue; 14640 } 14641 rid = (i << BT_ULSHIFT) | j; 14642 j++; 14643 w >>= 1; 14644 14645 if (rid < SFMMU_MAX_HME_REGIONS) { 14646 rgnp = srdp->srd_hmergnp[rid]; 14647 ASSERT(rgnp->rgn_id == rid); 14648 ASSERT(rgnp->rgn_refcnt > 0); 14649 14650 if (rgnp->rgn_pgszc < TTE4M) { 14651 tte8k_cnt += rgnp->rgn_size >> 14652 TTE_PAGE_SHIFT(TTE8K); 14653 } else { 14654 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14655 tte4m_cnt += rgnp->rgn_size >> 14656 TTE_PAGE_SHIFT(TTE4M); 14657 /* 14658 * Inflate SCD tsb0 by preallocating 14659 * 1/4 8k ttecnt for 4M regions to 14660 * allow for lgpg alloc failure. 14661 */ 14662 tte8k_cnt += rgnp->rgn_size >> 14663 (TTE_PAGE_SHIFT(TTE8K) + 2); 14664 } 14665 } else { 14666 rid -= SFMMU_MAX_HME_REGIONS; 14667 rgnp = srdp->srd_ismrgnp[rid]; 14668 ASSERT(rgnp->rgn_id == rid); 14669 ASSERT(rgnp->rgn_refcnt > 0); 14670 14671 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14672 ASSERT(ism_hatid->sfmmu_ismhat); 14673 14674 for (szc = 0; szc < TTE4M; szc++) { 14675 tte8k_cnt += 14676 ism_hatid->sfmmu_ttecnt[szc] << 14677 TTE_BSZS_SHIFT(szc); 14678 } 14679 14680 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14681 if (rgnp->rgn_pgszc >= TTE4M) { 14682 tte4m_cnt += rgnp->rgn_size >> 14683 TTE_PAGE_SHIFT(TTE4M); 14684 } 14685 } 14686 } 14687 } 14688 14689 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14690 14691 /* Allocate both the SCD TSBs here. */ 14692 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14693 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14694 (tsb_szc <= TSB_4M_SZCODE || 14695 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14696 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14697 TSB_ALLOC, scsfmmup))) { 14698 14699 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14700 return (TSB_ALLOCFAIL); 14701 } else { 14702 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14703 14704 if (tte4m_cnt) { 14705 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14706 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14707 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14708 (tsb_szc <= TSB_4M_SZCODE || 14709 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14710 TSB4M|TSB32M|TSB256M, 14711 TSB_ALLOC, scsfmmup))) { 14712 /* 14713 * If we fail to allocate the 2nd shared tsb, 14714 * just free the 1st tsb, return failure. 
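 * sfmmu_alloc_scd() responds to TSB_ALLOCFAIL by freeing the partially
 * constructed SCD and returning NULL.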
14715 */ 14716 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14717 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14718 return (TSB_ALLOCFAIL); 14719 } else { 14720 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14721 newtsb->tsb_flags |= TSB_SHAREDCTX; 14722 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14723 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14724 } 14725 } 14726 SFMMU_STAT(sf_scd_1sttsb_alloc); 14727 } 14728 return (TSB_SUCCESS); 14729 } 14730 14731 static void 14732 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14733 { 14734 while (scd_sfmmu->sfmmu_tsb != NULL) { 14735 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14736 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14737 scd_sfmmu->sfmmu_tsb = next; 14738 } 14739 } 14740 14741 /* 14742 * Link the sfmmu onto the hme region list. 14743 */ 14744 void 14745 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14746 { 14747 uint_t rid; 14748 sf_rgn_link_t *rlink; 14749 sfmmu_t *head; 14750 sf_rgn_link_t *hrlink; 14751 14752 rid = rgnp->rgn_id; 14753 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14754 14755 /* LINTED: constant in conditional context */ 14756 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14757 ASSERT(rlink != NULL); 14758 mutex_enter(&rgnp->rgn_mutex); 14759 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14760 rlink->next = NULL; 14761 rlink->prev = NULL; 14762 /* 14763 * make sure rlink's next field is NULL 14764 * before making this link visible. 14765 */ 14766 membar_stst(); 14767 rgnp->rgn_sfmmu_head = sfmmup; 14768 } else { 14769 /* LINTED: constant in conditional context */ 14770 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14771 ASSERT(hrlink != NULL); 14772 ASSERT(hrlink->prev == NULL); 14773 rlink->next = head; 14774 rlink->prev = NULL; 14775 hrlink->prev = sfmmup; 14776 /* 14777 * make sure rlink's next field is correct 14778 * before making this link visible. 14779 */ 14780 membar_stst(); 14781 rgnp->rgn_sfmmu_head = sfmmup; 14782 } 14783 mutex_exit(&rgnp->rgn_mutex); 14784 } 14785 14786 /* 14787 * Unlink the sfmmu from the hme region list. 14788 */ 14789 void 14790 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14791 { 14792 uint_t rid; 14793 sf_rgn_link_t *rlink; 14794 14795 rid = rgnp->rgn_id; 14796 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14797 14798 /* LINTED: constant in conditional context */ 14799 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14800 ASSERT(rlink != NULL); 14801 mutex_enter(&rgnp->rgn_mutex); 14802 if (rgnp->rgn_sfmmu_head == sfmmup) { 14803 sfmmu_t *next = rlink->next; 14804 rgnp->rgn_sfmmu_head = next; 14805 /* 14806 * if we are stopped by xc_attention() after this 14807 * point the forward link walking in 14808 * sfmmu_rgntlb_demap() will work correctly since the 14809 * head correctly points to the next element. 
14810 */ 14811 membar_stst(); 14812 rlink->next = NULL; 14813 ASSERT(rlink->prev == NULL); 14814 if (next != NULL) { 14815 sf_rgn_link_t *nrlink; 14816 /* LINTED: constant in conditional context */ 14817 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14818 ASSERT(nrlink != NULL); 14819 ASSERT(nrlink->prev == sfmmup); 14820 nrlink->prev = NULL; 14821 } 14822 } else { 14823 sfmmu_t *next = rlink->next; 14824 sfmmu_t *prev = rlink->prev; 14825 sf_rgn_link_t *prlink; 14826 14827 ASSERT(prev != NULL); 14828 /* LINTED: constant in conditional context */ 14829 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14830 ASSERT(prlink != NULL); 14831 ASSERT(prlink->next == sfmmup); 14832 prlink->next = next; 14833 /* 14834 * if we are stopped by xc_attention() 14835 * after this point the forward link walking 14836 * will work correctly since the prev element 14837 * correctly points to the next element. 14838 */ 14839 membar_stst(); 14840 rlink->next = NULL; 14841 rlink->prev = NULL; 14842 if (next != NULL) { 14843 sf_rgn_link_t *nrlink; 14844 /* LINTED: constant in conditional context */ 14845 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14846 ASSERT(nrlink != NULL); 14847 ASSERT(nrlink->prev == sfmmup); 14848 nrlink->prev = prev; 14849 } 14850 } 14851 mutex_exit(&rgnp->rgn_mutex); 14852 } 14853 14854 /* 14855 * Link scd sfmmu onto ism or hme region list for each region in the 14856 * scd region map. 14857 */ 14858 void 14859 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14860 { 14861 uint_t rid; 14862 uint_t i; 14863 uint_t j; 14864 ulong_t w; 14865 sf_region_t *rgnp; 14866 sfmmu_t *scsfmmup; 14867 14868 scsfmmup = scdp->scd_sfmmup; 14869 ASSERT(scsfmmup->sfmmu_scdhat); 14870 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14871 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14872 continue; 14873 } 14874 j = 0; 14875 while (w) { 14876 if (!(w & 0x1)) { 14877 j++; 14878 w >>= 1; 14879 continue; 14880 } 14881 rid = (i << BT_ULSHIFT) | j; 14882 j++; 14883 w >>= 1; 14884 14885 if (rid < SFMMU_MAX_HME_REGIONS) { 14886 rgnp = srdp->srd_hmergnp[rid]; 14887 ASSERT(rgnp->rgn_id == rid); 14888 ASSERT(rgnp->rgn_refcnt > 0); 14889 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 14890 } else { 14891 sfmmu_t *ism_hatid = NULL; 14892 ism_ment_t *ism_ment; 14893 rid -= SFMMU_MAX_HME_REGIONS; 14894 rgnp = srdp->srd_ismrgnp[rid]; 14895 ASSERT(rgnp->rgn_id == rid); 14896 ASSERT(rgnp->rgn_refcnt > 0); 14897 14898 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14899 ASSERT(ism_hatid->sfmmu_ismhat); 14900 ism_ment = &scdp->scd_ism_links[rid]; 14901 ism_ment->iment_hat = scsfmmup; 14902 ism_ment->iment_base_va = rgnp->rgn_saddr; 14903 mutex_enter(&ism_mlist_lock); 14904 iment_add(ism_ment, ism_hatid); 14905 mutex_exit(&ism_mlist_lock); 14906 14907 } 14908 } 14909 } 14910 } 14911 /* 14912 * Unlink scd sfmmu from ism or hme region list for each region in the 14913 * scd region map. 
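 * Hme regions are detached via sfmmu_unlink_from_hmeregion(); for ISM
 * regions the corresponding scd_ism_links entry is cleared and removed
 * from the ISM hat's mapping list under ism_mlist_lock.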
14914 */ 14915 void 14916 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14917 { 14918 uint_t rid; 14919 uint_t i; 14920 uint_t j; 14921 ulong_t w; 14922 sf_region_t *rgnp; 14923 sfmmu_t *scsfmmup; 14924 14925 scsfmmup = scdp->scd_sfmmup; 14926 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14927 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14928 continue; 14929 } 14930 j = 0; 14931 while (w) { 14932 if (!(w & 0x1)) { 14933 j++; 14934 w >>= 1; 14935 continue; 14936 } 14937 rid = (i << BT_ULSHIFT) | j; 14938 j++; 14939 w >>= 1; 14940 14941 if (rid < SFMMU_MAX_HME_REGIONS) { 14942 rgnp = srdp->srd_hmergnp[rid]; 14943 ASSERT(rgnp->rgn_id == rid); 14944 ASSERT(rgnp->rgn_refcnt > 0); 14945 sfmmu_unlink_from_hmeregion(scsfmmup, 14946 rgnp); 14947 14948 } else { 14949 sfmmu_t *ism_hatid = NULL; 14950 ism_ment_t *ism_ment; 14951 rid -= SFMMU_MAX_HME_REGIONS; 14952 rgnp = srdp->srd_ismrgnp[rid]; 14953 ASSERT(rgnp->rgn_id == rid); 14954 ASSERT(rgnp->rgn_refcnt > 0); 14955 14956 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14957 ASSERT(ism_hatid->sfmmu_ismhat); 14958 ism_ment = &scdp->scd_ism_links[rid]; 14959 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 14960 ASSERT(ism_ment->iment_base_va == 14961 rgnp->rgn_saddr); 14962 ism_ment->iment_hat = NULL; 14963 ism_ment->iment_base_va = 0; 14964 mutex_enter(&ism_mlist_lock); 14965 iment_sub(ism_ment, ism_hatid); 14966 mutex_exit(&ism_mlist_lock); 14967 14968 } 14969 } 14970 } 14971 } 14972 /* 14973 * Allocates and initialises a new SCD structure, this is called with 14974 * the srd_scd_mutex held and returns with the reference count 14975 * initialised to 1. 14976 */ 14977 static sf_scd_t * 14978 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 14979 { 14980 sf_scd_t *new_scdp; 14981 sfmmu_t *scsfmmup; 14982 int i; 14983 14984 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 14985 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 14986 14987 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 14988 new_scdp->scd_sfmmup = scsfmmup; 14989 scsfmmup->sfmmu_srdp = srdp; 14990 scsfmmup->sfmmu_scdp = new_scdp; 14991 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 14992 scsfmmup->sfmmu_scdhat = 1; 14993 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 14994 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 14995 14996 ASSERT(max_mmu_ctxdoms > 0); 14997 for (i = 0; i < max_mmu_ctxdoms; i++) { 14998 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 14999 scsfmmup->sfmmu_ctxs[i].gnum = 0; 15000 } 15001 15002 for (i = 0; i < MMU_PAGE_SIZES; i++) { 15003 new_scdp->scd_rttecnt[i] = 0; 15004 } 15005 15006 new_scdp->scd_region_map = *new_map; 15007 new_scdp->scd_refcnt = 1; 15008 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 15009 kmem_cache_free(scd_cache, new_scdp); 15010 kmem_cache_free(sfmmuid_cache, scsfmmup); 15011 return (NULL); 15012 } 15013 return (new_scdp); 15014 } 15015 15016 /* 15017 * The first phase of a process joining an SCD. The hat structure is 15018 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 15019 * and a cross-call with context invalidation is used to cause the 15020 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 15021 * routine. 
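 * As part of the join the shared regions' ttecnt is moved out of the
 * private hat accounting (into sfmmu_scdrttecnt), so the private TSBs
 * are no longer sized for translations that now live in the SCD TSBs.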
15022 */ 15023 static void 15024 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 15025 { 15026 hatlock_t *hatlockp; 15027 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15028 int i; 15029 sf_scd_t *old_scdp; 15030 15031 ASSERT(srdp != NULL); 15032 ASSERT(scdp != NULL); 15033 ASSERT(scdp->scd_refcnt > 0); 15034 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15035 15036 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 15037 ASSERT(old_scdp != scdp); 15038 15039 mutex_enter(&old_scdp->scd_mutex); 15040 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 15041 mutex_exit(&old_scdp->scd_mutex); 15042 /* 15043 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 15044 * include the shme rgn ttecnt for rgns that 15045 * were in the old SCD 15046 */ 15047 for (i = 0; i < mmu_page_sizes; i++) { 15048 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15049 old_scdp->scd_rttecnt[i]); 15050 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15051 sfmmup->sfmmu_scdrttecnt[i]); 15052 } 15053 } 15054 15055 /* 15056 * Move sfmmu to the scd lists. 15057 */ 15058 mutex_enter(&scdp->scd_mutex); 15059 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 15060 mutex_exit(&scdp->scd_mutex); 15061 SF_SCD_INCR_REF(scdp); 15062 15063 hatlockp = sfmmu_hat_enter(sfmmup); 15064 /* 15065 * For a multi-thread process, we must stop 15066 * all the other threads before joining the scd. 15067 */ 15068 15069 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 15070 15071 sfmmu_invalidate_ctx(sfmmup); 15072 sfmmup->sfmmu_scdp = scdp; 15073 15074 /* 15075 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 15076 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 15077 */ 15078 for (i = 0; i < mmu_page_sizes; i++) { 15079 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 15080 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 15081 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15082 -sfmmup->sfmmu_scdrttecnt[i]); 15083 } 15084 /* update tsb0 inflation count */ 15085 if (old_scdp != NULL) { 15086 sfmmup->sfmmu_tsb0_4minflcnt += 15087 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15088 } 15089 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 15090 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 15091 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15092 15093 sfmmu_hat_exit(hatlockp); 15094 15095 if (old_scdp != NULL) { 15096 SF_SCD_DECR_REF(srdp, old_scdp); 15097 } 15098 15099 } 15100 15101 /* 15102 * This routine is called by a process to become part of an SCD. It is called 15103 * from sfmmu_tsbmiss_exception() once most of the initial work has been 15104 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 
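 * It invalidates the process's private TSB(s) and sets HAT_CTX1_FLAG on
 * the SCD's ISM maps so the tsbmiss handler loads those mappings using
 * the shared context.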
15105 */ 15106 static void 15107 sfmmu_finish_join_scd(sfmmu_t *sfmmup) 15108 { 15109 struct tsb_info *tsbinfop; 15110 15111 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15112 ASSERT(sfmmup->sfmmu_scdp != NULL); 15113 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)); 15114 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15115 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)); 15116 15117 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 15118 tsbinfop = tsbinfop->tsb_next) { 15119 if (tsbinfop->tsb_flags & TSB_SWAPPED) { 15120 continue; 15121 } 15122 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG)); 15123 15124 sfmmu_inv_tsb(tsbinfop->tsb_va, 15125 TSB_BYTES(tsbinfop->tsb_szc)); 15126 } 15127 15128 /* Set HAT_CTX1_FLAG for all SCD ISMs */ 15129 sfmmu_ism_hatflags(sfmmup, 1); 15130 15131 SFMMU_STAT(sf_join_scd); 15132 } 15133 15134 /* 15135 * This routine is called in order to check if there is an SCD which matches 15136 * the process's region map if not then a new SCD may be created. 15137 */ 15138 static void 15139 sfmmu_find_scd(sfmmu_t *sfmmup) 15140 { 15141 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15142 sf_scd_t *scdp, *new_scdp; 15143 int ret; 15144 15145 ASSERT(srdp != NULL); 15146 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15147 15148 mutex_enter(&srdp->srd_scd_mutex); 15149 for (scdp = srdp->srd_scdp; scdp != NULL; 15150 scdp = scdp->scd_next) { 15151 SF_RGNMAP_EQUAL(&scdp->scd_region_map, 15152 &sfmmup->sfmmu_region_map, ret); 15153 if (ret == 1) { 15154 SF_SCD_INCR_REF(scdp); 15155 mutex_exit(&srdp->srd_scd_mutex); 15156 sfmmu_join_scd(scdp, sfmmup); 15157 ASSERT(scdp->scd_refcnt >= 2); 15158 atomic_add_32((volatile uint32_t *) 15159 &scdp->scd_refcnt, -1); 15160 return; 15161 } else { 15162 /* 15163 * If the sfmmu region map is a subset of the scd 15164 * region map, then the assumption is that this process 15165 * will continue attaching to ISM segments until the 15166 * region maps are equal. 15167 */ 15168 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map, 15169 &sfmmup->sfmmu_region_map, ret); 15170 if (ret == 1) { 15171 mutex_exit(&srdp->srd_scd_mutex); 15172 return; 15173 } 15174 } 15175 } 15176 15177 ASSERT(scdp == NULL); 15178 /* 15179 * No matching SCD has been found, create a new one. 15180 */ 15181 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) == 15182 NULL) { 15183 mutex_exit(&srdp->srd_scd_mutex); 15184 return; 15185 } 15186 15187 /* 15188 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd. 15189 */ 15190 15191 /* Set scd_rttecnt for shme rgns in SCD */ 15192 sfmmu_set_scd_rttecnt(srdp, new_scdp); 15193 15194 /* 15195 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists. 15196 */ 15197 sfmmu_link_scd_to_regions(srdp, new_scdp); 15198 sfmmu_add_scd(&srdp->srd_scdp, new_scdp); 15199 SFMMU_STAT_ADD(sf_create_scd, 1); 15200 15201 mutex_exit(&srdp->srd_scd_mutex); 15202 sfmmu_join_scd(new_scdp, sfmmup); 15203 ASSERT(new_scdp->scd_refcnt >= 2); 15204 atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1); 15205 } 15206 15207 /* 15208 * This routine is called by a process to remove itself from an SCD. It is 15209 * either called when the processes has detached from a segment or from 15210 * hat_free_start() as a result of calling exit. 
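 * In the exit (sfmmu_free) case only the list unlinking and ttecnt
 * accounting are needed; otherwise the context is invalidated and the
 * ISM HAT_CTX1_FLAG settings are cleared before the hat drops its SCD
 * reference.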
15211 */ 15212 static void 15213 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15214 { 15215 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15216 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15217 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15218 int i; 15219 15220 ASSERT(scdp != NULL); 15221 ASSERT(srdp != NULL); 15222 15223 if (sfmmup->sfmmu_free) { 15224 /* 15225 * If the process is part of an SCD the sfmmu is unlinked 15226 * from scd_sf_list. 15227 */ 15228 mutex_enter(&scdp->scd_mutex); 15229 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15230 mutex_exit(&scdp->scd_mutex); 15231 /* 15232 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15233 * are about to leave the SCD 15234 */ 15235 for (i = 0; i < mmu_page_sizes; i++) { 15236 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15237 scdp->scd_rttecnt[i]); 15238 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15239 sfmmup->sfmmu_scdrttecnt[i]); 15240 sfmmup->sfmmu_scdrttecnt[i] = 0; 15241 } 15242 sfmmup->sfmmu_scdp = NULL; 15243 15244 SF_SCD_DECR_REF(srdp, scdp); 15245 return; 15246 } 15247 15248 ASSERT(r_type != SFMMU_REGION_ISM || 15249 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15250 ASSERT(scdp->scd_refcnt); 15251 ASSERT(!sfmmup->sfmmu_free); 15252 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15253 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15254 15255 /* 15256 * Wait for ISM maps to be updated. 15257 */ 15258 if (r_type != SFMMU_REGION_ISM) { 15259 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15260 sfmmup->sfmmu_scdp != NULL) { 15261 cv_wait(&sfmmup->sfmmu_tsb_cv, 15262 HATLOCK_MUTEXP(hatlockp)); 15263 } 15264 15265 if (sfmmup->sfmmu_scdp == NULL) { 15266 sfmmu_hat_exit(hatlockp); 15267 return; 15268 } 15269 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15270 } 15271 15272 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15273 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15274 /* 15275 * Since HAT_JOIN_SCD was set our context 15276 * is still invalid. 15277 */ 15278 } else { 15279 /* 15280 * For a multi-thread process, we must stop 15281 * all the other threads before leaving the scd. 15282 */ 15283 15284 sfmmu_invalidate_ctx(sfmmup); 15285 } 15286 15287 /* Clear all the rid's for ISM, delete flags, etc */ 15288 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15289 sfmmu_ism_hatflags(sfmmup, 0); 15290 15291 /* 15292 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15293 * are in SCD before this sfmmup leaves the SCD. 15294 */ 15295 for (i = 0; i < mmu_page_sizes; i++) { 15296 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15297 scdp->scd_rttecnt[i]); 15298 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15299 sfmmup->sfmmu_scdrttecnt[i]); 15300 sfmmup->sfmmu_scdrttecnt[i] = 0; 15301 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15302 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15303 sfmmup->sfmmu_scdismttecnt[i] = 0; 15304 } 15305 /* update tsb0 inflation count */ 15306 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15307 15308 if (r_type != SFMMU_REGION_ISM) { 15309 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15310 } 15311 sfmmup->sfmmu_scdp = NULL; 15312 15313 sfmmu_hat_exit(hatlockp); 15314 15315 /* 15316 * Unlink sfmmu from scd_sf_list this can be done without holding 15317 * the hat lock as we hold the sfmmu_as lock which prevents 15318 * hat_join_region from adding this thread to the scd again. Other 15319 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15320 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15321 * while holding the hat lock. 
15322 */ 15323 mutex_enter(&scdp->scd_mutex); 15324 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15325 mutex_exit(&scdp->scd_mutex); 15326 SFMMU_STAT(sf_leave_scd); 15327 15328 SF_SCD_DECR_REF(srdp, scdp); 15329 hatlockp = sfmmu_hat_enter(sfmmup); 15330 15331 } 15332 15333 /* 15334 * Unlink and free up an SCD structure with a reference count of 0. 15335 */ 15336 static void 15337 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15338 { 15339 sfmmu_t *scsfmmup; 15340 sf_scd_t *sp; 15341 hatlock_t *shatlockp; 15342 int i, ret; 15343 15344 mutex_enter(&srdp->srd_scd_mutex); 15345 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15346 if (sp == scdp) 15347 break; 15348 } 15349 if (sp == NULL || sp->scd_refcnt) { 15350 mutex_exit(&srdp->srd_scd_mutex); 15351 return; 15352 } 15353 15354 /* 15355 * It is possible that the scd has been freed and reallocated with a 15356 * different region map while we've been waiting for the srd_scd_mutex. 15357 */ 15358 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret); 15359 if (ret != 1) { 15360 mutex_exit(&srdp->srd_scd_mutex); 15361 return; 15362 } 15363 15364 ASSERT(scdp->scd_sf_list == NULL); 15365 /* 15366 * Unlink scd from srd_scdp list. 15367 */ 15368 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15369 mutex_exit(&srdp->srd_scd_mutex); 15370 15371 sfmmu_unlink_scd_from_regions(srdp, scdp); 15372 15373 /* Clear shared context tsb and release ctx */ 15374 scsfmmup = scdp->scd_sfmmup; 15375 15376 /* 15377 * create a barrier so that scd will not be destroyed 15378 * if other thread still holds the same shared hat lock. 15379 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15380 * shared hat lock before checking the shared tsb reloc flag. 15381 */ 15382 shatlockp = sfmmu_hat_enter(scsfmmup); 15383 sfmmu_hat_exit(shatlockp); 15384 15385 sfmmu_free_scd_tsbs(scsfmmup); 15386 15387 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 15388 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) { 15389 kmem_free(scsfmmup->sfmmu_hmeregion_links[i], 15390 SFMMU_L2_HMERLINKS_SIZE); 15391 scsfmmup->sfmmu_hmeregion_links[i] = NULL; 15392 } 15393 } 15394 kmem_cache_free(sfmmuid_cache, scsfmmup); 15395 kmem_cache_free(scd_cache, scdp); 15396 SFMMU_STAT(sf_destroy_scd); 15397 } 15398 15399 /* 15400 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to 15401 * bits which are set in the ism_region_map parameter. This flag indicates to 15402 * the tsbmiss handler that mapping for these segments should be loaded using 15403 * the shared context. 15404 */ 15405 static void 15406 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag) 15407 { 15408 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15409 ism_blk_t *ism_blkp; 15410 ism_map_t *ism_map; 15411 int i, rid; 15412 15413 ASSERT(sfmmup->sfmmu_iblk != NULL); 15414 ASSERT(scdp != NULL); 15415 /* 15416 * Note that the caller either set HAT_ISMBUSY flag or checked 15417 * under hat lock that HAT_ISMBUSY was not set by another thread. 
15418 */ 15419 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15420 15421 ism_blkp = sfmmup->sfmmu_iblk; 15422 while (ism_blkp != NULL) { 15423 ism_map = ism_blkp->iblk_maps; 15424 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 15425 rid = ism_map[i].imap_rid; 15426 if (rid == SFMMU_INVALID_ISMRID) { 15427 continue; 15428 } 15429 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS); 15430 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) && 15431 addflag) { 15432 ism_map[i].imap_hatflags |= 15433 HAT_CTX1_FLAG; 15434 } else { 15435 ism_map[i].imap_hatflags &= 15436 ~HAT_CTX1_FLAG; 15437 } 15438 } 15439 ism_blkp = ism_blkp->iblk_next; 15440 } 15441 } 15442 15443 static int 15444 sfmmu_srd_lock_held(sf_srd_t *srdp) 15445 { 15446 return (MUTEX_HELD(&srdp->srd_mutex)); 15447 } 15448 15449 /* ARGSUSED */ 15450 static int 15451 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags) 15452 { 15453 sf_scd_t *scdp = (sf_scd_t *)buf; 15454 15455 bzero(buf, sizeof (sf_scd_t)); 15456 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL); 15457 return (0); 15458 } 15459 15460 /* ARGSUSED */ 15461 static void 15462 sfmmu_scdcache_destructor(void *buf, void *cdrarg) 15463 { 15464 sf_scd_t *scdp = (sf_scd_t *)buf; 15465 15466 mutex_destroy(&scdp->scd_mutex); 15467 } 15468
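/*
 * Illustrative sketch: how a hypothetical caller might drive the region
 * interfaces above for a read-only text segment.  The my_vp, my_off and
 * my_pgszc names, and the as/seg variables, are invented for the example;
 * for an hme region the object passed must be the srd's exec vnode and
 * the permissions must not include PROT_WRITE, otherwise
 * hat_join_region() returns HAT_INVALID_REGION_COOKIE.
 *
 *	hat_region_cookie_t cookie;
 *
 *	cookie = hat_join_region(as->a_hat, seg->s_base, seg->s_size,
 *	    (void *)my_vp, my_off, PROT_READ | PROT_EXEC, my_pgszc,
 *	    NULL, HAT_REGION_TEXT);
 *	if (cookie == HAT_INVALID_REGION_COOKIE) {
 *		... fall back to private mappings for this segment ...
 *	} else {
 *		... later, when the segment is unmapped ...
 *		hat_leave_region(as->a_hat, cookie, HAT_REGION_TEXT);
 *	}
 */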