1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * VM - Hardware Address Translation management for Spitfire MMU. 28 * 29 * This file implements the machine specific hardware translation 30 * needed by the VM system. The machine independent interface is 31 * described in <vm/hat.h> while the machine dependent interface 32 * and data structures are described in <vm/hat_sfmmu.h>. 33 * 34 * The hat layer manages the address translation hardware as a cache 35 * driven by calls from the higher levels in the VM system. 36 */ 37 38 #include <sys/types.h> 39 #include <sys/kstat.h> 40 #include <vm/hat.h> 41 #include <vm/hat_sfmmu.h> 42 #include <vm/page.h> 43 #include <sys/pte.h> 44 #include <sys/systm.h> 45 #include <sys/mman.h> 46 #include <sys/sysmacros.h> 47 #include <sys/machparam.h> 48 #include <sys/vtrace.h> 49 #include <sys/kmem.h> 50 #include <sys/mmu.h> 51 #include <sys/cmn_err.h> 52 #include <sys/cpu.h> 53 #include <sys/cpuvar.h> 54 #include <sys/debug.h> 55 #include <sys/lgrp.h> 56 #include <sys/archsystm.h> 57 #include <sys/machsystm.h> 58 #include <sys/vmsystm.h> 59 #include <vm/as.h> 60 #include <vm/seg.h> 61 #include <vm/seg_kp.h> 62 #include <vm/seg_kmem.h> 63 #include <vm/seg_kpm.h> 64 #include <vm/rm.h> 65 #include <sys/t_lock.h> 66 #include <sys/obpdefs.h> 67 #include <sys/vm_machparam.h> 68 #include <sys/var.h> 69 #include <sys/trap.h> 70 #include <sys/machtrap.h> 71 #include <sys/scb.h> 72 #include <sys/bitmap.h> 73 #include <sys/machlock.h> 74 #include <sys/membar.h> 75 #include <sys/atomic.h> 76 #include <sys/cpu_module.h> 77 #include <sys/prom_debug.h> 78 #include <sys/ksynch.h> 79 #include <sys/mem_config.h> 80 #include <sys/mem_cage.h> 81 #include <vm/vm_dep.h> 82 #include <vm/xhat_sfmmu.h> 83 #include <sys/fpu/fpusystm.h> 84 #include <vm/mach_kpm.h> 85 #include <sys/callb.h> 86 87 #ifdef DEBUG 88 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 89 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 90 caddr_t _eaddr = (saddr) + (len); \ 91 sf_srd_t *_srdp; \ 92 sf_region_t *_rgnp; \ 93 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 94 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 95 ASSERT((hat) != ksfmmup); \ 96 _srdp = (hat)->sfmmu_srdp; \ 97 ASSERT(_srdp != NULL); \ 98 ASSERT(_srdp->srd_refcnt != 0); \ 99 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 100 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 101 ASSERT(_rgnp->rgn_refcnt != 0); \ 102 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 103 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 104 SFMMU_REGION_HME); \ 105 ASSERT((saddr) >= _rgnp->rgn_saddr); \ 106 ASSERT((saddr) < _rgnp->rgn_saddr + 
_rgnp->rgn_size); \ 107 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 108 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 109 } 110 111 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 112 { \ 113 caddr_t _hsva; \ 114 caddr_t _heva; \ 115 caddr_t _rsva; \ 116 caddr_t _reva; \ 117 int _ttesz = get_hblk_ttesz(hmeblkp); \ 118 int _flagtte; \ 119 ASSERT((srdp)->srd_refcnt != 0); \ 120 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 121 ASSERT((rgnp)->rgn_id == rid); \ 122 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 123 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 124 SFMMU_REGION_HME); \ 125 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 126 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 127 _heva = get_hblk_endaddr(hmeblkp); \ 128 _rsva = (caddr_t)P2ALIGN( \ 129 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 130 _reva = (caddr_t)P2ROUNDUP( \ 131 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 132 HBLK_MIN_BYTES); \ 133 ASSERT(_hsva >= _rsva); \ 134 ASSERT(_hsva < _reva); \ 135 ASSERT(_heva > _rsva); \ 136 ASSERT(_heva <= _reva); \ 137 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 138 _ttesz; \ 139 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 140 } 141 142 #else /* DEBUG */ 143 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 144 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 145 #endif /* DEBUG */ 146 147 #if defined(SF_ERRATA_57) 148 extern caddr_t errata57_limit; 149 #endif 150 151 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 152 (sizeof (int64_t))) 153 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 154 155 #define HBLK_RESERVE_CNT 128 156 #define HBLK_RESERVE_MIN 20 157 158 static struct hme_blk *freehblkp; 159 static kmutex_t freehblkp_lock; 160 static int freehblkcnt; 161 162 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 163 static kmutex_t hblk_reserve_lock; 164 static kthread_t *hblk_reserve_thread; 165 166 static nucleus_hblk8_info_t nucleus_hblk8; 167 static nucleus_hblk1_info_t nucleus_hblk1; 168 169 /* 170 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here 171 * after the initial phase of removing an hmeblk from the hash chain, see 172 * the detailed comment in sfmmu_hblk_hash_rm() for further details. 173 */ 174 static cpu_hme_pend_t *cpu_hme_pend; 175 static uint_t cpu_hme_pend_thresh; 176 /* 177 * SFMMU specific hat functions 178 */ 179 void hat_pagecachectl(struct page *, int); 180 181 /* flags for hat_pagecachectl */ 182 #define HAT_CACHE 0x1 183 #define HAT_UNCACHE 0x2 184 #define HAT_TMPNC 0x4 185 186 /* 187 * This flag is set to 0 via the MD in platforms that do not support 188 * I-cache coherency in hardware. Used to enable "soft exec" mode. 189 * The MD "coherency" property is optional, and defaults to 1 (because 190 * coherent I-cache is the norm.) 191 */ 192 uint_t icache_is_coherent = 1; 193 194 /* 195 * Flag to allow the creation of non-cacheable translations 196 * to system memory. It is off by default. At the moment this 197 * flag is used by the ecache error injector. The error injector 198 * will turn it on when creating such a translation then shut it 199 * off when it's finished. 200 */ 201 202 int sfmmu_allow_nc_trans = 0; 203 204 /* 205 * Flag to disable large page support. 206 * value of 1 => disable all large pages. 207 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 208 * 209 * For example, use the value 0x4 to disable 512K pages. 
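 * (0x4 is (1 << TTE512K); each bit position matches the corresponding
 * TTE size code, as in the disable_ism_large_pages initializer below.)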
210 * 211 */ 212 #define LARGE_PAGES_OFF 0x1 213 214 /* 215 * The disable_large_pages and disable_ism_large_pages variables control 216 * hat_memload_array and the page sizes to be used by ISM and the kernel. 217 * 218 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 219 * are only used to control which OOB pages to use at upper VM segment creation 220 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 221 * Their values may come from platform or CPU specific code to disable page 222 * sizes that should not be used. 223 * 224 * WARNING: 512K pages are currently not supported for ISM/DISM. 225 */ 226 uint_t disable_large_pages = 0; 227 uint_t disable_ism_large_pages = (1 << TTE512K); 228 uint_t disable_auto_data_large_pages = 0; 229 uint_t disable_auto_text_large_pages = 0; 230 uint_t disable_shctx_large_pages = 0; 231 232 /* 233 * Private sfmmu data structures for hat management 234 */ 235 static struct kmem_cache *sfmmuid_cache; 236 static struct kmem_cache *mmuctxdom_cache; 237 238 /* 239 * Private sfmmu data structures for tsb management 240 */ 241 static struct kmem_cache *sfmmu_tsbinfo_cache; 242 static struct kmem_cache *sfmmu_tsb8k_cache; 243 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 244 static vmem_t *kmem_bigtsb_arena; 245 static vmem_t *kmem_tsb_arena; 246 247 /* 248 * sfmmu static variables for hmeblk resource management. 249 */ 250 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 251 static struct kmem_cache *sfmmu8_cache; 252 static struct kmem_cache *sfmmu1_cache; 253 static struct kmem_cache *pa_hment_cache; 254 255 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 256 /* 257 * private data for ism 258 */ 259 static struct kmem_cache *ism_blk_cache; 260 static struct kmem_cache *ism_ment_cache; 261 #define ISMID_STARTADDR NULL 262 263 /* 264 * Region management data structures and function declarations. 265 */ 266 267 static void sfmmu_leave_srd(sfmmu_t *); 268 static int sfmmu_srdcache_constructor(void *, void *, int); 269 static void sfmmu_srdcache_destructor(void *, void *); 270 static int sfmmu_rgncache_constructor(void *, void *, int); 271 static void sfmmu_rgncache_destructor(void *, void *); 272 static int sfrgnmap_isnull(sf_region_map_t *); 273 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 274 static int sfmmu_scdcache_constructor(void *, void *, int); 275 static void sfmmu_scdcache_destructor(void *, void *); 276 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 277 size_t, void *, u_offset_t); 278 279 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 280 static sf_srd_bucket_t *srd_buckets; 281 static struct kmem_cache *srd_cache; 282 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 283 static struct kmem_cache *region_cache; 284 static struct kmem_cache *scd_cache; 285 286 #ifdef sun4v 287 int use_bigtsb_arena = 1; 288 #else 289 int use_bigtsb_arena = 0; 290 #endif 291 292 /* External /etc/system tunable, for turning on&off the shctx support */ 293 int disable_shctx = 0; 294 /* Internal variable, set by MD if the HW supports shctx feature */ 295 int shctx_on = 0; 296 297 /* Internal variable, set by MD if the HW supports the search order register */ 298 int pgsz_search_on = 0; 299 /* 300 * External /etc/system tunable, for controlling search order register 301 * support. 
302 */ 303 int disable_pgsz_search = 0; 304 305 #ifdef DEBUG 306 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 307 #endif 308 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 309 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 310 311 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 312 static void sfmmu_find_scd(sfmmu_t *); 313 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 314 static void sfmmu_finish_join_scd(sfmmu_t *); 315 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 316 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 317 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 318 static void sfmmu_free_scd_tsbs(sfmmu_t *); 319 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 320 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 321 static void sfmmu_ism_hatflags(sfmmu_t *, int); 322 static int sfmmu_srd_lock_held(sf_srd_t *); 323 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 324 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 325 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 326 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 327 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 328 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 329 330 /* 331 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 332 * HAT flags, synchronizing TLB/TSB coherency, and context management. 333 * The lock is hashed on the sfmmup since the case where we need to lock 334 * all processes is rare but does occur (e.g. we need to unload a shared 335 * mapping from all processes using the mapping). We have a lot of buckets, 336 * and each slab of sfmmu_t's can use about a quarter of them, giving us 337 * a fairly good distribution without wasting too much space and overhead 338 * when we have to grab them all. 339 */ 340 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 341 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 342 343 /* 344 * Hash algorithm optimized for a small number of slabs. 345 * 7 is (highbit((sizeof sfmmu_t)) - 1) 346 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 347 * kmem_cache, and thus they will be sequential within that cache. In 348 * addition, each new slab will have a different "color" up to cache_maxcolor 349 * which will skew the hashing for each successive slab which is allocated. 350 * If the size of sfmmu_t changed to a larger size, this algorithm may need 351 * to be revisited. 352 */ 353 #define TSB_HASH_SHIFT_BITS (7) 354 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 355 356 #ifdef DEBUG 357 int tsb_hash_debug = 0; 358 #define TSB_HASH(sfmmup) \ 359 (tsb_hash_debug ? &hat_lock[0] : \ 360 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 361 #else /* DEBUG */ 362 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 363 #endif /* DEBUG */ 364 365 366 /* sfmmu_replace_tsb() return codes. */ 367 typedef enum tsb_replace_rc { 368 TSB_SUCCESS, 369 TSB_ALLOCFAIL, 370 TSB_LOSTRACE, 371 TSB_ALREADY_SWAPPED, 372 TSB_CANTGROW 373 } tsb_replace_rc_t; 374 375 /* 376 * Flags for TSB allocation routines. 377 */ 378 #define TSB_ALLOC 0x01 379 #define TSB_FORCEALLOC 0x02 380 #define TSB_GROW 0x04 381 #define TSB_SHRINK 0x08 382 #define TSB_SWAPIN 0x10 383 384 /* 385 * Support for HAT callbacks. 
386 */ 387 #define SFMMU_MAX_RELOC_CALLBACKS 10 388 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 389 static id_t sfmmu_cb_nextid = 0; 390 static id_t sfmmu_tsb_cb_id; 391 struct sfmmu_callback *sfmmu_cb_table; 392 393 /* 394 * Kernel page relocation is enabled by default for non-caged 395 * kernel pages. This has little effect unless segkmem_reloc is 396 * set, since by default kernel memory comes from inside the 397 * kernel cage. 398 */ 399 int hat_kpr_enabled = 1; 400 401 kmutex_t kpr_mutex; 402 kmutex_t kpr_suspendlock; 403 kthread_t *kreloc_thread; 404 405 /* 406 * Enable VA->PA translation sanity checking on DEBUG kernels. 407 * Disabled by default. This is incompatible with some 408 * drivers (error injector, RSM) so if it breaks you get 409 * to keep both pieces. 410 */ 411 int hat_check_vtop = 0; 412 413 /* 414 * Private sfmmu routines (prototypes) 415 */ 416 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 417 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 418 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 419 uint_t); 420 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 421 caddr_t, demap_range_t *, uint_t); 422 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 423 caddr_t, int); 424 static void sfmmu_hblk_free(struct hme_blk **); 425 static void sfmmu_hblks_list_purge(struct hme_blk **, int); 426 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 427 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 428 static struct hme_blk *sfmmu_hblk_steal(int); 429 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 430 struct hme_blk *, uint64_t, struct hme_blk *); 431 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 432 433 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 434 struct page **, uint_t, uint_t, uint_t); 435 static void hat_do_memload(struct hat *, caddr_t, struct page *, 436 uint_t, uint_t, uint_t); 437 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 438 uint_t, uint_t, pgcnt_t, uint_t); 439 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 440 uint_t); 441 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 442 uint_t, uint_t); 443 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 444 caddr_t, int, uint_t); 445 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 446 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 447 uint_t); 448 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 449 caddr_t, page_t **, uint_t, uint_t); 450 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 451 452 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 453 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 454 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 455 #ifdef VAC 456 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 457 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 458 int tst_tnc(page_t *pp, pgcnt_t); 459 void conv_tnc(page_t *pp, int); 460 #endif 461 462 static void sfmmu_get_ctx(sfmmu_t *); 463 static void sfmmu_free_sfmmu(sfmmu_t *); 464 465 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 466 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 467 468 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 469 static void hat_pagereload(struct page *, struct page *); 470 static cpuset_t 
sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 471 #ifdef VAC 472 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 473 static void sfmmu_page_cache(page_t *, int, int, int); 474 #endif 475 476 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 477 struct hme_blk *, int); 478 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 479 pfn_t, int, int, int, int); 480 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 481 pfn_t, int); 482 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 483 static void sfmmu_tlb_range_demap(demap_range_t *); 484 static void sfmmu_sync_mmustate(sfmmu_t *); 485 486 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 487 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 488 sfmmu_t *); 489 static void sfmmu_tsb_free(struct tsb_info *); 490 static void sfmmu_tsbinfo_free(struct tsb_info *); 491 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 492 sfmmu_t *); 493 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 494 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 495 static int sfmmu_select_tsb_szc(pgcnt_t); 496 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 497 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 498 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 499 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 500 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 501 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 502 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 503 hatlock_t *, uint_t); 504 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 505 506 #ifdef VAC 507 void sfmmu_cache_flush(pfn_t, int); 508 void sfmmu_cache_flushcolor(int, pfn_t); 509 #endif 510 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 511 caddr_t, demap_range_t *, uint_t, int); 512 513 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 514 static uint_t sfmmu_ptov_attr(tte_t *); 515 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 516 caddr_t, demap_range_t *, uint_t); 517 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 518 static int sfmmu_idcache_constructor(void *, void *, int); 519 static void sfmmu_idcache_destructor(void *, void *); 520 static int sfmmu_hblkcache_constructor(void *, void *, int); 521 static void sfmmu_hblkcache_destructor(void *, void *); 522 static void sfmmu_hblkcache_reclaim(void *); 523 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 524 struct hmehash_bucket *); 525 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *, 526 struct hme_blk *, struct hme_blk **, int); 527 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *, 528 uint64_t); 529 static struct hme_blk *sfmmu_check_pending_hblks(int); 530 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 531 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 532 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 533 int, caddr_t *); 534 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 535 536 static void sfmmu_rm_large_mappings(page_t *, int); 537 538 static void hat_lock_init(void); 539 static void hat_kstat_init(void); 540 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 541 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 542 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 543 static void 
sfmmu_check_page_sizes(sfmmu_t *, int); 544 int fnd_mapping_sz(page_t *); 545 static void iment_add(struct ism_ment *, struct hat *); 546 static void iment_sub(struct ism_ment *, struct hat *); 547 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 548 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 549 extern void sfmmu_clear_utsbinfo(void); 550 551 static void sfmmu_ctx_wrap_around(mmu_ctx_t *); 552 553 /* kpm globals */ 554 #ifdef DEBUG 555 /* 556 * Enable trap level tsbmiss handling 557 */ 558 int kpm_tsbmtl = 1; 559 560 /* 561 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 562 * required TLB shootdowns in this case, so handle w/ care. Off by default. 563 */ 564 int kpm_tlb_flush; 565 #endif /* DEBUG */ 566 567 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 568 569 #ifdef DEBUG 570 static void sfmmu_check_hblk_flist(); 571 #endif 572 573 /* 574 * Semi-private sfmmu data structures. Some of them are initialize in 575 * startup or in hat_init. Some of them are private but accessed by 576 * assembly code or mach_sfmmu.c 577 */ 578 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */ 579 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */ 580 uint64_t uhme_hash_pa; /* PA of uhme_hash */ 581 uint64_t khme_hash_pa; /* PA of khme_hash */ 582 int uhmehash_num; /* # of buckets in user hash table */ 583 int khmehash_num; /* # of buckets in kernel hash table */ 584 585 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */ 586 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */ 587 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */ 588 589 #define DEFAULT_NUM_CTXS_PER_MMU 8192 590 uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU; 591 592 int cache; /* describes system cache */ 593 594 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */ 595 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */ 596 int ktsb_szcode; /* kernel 8k-indexed tsb size code */ 597 int ktsb_sz; /* kernel 8k-indexed tsb size */ 598 599 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */ 600 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */ 601 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */ 602 int ktsb4m_sz; /* kernel 4m-indexed tsb size */ 603 604 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */ 605 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */ 606 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */ 607 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */ 608 609 #ifndef sun4v 610 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */ 611 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */ 612 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */ 613 caddr_t utsb_vabase; /* reserved kernel virtual memory */ 614 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */ 615 #endif /* sun4v */ 616 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */ 617 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */ 618 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */ 619 620 /* 621 * Size to use for TSB slabs. Future platforms that support page sizes 622 * larger than 4M may wish to change these values, and provide their own 623 * assembly macros for building and decoding the TSB base register contents. 624 * Note disable_large_pages will override the value set here. 
625 */ 626 static uint_t tsb_slab_ttesz = TTE4M; 627 size_t tsb_slab_size = MMU_PAGESIZE4M; 628 uint_t tsb_slab_shift = MMU_PAGESHIFT4M; 629 /* PFN mask for TTE */ 630 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT; 631 632 /* 633 * Size to use for TSB slabs. These are used only when 256M tsb arenas 634 * exist. 635 */ 636 static uint_t bigtsb_slab_ttesz = TTE256M; 637 static size_t bigtsb_slab_size = MMU_PAGESIZE256M; 638 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M; 639 /* 256M page alignment for 8K pfn */ 640 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT; 641 642 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 643 static int tsb_max_growsize = 0; 644 645 /* 646 * Tunable parameters dealing with TSB policies. 647 */ 648 649 /* 650 * This undocumented tunable forces all 8K TSBs to be allocated from 651 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 652 */ 653 #ifdef DEBUG 654 int tsb_forceheap = 0; 655 #endif /* DEBUG */ 656 657 /* 658 * Decide whether to use per-lgroup arenas, or one global set of 659 * TSB arenas. The default is not to break up per-lgroup, since 660 * most platforms don't recognize any tangible benefit from it. 661 */ 662 int tsb_lgrp_affinity = 0; 663 664 /* 665 * Used for growing the TSB based on the process RSS. 666 * tsb_rss_factor is based on the smallest TSB, and is 667 * shifted by the TSB size to determine if we need to grow. 668 * The default will grow the TSB if the number of TTEs for 669 * this page size exceeds 75% of the number of TSB entries, 670 * which should _almost_ eliminate all conflict misses 671 * (at the expense of using up lots and lots of memory). 672 */ 673 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 674 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 675 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 676 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 677 default_tsb_size) 678 #define TSB_OK_SHRINK() \ 679 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 680 #define TSB_OK_GROW() \ 681 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 682 683 int enable_tsb_rss_sizing = 1; 684 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 685 686 /* which TSB size code to use for new address spaces or if rss sizing off */ 687 int default_tsb_size = TSB_8K_SZCODE; 688 689 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 690 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 691 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 692 693 #ifdef DEBUG 694 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 695 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 696 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 697 static int tsb_alloc_fail_mtbf = 0; 698 static int tsb_alloc_count = 0; 699 #endif /* DEBUG */ 700 701 /* if set to 1, will remap valid TTEs when growing TSB. */ 702 int tsb_remap_ttes = 1; 703 704 /* 705 * If we have more than this many mappings, allocate a second TSB. 706 * This default is chosen because the I/D fully associative TLBs are 707 * assumed to have at least 8 available entries. Platforms with a 708 * larger fully-associative TLB could probably override the default. 
 */

#ifdef sun4v
int	tsb_sectsb_threshold = 0;
#else
int	tsb_sectsb_threshold = 8;
#endif

/*
 * kstat data
 */
struct sfmmu_global_stat sfmmu_global_stat;
struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;

/*
 * Global data
 */
sfmmu_t		*ksfmmup;	/* kernel's hat id */

#ifdef DEBUG
static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
#endif

/* sfmmu locking operations */
static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
static int	sfmmu_mlspl_held(struct page *, int);

kmutex_t *sfmmu_page_enter(page_t *);
void	sfmmu_page_exit(kmutex_t *);
int	sfmmu_page_spl_held(struct page *);

/* sfmmu internal locking operations - accessed directly */
static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
				kmutex_t **, kmutex_t **);
static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
static hatlock_t *sfmmu_hat_tryenter(sfmmu_t *);
static void	sfmmu_hat_lock_all(void);
static void	sfmmu_hat_unlock_all(void);
static void	sfmmu_ismhat_enter(sfmmu_t *, int);
static void	sfmmu_ismhat_exit(sfmmu_t *, int);

/*
 * Array of mutexes protecting a page's mapping list and p_nrm field.
 *
 * The hash function looks complicated, but is made up so that:
 *
 * "pp" not shifted, so adjacent pp values will hash to different cache lines
 *  (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
 *
 * "pp" >> mml_shift, incorporates more source bits into the hash result
 *
 * "& (mml_table_sz - 1)", should be faster than using remainder "%"
 *
 * Hopefully, mml_table, mml_table_sz and mml_shift are all in the same
 * cacheline, since they get declared next to each other below. We'll trust
 * ld not to do something random.
 */
#ifdef	DEBUG
int mlist_hash_debug = 0;
#define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
	&mml_table[((uintptr_t)(pp) + \
	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
#else	/* !DEBUG */
#define	MLIST_HASH(pp)   &mml_table[ \
	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
#endif	/* !DEBUG */

kmutex_t		*mml_table;
uint_t			mml_table_sz;	/* must be a power of 2 */
uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */

kpm_hlk_t	*kpmp_table;
uint_t		kpmp_table_sz;	/* must be a power of 2 */
uchar_t		kpmp_shift;

kpm_shlk_t	*kpmp_stable;
uint_t		kpmp_stable_sz;	/* must be a power of 2 */

/*
 * SPL_HASH was improved to avoid false cache line sharing
 */
#define	SPL_TABLE_SIZE	128
#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
#define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */

#define	SPL_INDEX(pp) \
	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
	(SPL_TABLE_SIZE - 1))

#define	SPL_HASH(pp) \
	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)

static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];


/*
 * hat_unload_callback() will group together callbacks in order
 * to avoid xt_sync() calls.  This is the maximum size of the group.
808 */ 809 #define MAX_CB_ADDR 32 810 811 tte_t hw_tte; 812 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 813 814 static char *mmu_ctx_kstat_names[] = { 815 "mmu_ctx_tsb_exceptions", 816 "mmu_ctx_tsb_raise_exception", 817 "mmu_ctx_wrap_around", 818 }; 819 820 /* 821 * Wrapper for vmem_xalloc since vmem_create only allows limited 822 * parameters for vm_source_alloc functions. This function allows us 823 * to specify alignment consistent with the size of the object being 824 * allocated. 825 */ 826 static void * 827 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 828 { 829 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 830 } 831 832 /* Common code for setting tsb_alloc_hiwater. */ 833 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 834 ptob(pages) / tsb_alloc_hiwater_factor 835 836 /* 837 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 838 * a single TSB. physmem is the number of physical pages so we need physmem 8K 839 * TTEs to represent all those physical pages. We round this up by using 840 * 1<<highbit(). To figure out which size code to use, remember that the size 841 * code is just an amount to shift the smallest TSB size to get the size of 842 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 843 * highbit() - 1) to get the size code for the smallest TSB that can represent 844 * all of physical memory, while erring on the side of too much. 845 * 846 * Restrict tsb_max_growsize to make sure that: 847 * 1) TSBs can't grow larger than the TSB slab size 848 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 849 */ 850 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 851 int _i, _szc, _slabszc, _tsbszc; \ 852 \ 853 _i = highbit(pages); \ 854 if ((1 << (_i - 1)) == (pages)) \ 855 _i--; /* 2^n case, round down */ \ 856 _szc = _i - TSB_START_SIZE; \ 857 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 858 _tsbszc = MIN(_szc, _slabszc); \ 859 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 860 } 861 862 /* 863 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 864 * tsb_info which handles that TTE size. 865 */ 866 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 867 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 868 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 869 sfmmu_hat_lock_held(sfmmup)); \ 870 if ((tte_szc) >= TTE4M) { \ 871 ASSERT((tsbinfop) != NULL); \ 872 (tsbinfop) = (tsbinfop)->tsb_next; \ 873 } \ 874 } 875 876 /* 877 * Macro to use to unload entries from the TSB. 878 * It has knowledge of which page sizes get replicated in the TSB 879 * and will call the appropriate unload routine for the appropriate size. 880 */ 881 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 882 { \ 883 int ttesz = get_hblk_ttesz(hmeblkp); \ 884 if (ttesz == TTE8K || ttesz == TTE4M) { \ 885 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 886 } else { \ 887 caddr_t sva = ismhat ? addr : \ 888 (caddr_t)get_hblk_base(hmeblkp); \ 889 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 890 ASSERT(addr >= sva && addr < eva); \ 891 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 892 } \ 893 } 894 895 896 /* Update tsb_alloc_hiwater after memory is configured. */ 897 /*ARGSUSED*/ 898 static void 899 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 900 { 901 /* Assumes physmem has already been updated. 
 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

/*
 * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
 * deleted.
 */
/*ARGSUSED*/
static int
sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
{
	return (0);
}

/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
/*ARGSUSED*/
static void
sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	/*
	 * Whether the delete was cancelled or not, just go ahead and update
	 * tsb_alloc_hiwater and tsb_max_growsize.
	 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_post_add,		/* post_add */
	sfmmu_update_pre_del,		/* pre_del */
	sfmmu_update_post_del		/* post_del */
};


/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size. For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else	{						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}

/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
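 *
 * An illustrative call site (hypothetical names for a caller's locals)
 * might look like:
 *
 *	struct sf_hment *sfhmep;
 *
 *	HBLKTOHME(sfhmep, hmeblkp, addr);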
1019 */ 1020 #define HBLKTOHME(hment, hmeblkp, addr) \ 1021 { \ 1022 int index; \ 1023 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \ 1024 } 1025 1026 /* 1027 * Version of HBLKTOHME that also returns the index in hmeblkp 1028 * of the hment. 1029 */ 1030 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \ 1031 { \ 1032 ASSERT(in_hblk_range((hmeblkp), (addr))); \ 1033 \ 1034 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \ 1035 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \ 1036 } else \ 1037 idx = 0; \ 1038 \ 1039 (hment) = &(hmeblkp)->hblk_hme[idx]; \ 1040 } 1041 1042 /* 1043 * Disable any page sizes not supported by the CPU 1044 */ 1045 void 1046 hat_init_pagesizes() 1047 { 1048 int i; 1049 1050 mmu_exported_page_sizes = 0; 1051 for (i = TTE8K; i < max_mmu_page_sizes; i++) { 1052 1053 szc_2_userszc[i] = (uint_t)-1; 1054 userszc_2_szc[i] = (uint_t)-1; 1055 1056 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) { 1057 disable_large_pages |= (1 << i); 1058 } else { 1059 szc_2_userszc[i] = mmu_exported_page_sizes; 1060 userszc_2_szc[mmu_exported_page_sizes] = i; 1061 mmu_exported_page_sizes++; 1062 } 1063 } 1064 1065 disable_ism_large_pages |= disable_large_pages; 1066 disable_auto_data_large_pages = disable_large_pages; 1067 disable_auto_text_large_pages = disable_large_pages; 1068 disable_shctx_large_pages |= disable_large_pages; 1069 1070 /* 1071 * Initialize mmu-specific large page sizes. 1072 */ 1073 if (&mmu_large_pages_disabled) { 1074 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD); 1075 disable_shctx_large_pages |= disable_large_pages; 1076 disable_ism_large_pages |= 1077 mmu_large_pages_disabled(HAT_LOAD_SHARE); 1078 disable_auto_data_large_pages |= 1079 mmu_large_pages_disabled(HAT_AUTO_DATA); 1080 disable_auto_text_large_pages |= 1081 mmu_large_pages_disabled(HAT_AUTO_TEXT); 1082 } 1083 } 1084 1085 /* 1086 * Initialize the hardware address translation structures. 1087 */ 1088 void 1089 hat_init(void) 1090 { 1091 int i; 1092 uint_t sz; 1093 size_t size; 1094 1095 hat_lock_init(); 1096 hat_kstat_init(); 1097 1098 /* 1099 * Hardware-only bits in a TTE 1100 */ 1101 MAKE_TTE_MASK(&hw_tte); 1102 1103 hat_init_pagesizes(); 1104 1105 /* Initialize the hash locks */ 1106 for (i = 0; i < khmehash_num; i++) { 1107 mutex_init(&khme_hash[i].hmehash_mutex, NULL, 1108 MUTEX_DEFAULT, NULL); 1109 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1110 } 1111 for (i = 0; i < uhmehash_num; i++) { 1112 mutex_init(&uhme_hash[i].hmehash_mutex, NULL, 1113 MUTEX_DEFAULT, NULL); 1114 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1115 } 1116 khmehash_num--; /* make sure counter starts from 0 */ 1117 uhmehash_num--; /* make sure counter starts from 0 */ 1118 1119 /* 1120 * Allocate context domain structures. 1121 * 1122 * A platform may choose to modify max_mmu_ctxdoms in 1123 * set_platform_defaults(). If a platform does not define 1124 * a set_platform_defaults() or does not choose to modify 1125 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU. 1126 * 1127 * For sun4v, there will be one global context domain, this is to 1128 * avoid the ldom cpu substitution problem. 1129 * 1130 * For all platforms that have CPUs sharing MMUs, this 1131 * value must be defined. 
1132 */ 1133 if (max_mmu_ctxdoms == 0) { 1134 #ifndef sun4v 1135 max_mmu_ctxdoms = max_ncpus; 1136 #else /* sun4v */ 1137 max_mmu_ctxdoms = 1; 1138 #endif /* sun4v */ 1139 } 1140 1141 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *); 1142 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP); 1143 1144 /* mmu_ctx_t is 64 bytes aligned */ 1145 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache", 1146 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 1147 /* 1148 * MMU context domain initialization for the Boot CPU. 1149 * This needs the context domains array allocated above. 1150 */ 1151 mutex_enter(&cpu_lock); 1152 sfmmu_cpu_init(CPU); 1153 mutex_exit(&cpu_lock); 1154 1155 /* 1156 * Intialize ism mapping list lock. 1157 */ 1158 1159 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1160 1161 /* 1162 * Each sfmmu structure carries an array of MMU context info 1163 * structures, one per context domain. The size of this array depends 1164 * on the maximum number of context domains. So, the size of the 1165 * sfmmu structure varies per platform. 1166 * 1167 * sfmmu is allocated from static arena, because trap 1168 * handler at TL > 0 is not allowed to touch kernel relocatable 1169 * memory. sfmmu's alignment is changed to 64 bytes from 1170 * default 8 bytes, as the lower 6 bits will be used to pass 1171 * pgcnt to vtag_flush_pgcnt_tl1. 1172 */ 1173 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1174 1175 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1176 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1177 NULL, NULL, static_arena, 0); 1178 1179 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1180 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1181 1182 /* 1183 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1184 * from the heap when low on memory or when TSB_FORCEALLOC is 1185 * specified, don't use magazines to cache them--we want to return 1186 * them to the system as quickly as possible. 1187 */ 1188 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1189 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1190 static_arena, KMC_NOMAGAZINE); 1191 1192 /* 1193 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1194 * memory, which corresponds to the old static reserve for TSBs. 1195 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1196 * memory we'll allocate for TSB slabs; beyond this point TSB 1197 * allocations will be taken from the kernel heap (via 1198 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1199 * consumer. 1200 */ 1201 if (tsb_alloc_hiwater_factor == 0) { 1202 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1203 } 1204 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1205 1206 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1207 if (!(disable_large_pages & (1 << sz))) 1208 break; 1209 } 1210 1211 if (sz < tsb_slab_ttesz) { 1212 tsb_slab_ttesz = sz; 1213 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1214 tsb_slab_size = 1 << tsb_slab_shift; 1215 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1216 use_bigtsb_arena = 0; 1217 } else if (use_bigtsb_arena && 1218 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1219 use_bigtsb_arena = 0; 1220 } 1221 1222 if (!use_bigtsb_arena) { 1223 bigtsb_slab_shift = tsb_slab_shift; 1224 } 1225 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1226 1227 /* 1228 * On smaller memory systems, allocate TSB memory in smaller chunks 1229 * than the default 4M slab size. 
We also honor disable_large_pages 1230 * here. 1231 * 1232 * The trap handlers need to be patched with the final slab shift, 1233 * since they need to be able to construct the TSB pointer at runtime. 1234 */ 1235 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1236 !(disable_large_pages & (1 << TTE512K))) { 1237 tsb_slab_ttesz = TTE512K; 1238 tsb_slab_shift = MMU_PAGESHIFT512K; 1239 tsb_slab_size = MMU_PAGESIZE512K; 1240 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1241 use_bigtsb_arena = 0; 1242 } 1243 1244 if (!use_bigtsb_arena) { 1245 bigtsb_slab_ttesz = tsb_slab_ttesz; 1246 bigtsb_slab_shift = tsb_slab_shift; 1247 bigtsb_slab_size = tsb_slab_size; 1248 bigtsb_slab_mask = tsb_slab_mask; 1249 } 1250 1251 1252 /* 1253 * Set up memory callback to update tsb_alloc_hiwater and 1254 * tsb_max_growsize. 1255 */ 1256 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1257 ASSERT(i == 0); 1258 1259 /* 1260 * kmem_tsb_arena is the source from which large TSB slabs are 1261 * drawn. The quantum of this arena corresponds to the largest 1262 * TSB size we can dynamically allocate for user processes. 1263 * Currently it must also be a supported page size since we 1264 * use exactly one translation entry to map each slab page. 1265 * 1266 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1267 * which most TSBs are allocated. Since most TSB allocations are 1268 * typically 8K we have a kmem cache we stack on top of each 1269 * kmem_tsb_default_arena to speed up those allocations. 1270 * 1271 * Note the two-level scheme of arenas is required only 1272 * because vmem_create doesn't allow us to specify alignment 1273 * requirements. If this ever changes the code could be 1274 * simplified to use only one level of arenas. 1275 * 1276 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1277 * will be provided in addition to the 4M kmem_tsb_arena. 
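 *
 * To summarize the layering set up by the vmem_create()/kmem_cache_create()
 * calls below (per lgroup when tsb_lgrp_affinity is set, index 0 otherwise):
 *
 *	sfmmu_tsb_cache[i] -> kmem_tsb_default_arena[i] -> kmem_tsb_arena
 *	kmem_bigtsb_default_arena[i] -> kmem_bigtsb_arena (256M slabs)
 *	kmem_tsb_arena and kmem_bigtsb_arena import from heap_arena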
1278 */ 1279 if (use_bigtsb_arena) { 1280 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1281 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1282 vmem_xfree, heap_arena, 0, VM_SLEEP); 1283 } 1284 1285 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1286 sfmmu_vmem_xalloc_aligned_wrapper, 1287 vmem_xfree, heap_arena, 0, VM_SLEEP); 1288 1289 if (tsb_lgrp_affinity) { 1290 char s[50]; 1291 for (i = 0; i < NLGRPS_MAX; i++) { 1292 if (use_bigtsb_arena) { 1293 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1294 kmem_bigtsb_default_arena[i] = vmem_create(s, 1295 NULL, 0, 2 * tsb_slab_size, 1296 sfmmu_tsb_segkmem_alloc, 1297 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1298 0, VM_SLEEP | VM_BESTFIT); 1299 } 1300 1301 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1302 kmem_tsb_default_arena[i] = vmem_create(s, 1303 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1304 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1305 VM_SLEEP | VM_BESTFIT); 1306 1307 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1308 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1309 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1310 kmem_tsb_default_arena[i], 0); 1311 } 1312 } else { 1313 if (use_bigtsb_arena) { 1314 kmem_bigtsb_default_arena[0] = 1315 vmem_create("kmem_bigtsb_default", NULL, 0, 1316 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1317 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1318 VM_SLEEP | VM_BESTFIT); 1319 } 1320 1321 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1322 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1323 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1324 VM_SLEEP | VM_BESTFIT); 1325 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1326 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1327 kmem_tsb_default_arena[0], 0); 1328 } 1329 1330 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1331 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1332 sfmmu_hblkcache_destructor, 1333 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1334 hat_memload_arena, KMC_NOHASH); 1335 1336 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1337 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP); 1338 1339 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1340 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1341 sfmmu_hblkcache_destructor, 1342 NULL, (void *)HME1BLK_SZ, 1343 hat_memload1_arena, KMC_NOHASH); 1344 1345 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1346 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1347 1348 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1349 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1350 NULL, NULL, static_arena, KMC_NOHASH); 1351 1352 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1353 sizeof (ism_ment_t), 0, NULL, NULL, 1354 NULL, NULL, NULL, 0); 1355 1356 /* 1357 * We grab the first hat for the kernel, 1358 */ 1359 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1360 kas.a_hat = hat_alloc(&kas); 1361 AS_LOCK_EXIT(&kas, &kas.a_lock); 1362 1363 /* 1364 * Initialize hblk_reserve. 1365 */ 1366 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1367 va_to_pa((caddr_t)hblk_reserve); 1368 1369 #ifndef UTSB_PHYS 1370 /* 1371 * Reserve some kernel virtual address space for the locked TTEs 1372 * that allow us to probe the TSB from TL>0. 
1373 */ 1374 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1375 0, 0, NULL, NULL, VM_SLEEP); 1376 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1377 0, 0, NULL, NULL, VM_SLEEP); 1378 #endif 1379 1380 #ifdef VAC 1381 /* 1382 * The big page VAC handling code assumes VAC 1383 * will not be bigger than the smallest big 1384 * page- which is 64K. 1385 */ 1386 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1387 cmn_err(CE_PANIC, "VAC too big!"); 1388 } 1389 #endif 1390 1391 (void) xhat_init(); 1392 1393 uhme_hash_pa = va_to_pa(uhme_hash); 1394 khme_hash_pa = va_to_pa(khme_hash); 1395 1396 /* 1397 * Initialize relocation locks. kpr_suspendlock is held 1398 * at PIL_MAX to prevent interrupts from pinning the holder 1399 * of a suspended TTE which may access it leading to a 1400 * deadlock condition. 1401 */ 1402 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1403 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1404 1405 /* 1406 * If Shared context support is disabled via /etc/system 1407 * set shctx_on to 0 here if it was set to 1 earlier in boot 1408 * sequence by cpu module initialization code. 1409 */ 1410 if (shctx_on && disable_shctx) { 1411 shctx_on = 0; 1412 } 1413 1414 /* 1415 * If support for page size search is disabled via /etc/system 1416 * set pgsz_search_on to 0 here. 1417 */ 1418 if (pgsz_search_on && disable_pgsz_search) { 1419 pgsz_search_on = 0; 1420 } 1421 1422 if (shctx_on) { 1423 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1424 sizeof (srd_buckets[0]), KM_SLEEP); 1425 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1426 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1427 MUTEX_DEFAULT, NULL); 1428 } 1429 1430 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1431 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1432 NULL, NULL, NULL, 0); 1433 region_cache = kmem_cache_create("region_cache", 1434 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1435 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1436 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1437 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1438 NULL, NULL, NULL, 0); 1439 } 1440 1441 /* 1442 * Pre-allocate hrm_hashtab before enabling the collection of 1443 * refmod statistics. Allocating on the fly would mean us 1444 * running the risk of suffering recursive mutex enters or 1445 * deadlocks. 1446 */ 1447 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1448 KM_SLEEP); 1449 1450 /* Allocate per-cpu pending freelist of hmeblks */ 1451 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64, 1452 KM_SLEEP); 1453 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP( 1454 (uintptr_t)cpu_hme_pend, 64); 1455 1456 for (i = 0; i < NCPU; i++) { 1457 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT, 1458 NULL); 1459 } 1460 1461 if (cpu_hme_pend_thresh == 0) { 1462 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH; 1463 } 1464 } 1465 1466 /* 1467 * Initialize locking for the hat layer, called early during boot. 1468 */ 1469 static void 1470 hat_lock_init() 1471 { 1472 int i; 1473 1474 /* 1475 * initialize the array of mutexes protecting a page's mapping 1476 * list and p_nrm field. 
1477 */ 1478 for (i = 0; i < mml_table_sz; i++) 1479 mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL); 1480 1481 if (kpm_enable) { 1482 for (i = 0; i < kpmp_table_sz; i++) { 1483 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1484 MUTEX_DEFAULT, NULL); 1485 } 1486 } 1487 1488 /* 1489 * Initialize array of mutex locks that protects sfmmu fields and 1490 * TSB lists. 1491 */ 1492 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1493 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1494 NULL); 1495 } 1496 1497 #define SFMMU_KERNEL_MAXVA \ 1498 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1499 1500 /* 1501 * Allocate a hat structure. 1502 * Called when an address space first uses a hat. 1503 */ 1504 struct hat * 1505 hat_alloc(struct as *as) 1506 { 1507 sfmmu_t *sfmmup; 1508 int i; 1509 uint64_t cnum; 1510 extern uint_t get_color_start(struct as *); 1511 1512 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1513 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1514 sfmmup->sfmmu_as = as; 1515 sfmmup->sfmmu_flags = 0; 1516 sfmmup->sfmmu_tteflags = 0; 1517 sfmmup->sfmmu_rtteflags = 0; 1518 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1519 1520 if (as == &kas) { 1521 ksfmmup = sfmmup; 1522 sfmmup->sfmmu_cext = 0; 1523 cnum = KCONTEXT; 1524 1525 sfmmup->sfmmu_clrstart = 0; 1526 sfmmup->sfmmu_tsb = NULL; 1527 /* 1528 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1529 * to setup tsb_info for ksfmmup. 1530 */ 1531 } else { 1532 1533 /* 1534 * Just set to invalid ctx. When it faults, it will 1535 * get a valid ctx. This would avoid the situation 1536 * where we get a ctx, but it gets stolen and then 1537 * we fault when we try to run and so have to get 1538 * another ctx. 1539 */ 1540 sfmmup->sfmmu_cext = 0; 1541 cnum = INVALID_CONTEXT; 1542 1543 /* initialize original physical page coloring bin */ 1544 sfmmup->sfmmu_clrstart = get_color_start(as); 1545 #ifdef DEBUG 1546 if (tsb_random_size) { 1547 uint32_t randval = (uint32_t)gettick() >> 4; 1548 int size = randval % (tsb_max_growsize + 1); 1549 1550 /* chose a random tsb size for stress testing */ 1551 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1552 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1553 } else 1554 #endif /* DEBUG */ 1555 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1556 default_tsb_size, 1557 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1558 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1559 ASSERT(sfmmup->sfmmu_tsb != NULL); 1560 } 1561 1562 ASSERT(max_mmu_ctxdoms > 0); 1563 for (i = 0; i < max_mmu_ctxdoms; i++) { 1564 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1565 sfmmup->sfmmu_ctxs[i].gnum = 0; 1566 } 1567 1568 for (i = 0; i < max_mmu_page_sizes; i++) { 1569 sfmmup->sfmmu_ttecnt[i] = 0; 1570 sfmmup->sfmmu_scdrttecnt[i] = 0; 1571 sfmmup->sfmmu_ismttecnt[i] = 0; 1572 sfmmup->sfmmu_scdismttecnt[i] = 0; 1573 sfmmup->sfmmu_pgsz[i] = TTE8K; 1574 } 1575 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1576 sfmmup->sfmmu_iblk = NULL; 1577 sfmmup->sfmmu_ismhat = 0; 1578 sfmmup->sfmmu_scdhat = 0; 1579 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1580 if (sfmmup == ksfmmup) { 1581 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1582 } else { 1583 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1584 } 1585 sfmmup->sfmmu_free = 0; 1586 sfmmup->sfmmu_rmstat = 0; 1587 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1588 sfmmup->sfmmu_xhat_provider = NULL; 1589 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1590 sfmmup->sfmmu_srdp = NULL; 1591 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1592 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1593 sfmmup->sfmmu_scdp = NULL; 1594 
sfmmup->sfmmu_scd_link.next = NULL; 1595 sfmmup->sfmmu_scd_link.prev = NULL; 1596 1597 if (&mmu_set_pgsz_order && sfmmup != ksfmmup) { 1598 mmu_set_pgsz_order(sfmmup, 0); 1599 sfmmu_init_pgsz_hv(sfmmup); 1600 } 1601 return (sfmmup); 1602 } 1603 1604 /* 1605 * Create per-MMU context domain kstats for a given MMU ctx. 1606 */ 1607 static void 1608 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1609 { 1610 mmu_ctx_stat_t stat; 1611 kstat_t *mmu_kstat; 1612 1613 ASSERT(MUTEX_HELD(&cpu_lock)); 1614 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1615 1616 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1617 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1618 1619 if (mmu_kstat == NULL) { 1620 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1621 mmu_ctxp->mmu_idx); 1622 } else { 1623 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1624 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1625 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1626 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1627 mmu_ctxp->mmu_kstat = mmu_kstat; 1628 kstat_install(mmu_kstat); 1629 } 1630 } 1631 1632 /* 1633 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1634 * context domain information for a given CPU. If a platform does not 1635 * specify that interface, then the function below is used instead to return 1636 * default information. The defaults are as follows: 1637 * 1638 * - For sun4u systems there's one MMU context domain per CPU. 1639 * This default is used by all sun4u systems except OPL. OPL systems 1640 * provide platform specific interface to map CPU ids to MMU ids 1641 * because on OPL more than 1 CPU shares a single MMU. 1642 * Note that on sun4v, there is one global context domain for 1643 * the entire system. This is to avoid running into potential problem 1644 * with ldom physical cpu substitution feature. 1645 * - The number of MMU context IDs supported on any CPU in the 1646 * system is 8K. 1647 */ 1648 /*ARGSUSED*/ 1649 static void 1650 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1651 { 1652 infop->mmu_nctxs = nctxs; 1653 #ifndef sun4v 1654 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1655 #else /* sun4v */ 1656 infop->mmu_idx = 0; 1657 #endif /* sun4v */ 1658 } 1659 1660 /* 1661 * Called during CPU initialization to set the MMU context-related information 1662 * for a CPU. 1663 * 1664 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1665 */ 1666 void 1667 sfmmu_cpu_init(cpu_t *cp) 1668 { 1669 mmu_ctx_info_t info; 1670 mmu_ctx_t *mmu_ctxp; 1671 1672 ASSERT(MUTEX_HELD(&cpu_lock)); 1673 1674 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1675 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1676 else 1677 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1678 1679 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1680 1681 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1682 /* Each mmu_ctx is cacheline aligned. */ 1683 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1684 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1685 1686 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1687 (void *)ipltospl(DISP_LEVEL)); 1688 mmu_ctxp->mmu_idx = info.mmu_idx; 1689 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1690 /* 1691 * Globally for lifetime of a system, 1692 * gnum must always increase. 1693 * mmu_saved_gnum is protected by the cpu_lock. 
1694 */ 1695 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1696 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1697 1698 sfmmu_mmu_kstat_create(mmu_ctxp); 1699 1700 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1701 } else { 1702 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1703 } 1704 1705 /* 1706 * The mmu_lock is acquired here to prevent races with 1707 * the wrap-around code. 1708 */ 1709 mutex_enter(&mmu_ctxp->mmu_lock); 1710 1711 1712 mmu_ctxp->mmu_ncpus++; 1713 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1714 CPU_MMU_IDX(cp) = info.mmu_idx; 1715 CPU_MMU_CTXP(cp) = mmu_ctxp; 1716 1717 mutex_exit(&mmu_ctxp->mmu_lock); 1718 } 1719 1720 /* 1721 * Called to perform MMU context-related cleanup for a CPU. 1722 */ 1723 void 1724 sfmmu_cpu_cleanup(cpu_t *cp) 1725 { 1726 mmu_ctx_t *mmu_ctxp; 1727 1728 ASSERT(MUTEX_HELD(&cpu_lock)); 1729 1730 mmu_ctxp = CPU_MMU_CTXP(cp); 1731 ASSERT(mmu_ctxp != NULL); 1732 1733 /* 1734 * The mmu_lock is acquired here to prevent races with 1735 * the wrap-around code. 1736 */ 1737 mutex_enter(&mmu_ctxp->mmu_lock); 1738 1739 CPU_MMU_CTXP(cp) = NULL; 1740 1741 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1742 if (--mmu_ctxp->mmu_ncpus == 0) { 1743 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1744 mutex_exit(&mmu_ctxp->mmu_lock); 1745 mutex_destroy(&mmu_ctxp->mmu_lock); 1746 1747 if (mmu_ctxp->mmu_kstat) 1748 kstat_delete(mmu_ctxp->mmu_kstat); 1749 1750 /* mmu_saved_gnum is protected by the cpu_lock. */ 1751 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1752 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1753 1754 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1755 1756 return; 1757 } 1758 1759 mutex_exit(&mmu_ctxp->mmu_lock); 1760 } 1761 1762 /* 1763 * Hat_setup, makes an address space context the current active one. 1764 * In sfmmu this translates to setting the secondary context with the 1765 * corresponding context. 1766 */ 1767 void 1768 hat_setup(struct hat *sfmmup, int allocflag) 1769 { 1770 hatlock_t *hatlockp; 1771 1772 /* Init needs some special treatment. */ 1773 if (allocflag == HAT_INIT) { 1774 /* 1775 * Make sure that we have 1776 * 1. a TSB 1777 * 2. a valid ctx that doesn't get stolen after this point. 1778 */ 1779 hatlockp = sfmmu_hat_enter(sfmmup); 1780 1781 /* 1782 * Swap in the TSB. hat_init() allocates tsbinfos without 1783 * TSBs, but we need one for init, since the kernel does some 1784 * special things to set up its stack and needs the TSB to 1785 * resolve page faults. 1786 */ 1787 sfmmu_tsb_swapin(sfmmup, hatlockp); 1788 1789 sfmmu_get_ctx(sfmmup); 1790 1791 sfmmu_hat_exit(hatlockp); 1792 } else { 1793 ASSERT(allocflag == HAT_ALLOC); 1794 1795 hatlockp = sfmmu_hat_enter(sfmmup); 1796 kpreempt_disable(); 1797 1798 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1799 /* 1800 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1801 * pagesize bits don't matter in this case since we are passing 1802 * INVALID_CONTEXT to it. 1803 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1804 */ 1805 sfmmu_setctx_sec(INVALID_CONTEXT); 1806 sfmmu_clear_utsbinfo(); 1807 1808 kpreempt_enable(); 1809 sfmmu_hat_exit(hatlockp); 1810 } 1811 } 1812 1813 /* 1814 * Free all the translation resources for the specified address space. 1815 * Called from as_free when an address space is being destroyed. 
1816 */ 1817 void 1818 hat_free_start(struct hat *sfmmup) 1819 { 1820 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1821 ASSERT(sfmmup != ksfmmup); 1822 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1823 1824 sfmmup->sfmmu_free = 1; 1825 if (sfmmup->sfmmu_scdp != NULL) { 1826 sfmmu_leave_scd(sfmmup, 0); 1827 } 1828 1829 ASSERT(sfmmup->sfmmu_scdp == NULL); 1830 } 1831 1832 void 1833 hat_free_end(struct hat *sfmmup) 1834 { 1835 int i; 1836 1837 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1838 ASSERT(sfmmup->sfmmu_free == 1); 1839 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1840 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1841 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1842 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1843 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1844 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1845 1846 if (sfmmup->sfmmu_rmstat) { 1847 hat_freestat(sfmmup->sfmmu_as, NULL); 1848 } 1849 1850 while (sfmmup->sfmmu_tsb != NULL) { 1851 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1852 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1853 sfmmup->sfmmu_tsb = next; 1854 } 1855 1856 if (sfmmup->sfmmu_srdp != NULL) { 1857 sfmmu_leave_srd(sfmmup); 1858 ASSERT(sfmmup->sfmmu_srdp == NULL); 1859 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1860 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1861 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1862 SFMMU_L2_HMERLINKS_SIZE); 1863 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1864 } 1865 } 1866 } 1867 sfmmu_free_sfmmu(sfmmup); 1868 1869 #ifdef DEBUG 1870 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1871 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1872 } 1873 #endif 1874 1875 kmem_cache_free(sfmmuid_cache, sfmmup); 1876 } 1877 1878 /* 1879 * Set up any translation structures, for the specified address space, 1880 * that are needed or preferred when the process is being swapped in. 1881 */ 1882 /* ARGSUSED */ 1883 void 1884 hat_swapin(struct hat *hat) 1885 { 1886 ASSERT(hat->sfmmu_xhat_provider == NULL); 1887 } 1888 1889 /* 1890 * Free all of the translation resources, for the specified address space, 1891 * that can be freed while the process is swapped out. Called from as_swapout. 1892 * Also, free up the ctx that this process was using. 1893 */ 1894 void 1895 hat_swapout(struct hat *sfmmup) 1896 { 1897 struct hmehash_bucket *hmebp; 1898 struct hme_blk *hmeblkp; 1899 struct hme_blk *pr_hblk = NULL; 1900 struct hme_blk *nx_hblk; 1901 int i; 1902 struct hme_blk *list = NULL; 1903 hatlock_t *hatlockp; 1904 struct tsb_info *tsbinfop; 1905 struct free_tsb { 1906 struct free_tsb *next; 1907 struct tsb_info *tsbinfop; 1908 }; /* free list of TSBs */ 1909 struct free_tsb *freelist, *last, *next; 1910 1911 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1912 SFMMU_STAT(sf_swapout); 1913 1914 /* 1915 * There is no way to go from an as to all its translations in sfmmu. 1916 * Here is one of the times when we take the big hit and traverse 1917 * the hash looking for hme_blks to free up. Not only do we free up 1918 * this as hme_blks but all those that are free. We are obviously 1919 * swapping because we need memory so let's free up as much 1920 * as we can. 1921 * 1922 * Note that we don't flush TLB/TSB here -- it's not necessary 1923 * because: 1924 * 1) we free the ctx we're using and throw away the TSB(s); 1925 * 2) processes aren't runnable while being swapped out. 
1926 */ 1927 ASSERT(sfmmup != KHATID); 1928 for (i = 0; i <= UHMEHASH_SZ; i++) { 1929 hmebp = &uhme_hash[i]; 1930 SFMMU_HASH_LOCK(hmebp); 1931 hmeblkp = hmebp->hmeblkp; 1932 pr_hblk = NULL; 1933 while (hmeblkp) { 1934 1935 ASSERT(!hmeblkp->hblk_xhat_bit); 1936 1937 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 1938 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 1939 ASSERT(!hmeblkp->hblk_shared); 1940 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 1941 (caddr_t)get_hblk_base(hmeblkp), 1942 get_hblk_endaddr(hmeblkp), 1943 NULL, HAT_UNLOAD); 1944 } 1945 nx_hblk = hmeblkp->hblk_next; 1946 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 1947 ASSERT(!hmeblkp->hblk_lckcnt); 1948 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 1949 &list, 0); 1950 } else { 1951 pr_hblk = hmeblkp; 1952 } 1953 hmeblkp = nx_hblk; 1954 } 1955 SFMMU_HASH_UNLOCK(hmebp); 1956 } 1957 1958 sfmmu_hblks_list_purge(&list, 0); 1959 1960 /* 1961 * Now free up the ctx so that others can reuse it. 1962 */ 1963 hatlockp = sfmmu_hat_enter(sfmmup); 1964 1965 sfmmu_invalidate_ctx(sfmmup); 1966 1967 /* 1968 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 1969 * If TSBs were never swapped in, just return. 1970 * This implies that we don't support partial swapping 1971 * of TSBs -- either all are swapped out, or none are. 1972 * 1973 * We must hold the HAT lock here to prevent racing with another 1974 * thread trying to unmap TTEs from the TSB or running the post- 1975 * relocator after relocating the TSB's memory. Unfortunately, we 1976 * can't free memory while holding the HAT lock or we could 1977 * deadlock, so we build a list of TSBs to be freed after marking 1978 * the tsbinfos as swapped out and free them after dropping the 1979 * lock. 1980 */ 1981 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 1982 sfmmu_hat_exit(hatlockp); 1983 return; 1984 } 1985 1986 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 1987 last = freelist = NULL; 1988 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 1989 tsbinfop = tsbinfop->tsb_next) { 1990 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 1991 1992 /* 1993 * Cast the TSB into a struct free_tsb and put it on the free 1994 * list. 1995 */ 1996 if (freelist == NULL) { 1997 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 1998 } else { 1999 last->next = (struct free_tsb *)tsbinfop->tsb_va; 2000 last = last->next; 2001 } 2002 last->next = NULL; 2003 last->tsbinfop = tsbinfop; 2004 tsbinfop->tsb_flags |= TSB_SWAPPED; 2005 /* 2006 * Zero out the TTE to clear the valid bit. 2007 * Note we can't use a value like 0xbad because we want to 2008 * ensure diagnostic bits are NEVER set on TTEs that might 2009 * be loaded. The intent is to catch any invalid access 2010 * to the swapped TSB, such as a thread running with a valid 2011 * context without first calling sfmmu_tsb_swapin() to 2012 * allocate TSB memory. 2013 */ 2014 tsbinfop->tsb_tte.ll = 0; 2015 } 2016 2017 /* Now we can drop the lock and free the TSB memory. 
*/ 2018 sfmmu_hat_exit(hatlockp); 2019 for (; freelist != NULL; freelist = next) { 2020 next = freelist->next; 2021 sfmmu_tsb_free(freelist->tsbinfop); 2022 } 2023 } 2024 2025 /* 2026 * Duplicate the translations of an as into another newas 2027 */ 2028 /* ARGSUSED */ 2029 int 2030 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 2031 uint_t flag) 2032 { 2033 sf_srd_t *srdp; 2034 sf_scd_t *scdp; 2035 int i; 2036 extern uint_t get_color_start(struct as *); 2037 2038 ASSERT(hat->sfmmu_xhat_provider == NULL); 2039 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 2040 (flag == HAT_DUP_SRD)); 2041 ASSERT(hat != ksfmmup); 2042 ASSERT(newhat != ksfmmup); 2043 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 2044 2045 if (flag == HAT_DUP_COW) { 2046 panic("hat_dup: HAT_DUP_COW not supported"); 2047 } 2048 2049 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2050 ASSERT(srdp->srd_evp != NULL); 2051 VN_HOLD(srdp->srd_evp); 2052 ASSERT(srdp->srd_refcnt > 0); 2053 newhat->sfmmu_srdp = srdp; 2054 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 2055 } 2056 2057 /* 2058 * HAT_DUP_ALL flag is used after as duplication is done. 2059 */ 2060 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2061 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2062 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2063 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2064 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2065 } 2066 2067 /* check if need to join scd */ 2068 if ((scdp = hat->sfmmu_scdp) != NULL && 2069 newhat->sfmmu_scdp != scdp) { 2070 int ret; 2071 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2072 &scdp->scd_region_map, ret); 2073 ASSERT(ret); 2074 sfmmu_join_scd(scdp, newhat); 2075 ASSERT(newhat->sfmmu_scdp == scdp && 2076 scdp->scd_refcnt >= 2); 2077 for (i = 0; i < max_mmu_page_sizes; i++) { 2078 newhat->sfmmu_ismttecnt[i] = 2079 hat->sfmmu_ismttecnt[i]; 2080 newhat->sfmmu_scdismttecnt[i] = 2081 hat->sfmmu_scdismttecnt[i]; 2082 } 2083 } else if (&mmu_set_pgsz_order) { 2084 mmu_set_pgsz_order(newhat, 0); 2085 } 2086 2087 sfmmu_check_page_sizes(newhat, 1); 2088 } 2089 2090 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2091 update_proc_pgcolorbase_after_fork != 0) { 2092 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2093 } 2094 return (0); 2095 } 2096 2097 void 2098 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2099 uint_t attr, uint_t flags) 2100 { 2101 hat_do_memload(hat, addr, pp, attr, flags, 2102 SFMMU_INVALID_SHMERID); 2103 } 2104 2105 void 2106 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2107 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2108 { 2109 uint_t rid; 2110 if (rcookie == HAT_INVALID_REGION_COOKIE || 2111 hat->sfmmu_xhat_provider != NULL) { 2112 hat_do_memload(hat, addr, pp, attr, flags, 2113 SFMMU_INVALID_SHMERID); 2114 return; 2115 } 2116 rid = (uint_t)((uint64_t)rcookie); 2117 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2118 hat_do_memload(hat, addr, pp, attr, flags, rid); 2119 } 2120 2121 /* 2122 * Set up addr to map to page pp with protection prot. 2123 * As an optimization we also load the TSB with the 2124 * corresponding tte but it is no big deal if the tte gets kicked out. 
2125 */ 2126 static void 2127 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2128 uint_t attr, uint_t flags, uint_t rid) 2129 { 2130 tte_t tte; 2131 2132 2133 ASSERT(hat != NULL); 2134 ASSERT(PAGE_LOCKED(pp)); 2135 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2136 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2137 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2138 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2139 2140 if (PP_ISFREE(pp)) { 2141 panic("hat_memload: loading a mapping to free page %p", 2142 (void *)pp); 2143 } 2144 2145 if (hat->sfmmu_xhat_provider) { 2146 /* no regions for xhats */ 2147 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2148 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 2149 return; 2150 } 2151 2152 ASSERT((hat == ksfmmup) || 2153 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2154 2155 if (flags & ~SFMMU_LOAD_ALLFLAG) 2156 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2157 flags & ~SFMMU_LOAD_ALLFLAG); 2158 2159 if (hat->sfmmu_rmstat) 2160 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2161 2162 #if defined(SF_ERRATA_57) 2163 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2164 (addr < errata57_limit) && (attr & PROT_EXEC) && 2165 !(flags & HAT_LOAD_SHARE)) { 2166 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2167 " page executable"); 2168 attr &= ~PROT_EXEC; 2169 } 2170 #endif 2171 2172 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2173 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2174 2175 /* 2176 * Check TSB and TLB page sizes. 2177 */ 2178 if ((flags & HAT_LOAD_SHARE) == 0) { 2179 sfmmu_check_page_sizes(hat, 1); 2180 } 2181 } 2182 2183 /* 2184 * hat_devload can be called to map real memory (e.g. 2185 * /dev/kmem) and even though hat_devload will determine pf is 2186 * for memory, it will be unable to get a shared lock on the 2187 * page (because someone else has it exclusively) and will 2188 * pass dp = NULL. If tteload doesn't get a non-NULL 2189 * page pointer it can't cache memory. 
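 *
 * As an illustrative sketch only (not a caller taken from this file;
 * "va" and "pfn" here are hypothetical), a driver-level consumer
 * mapping a page of device registers uncached into the kernel address
 * space might do something like:
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * Because such a pfn is not memory, the code below forces the mapping
 * non-cacheable and, for HAT_STRICTORDER, sets the side effect bit.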
2190 */ 2191 void 2192 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2193 uint_t attr, int flags) 2194 { 2195 tte_t tte; 2196 struct page *pp = NULL; 2197 int use_lgpg = 0; 2198 2199 ASSERT(hat != NULL); 2200 2201 if (hat->sfmmu_xhat_provider) { 2202 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 2203 return; 2204 } 2205 2206 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2207 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2208 ASSERT((hat == ksfmmup) || 2209 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2210 if (len == 0) 2211 panic("hat_devload: zero len"); 2212 if (flags & ~SFMMU_LOAD_ALLFLAG) 2213 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2214 flags & ~SFMMU_LOAD_ALLFLAG); 2215 2216 #if defined(SF_ERRATA_57) 2217 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2218 (addr < errata57_limit) && (attr & PROT_EXEC) && 2219 !(flags & HAT_LOAD_SHARE)) { 2220 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2221 " page executable"); 2222 attr &= ~PROT_EXEC; 2223 } 2224 #endif 2225 2226 /* 2227 * If it's a memory page find its pp 2228 */ 2229 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2230 pp = page_numtopp_nolock(pfn); 2231 if (pp == NULL) { 2232 flags |= HAT_LOAD_NOCONSIST; 2233 } else { 2234 if (PP_ISFREE(pp)) { 2235 panic("hat_memload: loading " 2236 "a mapping to free page %p", 2237 (void *)pp); 2238 } 2239 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2240 panic("hat_memload: loading a mapping " 2241 "to unlocked relocatable page %p", 2242 (void *)pp); 2243 } 2244 ASSERT(len == MMU_PAGESIZE); 2245 } 2246 } 2247 2248 if (hat->sfmmu_rmstat) 2249 hat_resvstat(len, hat->sfmmu_as, addr); 2250 2251 if (flags & HAT_LOAD_NOCONSIST) { 2252 attr |= SFMMU_UNCACHEVTTE; 2253 use_lgpg = 1; 2254 } 2255 if (!pf_is_memory(pfn)) { 2256 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2257 use_lgpg = 1; 2258 switch (attr & HAT_ORDER_MASK) { 2259 case HAT_STRICTORDER: 2260 case HAT_UNORDERED_OK: 2261 /* 2262 * we set the side effect bit for all non 2263 * memory mappings unless merging is ok 2264 */ 2265 attr |= SFMMU_SIDEFFECT; 2266 break; 2267 case HAT_MERGING_OK: 2268 case HAT_LOADCACHING_OK: 2269 case HAT_STORECACHING_OK: 2270 break; 2271 default: 2272 panic("hat_devload: bad attr"); 2273 break; 2274 } 2275 } 2276 while (len) { 2277 if (!use_lgpg) { 2278 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2279 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2280 flags, SFMMU_INVALID_SHMERID); 2281 len -= MMU_PAGESIZE; 2282 addr += MMU_PAGESIZE; 2283 pfn++; 2284 continue; 2285 } 2286 /* 2287 * try to use large pages, check va/pa alignments 2288 * Note that 32M/256M page sizes are not (yet) supported. 
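 * As a worked example of the loop below (illustrative only): a 6 Mbyte
 * request whose virtual and physical addresses are both 4 Mbyte aligned
 * is mapped, assuming neither size is disabled in disable_large_pages,
 * as one 4M TTE followed by four 512K TTEs; a request that is only 64K
 * aligned falls through to 64K and/or 8K TTEs.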
2289 */
2290 if ((len >= MMU_PAGESIZE4M) &&
2291 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2292 !(disable_large_pages & (1 << TTE4M)) &&
2293 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2294 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2295 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2296 flags, SFMMU_INVALID_SHMERID);
2297 len -= MMU_PAGESIZE4M;
2298 addr += MMU_PAGESIZE4M;
2299 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2300 } else if ((len >= MMU_PAGESIZE512K) &&
2301 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2302 !(disable_large_pages & (1 << TTE512K)) &&
2303 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2304 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2305 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2306 flags, SFMMU_INVALID_SHMERID);
2307 len -= MMU_PAGESIZE512K;
2308 addr += MMU_PAGESIZE512K;
2309 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2310 } else if ((len >= MMU_PAGESIZE64K) &&
2311 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2312 !(disable_large_pages & (1 << TTE64K)) &&
2313 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2314 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2315 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2316 flags, SFMMU_INVALID_SHMERID);
2317 len -= MMU_PAGESIZE64K;
2318 addr += MMU_PAGESIZE64K;
2319 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2320 } else {
2321 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2322 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2323 flags, SFMMU_INVALID_SHMERID);
2324 len -= MMU_PAGESIZE;
2325 addr += MMU_PAGESIZE;
2326 pfn++;
2327 }
2328 }
2329 
2330 /*
2331 * Check TSB and TLB page sizes.
2332 */
2333 if ((flags & HAT_LOAD_SHARE) == 0) {
2334 sfmmu_check_page_sizes(hat, 1);
2335 }
2336 }
2337 
2338 void
2339 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2340 struct page **pps, uint_t attr, uint_t flags)
2341 {
2342 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2343 SFMMU_INVALID_SHMERID);
2344 }
2345 
2346 void
2347 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2348 struct page **pps, uint_t attr, uint_t flags,
2349 hat_region_cookie_t rcookie)
2350 {
2351 uint_t rid;
2352 if (rcookie == HAT_INVALID_REGION_COOKIE ||
2353 hat->sfmmu_xhat_provider != NULL) {
2354 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2355 SFMMU_INVALID_SHMERID);
2356 return;
2357 }
2358 rid = (uint_t)((uint64_t)rcookie);
2359 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2360 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2361 }
2362 
2363 /*
2364 * Map the largest extent possible out of the page array. The array may NOT
2365 * be in order. The largest possible mapping a page can have
2366 * is specified in the p_szc field. The p_szc field
2367 * cannot change as long as there are any mappings (large or small)
2368 * to any of the pages that make up the large page. (i.e., any
2369 * promotion/demotion of page size is not up to the hat but up to
2370 * the page free list manager). The array
2371 * should consist of properly aligned contiguous pages that are
2372 * part of a big page for a large mapping to be created.
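 *
 * As a hypothetical illustration of the alignment rules above: with an
 * 8K base page size, a single 4M mapping is attempted only when addr is
 * 4M aligned, the first page has p_szc >= TTE4M, its pfn is a multiple
 * of TTEPAGES(TTE4M) (512), and at least 512 pages remain; those 512
 * pages must also turn out to be physically contiguous, or the load is
 * undone and the range falls back to smaller mappings.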
2373 */
2374 static void
2375 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2376 struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2377 {
2378 int ttesz;
2379 size_t mapsz;
2380 pgcnt_t numpg, npgs;
2381 tte_t tte;
2382 page_t *pp;
2383 uint_t large_pages_disable;
2384 
2385 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2386 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2387 
2388 if (hat->sfmmu_xhat_provider) {
2389 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2390 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2391 return;
2392 }
2393 
2394 if (hat->sfmmu_rmstat)
2395 hat_resvstat(len, hat->sfmmu_as, addr);
2396 
2397 #if defined(SF_ERRATA_57)
2398 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2399 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2400 !(flags & HAT_LOAD_SHARE)) {
2401 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2402 "user page executable");
2403 attr &= ~PROT_EXEC;
2404 }
2405 #endif
2406 
2407 /* Get number of pages */
2408 npgs = len >> MMU_PAGESHIFT;
2409 
2410 if (flags & HAT_LOAD_SHARE) {
2411 large_pages_disable = disable_ism_large_pages;
2412 } else {
2413 large_pages_disable = disable_large_pages;
2414 }
2415 
2416 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2417 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2418 rid);
2419 return;
2420 }
2421 
2422 while (npgs >= NHMENTS) {
2423 pp = *pps;
2424 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2425 /*
2426 * Check if this page size is disabled.
2427 */
2428 if (large_pages_disable & (1 << ttesz))
2429 continue;
2430 
2431 numpg = TTEPAGES(ttesz);
2432 mapsz = numpg << MMU_PAGESHIFT;
2433 if ((npgs >= numpg) &&
2434 IS_P2ALIGNED(addr, mapsz) &&
2435 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2436 /*
2437 * At this point we have enough pages and
2438 * we know the virtual address and the pfn
2439 * are properly aligned. We still need
2440 * to check for physical contiguity, but since
2441 * it is very likely that this is the case
2442 * we assume the pages are contiguous and undo
2443 * the request if necessary. It would
2444 * be great if we could get a hint flag
2445 * like HAT_CONTIG which would tell us
2446 * the pages are contiguous for sure.
2447 */
2448 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2449 attr, ttesz);
2450 if (!sfmmu_tteload_array(hat, &tte, addr,
2451 pps, flags, rid)) {
2452 break;
2453 }
2454 }
2455 }
2456 if (ttesz == TTE8K) {
2457 /*
2458 * We were not able to map the array using a large
2459 * page; batch an hmeblk, or a fraction of one, at a time.
2460 */
2461 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2462 & (NHMENTS-1);
2463 numpg = NHMENTS - numpg;
2464 ASSERT(numpg <= npgs);
2465 mapsz = numpg * MMU_PAGESIZE;
2466 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2467 numpg, rid);
2468 }
2469 addr += mapsz;
2470 npgs -= numpg;
2471 pps += numpg;
2472 }
2473 
2474 if (npgs) {
2475 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2476 rid);
2477 }
2478 
2479 /*
2480 * Check TSB and TLB page sizes.
2481 */
2482 if ((flags & HAT_LOAD_SHARE) == 0) {
2483 sfmmu_check_page_sizes(hat, 1);
2484 }
2485 }
2486 
2487 /*
2488 * Function tries to batch 8K pages into the same hme blk.
2489 */
2490 static void
2491 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2492 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2493 {
2494 tte_t tte;
2495 page_t *pp;
2496 struct hmehash_bucket *hmebp;
2497 struct hme_blk *hmeblkp;
2498 int index;
2499 
2500 while (npgs) {
2501 /*
2502 * Acquire the hash bucket.
2503 */ 2504 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2505 rid); 2506 ASSERT(hmebp); 2507 2508 /* 2509 * Find the hment block. 2510 */ 2511 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2512 TTE8K, flags, rid); 2513 ASSERT(hmeblkp); 2514 2515 do { 2516 /* 2517 * Make the tte. 2518 */ 2519 pp = *pps; 2520 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2521 2522 /* 2523 * Add the translation. 2524 */ 2525 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2526 vaddr, pps, flags, rid); 2527 2528 /* 2529 * Goto next page. 2530 */ 2531 pps++; 2532 npgs--; 2533 2534 /* 2535 * Goto next address. 2536 */ 2537 vaddr += MMU_PAGESIZE; 2538 2539 /* 2540 * Don't crossover into a different hmentblk. 2541 */ 2542 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2543 (NHMENTS-1)); 2544 2545 } while (index != 0 && npgs != 0); 2546 2547 /* 2548 * Release the hash bucket. 2549 */ 2550 2551 sfmmu_tteload_release_hashbucket(hmebp); 2552 } 2553 } 2554 2555 /* 2556 * Construct a tte for a page: 2557 * 2558 * tte_valid = 1 2559 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2560 * tte_size = size 2561 * tte_nfo = attr & HAT_NOFAULT 2562 * tte_ie = attr & HAT_STRUCTURE_LE 2563 * tte_hmenum = hmenum 2564 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2565 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2566 * tte_ref = 1 (optimization) 2567 * tte_wr_perm = attr & PROT_WRITE; 2568 * tte_no_sync = attr & HAT_NOSYNC 2569 * tte_lock = attr & SFMMU_LOCKTTE 2570 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2571 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2572 * tte_e = attr & SFMMU_SIDEFFECT 2573 * tte_priv = !(attr & PROT_USER) 2574 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2575 * tte_glb = 0 2576 */ 2577 void 2578 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2579 { 2580 ASSERT((attr & ~(SFMMU_LOAD_ALLATTR | HAT_ATTR_NOSOFTEXEC)) == 0); 2581 2582 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2583 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2584 2585 if (TTE_IS_NOSYNC(ttep)) { 2586 TTE_SET_REF(ttep); 2587 if (TTE_IS_WRITABLE(ttep)) { 2588 TTE_SET_MOD(ttep); 2589 } 2590 } 2591 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2592 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2593 } 2594 2595 /* 2596 * Disable hardware execute permission to force a fault if 2597 * this page is executed, so we can detect the execution. Set 2598 * the soft exec bit to remember that this TTE has execute 2599 * permission. 2600 */ 2601 if (TTE_IS_EXECUTABLE(ttep) && (attr & HAT_ATTR_NOSOFTEXEC) == 0 && 2602 icache_is_coherent == 0) { 2603 TTE_CLR_EXEC(ttep); 2604 TTE_SET_SOFTEXEC(ttep); 2605 } 2606 } 2607 2608 /* 2609 * This function will add a translation to the hme_blk and allocate the 2610 * hme_blk if one does not exist. 2611 * If a page structure is specified then it will add the 2612 * corresponding hment to the mapping list. 2613 * It will also update the hmenum field for the tte. 2614 * 2615 * Currently this function is only used for kernel mappings. 2616 * So pass invalid region to sfmmu_tteload_array(). 2617 */ 2618 void 2619 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2620 uint_t flags) 2621 { 2622 ASSERT(sfmmup == ksfmmup); 2623 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2624 SFMMU_INVALID_SHMERID); 2625 } 2626 2627 /* 2628 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 
2629 * Assumes that a particular page size may only be resident in one TSB. 2630 */ 2631 static void 2632 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2633 { 2634 struct tsb_info *tsbinfop = NULL; 2635 uint64_t tag; 2636 struct tsbe *tsbe_addr; 2637 uint64_t tsb_base; 2638 uint_t tsb_size; 2639 int vpshift = MMU_PAGESHIFT; 2640 int phys = 0; 2641 2642 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2643 phys = ktsb_phys; 2644 if (ttesz >= TTE4M) { 2645 #ifndef sun4v 2646 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2647 #endif 2648 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2649 tsb_size = ktsb4m_szcode; 2650 } else { 2651 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2652 tsb_size = ktsb_szcode; 2653 } 2654 } else { 2655 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2656 2657 /* 2658 * If there isn't a TSB for this page size, or the TSB is 2659 * swapped out, there is nothing to do. Note that the latter 2660 * case seems impossible but can occur if hat_pageunload() 2661 * is called on an ISM mapping while the process is swapped 2662 * out. 2663 */ 2664 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2665 return; 2666 2667 /* 2668 * If another thread is in the middle of relocating a TSB 2669 * we can't unload the entry so set a flag so that the 2670 * TSB will be flushed before it can be accessed by the 2671 * process. 2672 */ 2673 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2674 if (ttep == NULL) 2675 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2676 return; 2677 } 2678 #if defined(UTSB_PHYS) 2679 phys = 1; 2680 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2681 #else 2682 tsb_base = (uint64_t)tsbinfop->tsb_va; 2683 #endif 2684 tsb_size = tsbinfop->tsb_szc; 2685 } 2686 if (ttesz >= TTE4M) 2687 vpshift = MMU_PAGESHIFT4M; 2688 2689 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2690 tag = sfmmu_make_tsbtag(vaddr); 2691 2692 if (ttep == NULL) { 2693 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2694 } else { 2695 if (ttesz >= TTE4M) { 2696 SFMMU_STAT(sf_tsb_load4m); 2697 } else { 2698 SFMMU_STAT(sf_tsb_load8k); 2699 } 2700 2701 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2702 } 2703 } 2704 2705 /* 2706 * Unmap all entries from [start, end) matching the given page size. 2707 * 2708 * This function is used primarily to unmap replicated 64K or 512K entries 2709 * from the TSB that are inserted using the base page size TSB pointer, but 2710 * it may also be called to unmap a range of addresses from the TSB. 2711 */ 2712 void 2713 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2714 { 2715 struct tsb_info *tsbinfop; 2716 uint64_t tag; 2717 struct tsbe *tsbe_addr; 2718 caddr_t vaddr; 2719 uint64_t tsb_base; 2720 int vpshift, vpgsz; 2721 uint_t tsb_size; 2722 int phys = 0; 2723 2724 /* 2725 * Assumptions: 2726 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2727 * at a time shooting down any valid entries we encounter. 2728 * 2729 * If ttesz >= 4M we walk the range 4M at a time shooting 2730 * down any valid mappings we find. 2731 */ 2732 if (sfmmup == ksfmmup) { 2733 phys = ktsb_phys; 2734 if (ttesz >= TTE4M) { 2735 #ifndef sun4v 2736 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2737 #endif 2738 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2739 tsb_size = ktsb4m_szcode; 2740 } else { 2741 tsb_base = (phys)? 
ktsb_pbase : (uint64_t)ktsb_base; 2742 tsb_size = ktsb_szcode; 2743 } 2744 } else { 2745 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2746 2747 /* 2748 * If there isn't a TSB for this page size, or the TSB is 2749 * swapped out, there is nothing to do. Note that the latter 2750 * case seems impossible but can occur if hat_pageunload() 2751 * is called on an ISM mapping while the process is swapped 2752 * out. 2753 */ 2754 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2755 return; 2756 2757 /* 2758 * If another thread is in the middle of relocating a TSB 2759 * we can't unload the entry so set a flag so that the 2760 * TSB will be flushed before it can be accessed by the 2761 * process. 2762 */ 2763 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2764 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2765 return; 2766 } 2767 #if defined(UTSB_PHYS) 2768 phys = 1; 2769 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2770 #else 2771 tsb_base = (uint64_t)tsbinfop->tsb_va; 2772 #endif 2773 tsb_size = tsbinfop->tsb_szc; 2774 } 2775 if (ttesz >= TTE4M) { 2776 vpshift = MMU_PAGESHIFT4M; 2777 vpgsz = MMU_PAGESIZE4M; 2778 } else { 2779 vpshift = MMU_PAGESHIFT; 2780 vpgsz = MMU_PAGESIZE; 2781 } 2782 2783 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2784 tag = sfmmu_make_tsbtag(vaddr); 2785 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2786 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2787 } 2788 } 2789 2790 /* 2791 * Select the optimum TSB size given the number of mappings 2792 * that need to be cached. 2793 */ 2794 static int 2795 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2796 { 2797 int szc = 0; 2798 2799 #ifdef DEBUG 2800 if (tsb_grow_stress) { 2801 uint32_t randval = (uint32_t)gettick() >> 4; 2802 return (randval % (tsb_max_growsize + 1)); 2803 } 2804 #endif /* DEBUG */ 2805 2806 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2807 szc++; 2808 return (szc); 2809 } 2810 2811 /* 2812 * This function will add a translation to the hme_blk and allocate the 2813 * hme_blk if one does not exist. 2814 * If a page structure is specified then it will add the 2815 * corresponding hment to the mapping list. 2816 * It will also update the hmenum field for the tte. 2817 * Furthermore, it attempts to create a large page translation 2818 * for <addr,hat> at page array pps. It assumes addr and first 2819 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2820 */ 2821 static int 2822 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2823 page_t **pps, uint_t flags, uint_t rid) 2824 { 2825 struct hmehash_bucket *hmebp; 2826 struct hme_blk *hmeblkp; 2827 int ret; 2828 uint_t size; 2829 2830 /* 2831 * Get mapping size. 2832 */ 2833 size = TTE_CSZ(ttep); 2834 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2835 2836 /* 2837 * Acquire the hash bucket. 2838 */ 2839 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2840 ASSERT(hmebp); 2841 2842 /* 2843 * Find the hment block. 2844 */ 2845 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2846 rid); 2847 ASSERT(hmeblkp); 2848 2849 /* 2850 * Add the translation. 2851 */ 2852 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2853 rid); 2854 2855 /* 2856 * Release the hash bucket. 2857 */ 2858 sfmmu_tteload_release_hashbucket(hmebp); 2859 2860 return (ret); 2861 } 2862 2863 /* 2864 * Function locks and returns a pointer to the hash bucket for vaddr and size. 
2865 */
2866 static struct hmehash_bucket *
2867 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2868 uint_t rid)
2869 {
2870 struct hmehash_bucket *hmebp;
2871 int hmeshift;
2872 void *htagid = sfmmutohtagid(sfmmup, rid);
2873 
2874 ASSERT(htagid != NULL);
2875 
2876 hmeshift = HME_HASH_SHIFT(size);
2877 
2878 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2879 
2880 SFMMU_HASH_LOCK(hmebp);
2881 
2882 return (hmebp);
2883 }
2884 
2885 /*
2886 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2887 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, one is
2888 * allocated.
2889 */
2890 static struct hme_blk *
2891 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2892 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2893 {
2894 hmeblk_tag hblktag;
2895 int hmeshift;
2896 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2897 
2898 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2899 
2900 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2901 ASSERT(hblktag.htag_id != NULL);
2902 hmeshift = HME_HASH_SHIFT(size);
2903 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2904 hblktag.htag_rehash = HME_HASH_REHASH(size);
2905 hblktag.htag_rid = rid;
2906 
2907 ttearray_realloc:
2908 
2909 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2910 
2911 /*
2912 * We block until hblk_reserve_lock is released; it's held by
2913 * the thread, temporarily using hblk_reserve, until hblk_reserve is
2914 * replaced by a hblk from sfmmu8_cache.
2915 */
2916 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2917 hblk_reserve_thread != curthread) {
2918 SFMMU_HASH_UNLOCK(hmebp);
2919 mutex_enter(&hblk_reserve_lock);
2920 mutex_exit(&hblk_reserve_lock);
2921 SFMMU_STAT(sf_hblk_reserve_hit);
2922 SFMMU_HASH_LOCK(hmebp);
2923 goto ttearray_realloc;
2924 }
2925 
2926 if (hmeblkp == NULL) {
2927 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2928 hblktag, flags, rid);
2929 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2930 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2931 } else {
2932 /*
2933 * It is possible for 8k and 64k hblks to collide since they
2934 * have the same rehash value. This is because we
2935 * lazily free hblks and 8K/64K blks could be lingering.
2936 * If we find a size mismatch we free the block and try again.
2937 */
2938 if (get_hblk_ttesz(hmeblkp) != size) {
2939 ASSERT(!hmeblkp->hblk_vcnt);
2940 ASSERT(!hmeblkp->hblk_hmecnt);
2941 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2942 &list, 0);
2943 goto ttearray_realloc;
2944 }
2945 if (hmeblkp->hblk_shw_bit) {
2946 /*
2947 * if the hblk was previously used as a shadow hblk then
2948 * we will change it to a normal hblk
2949 */
2950 ASSERT(!hmeblkp->hblk_shared);
2951 if (hmeblkp->hblk_shw_mask) {
2952 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
2953 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2954 goto ttearray_realloc;
2955 } else {
2956 hmeblkp->hblk_shw_bit = 0;
2957 }
2958 }
2959 SFMMU_STAT(sf_hblk_hit);
2960 }
2961 
2962 /*
2963 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
2964 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
2965 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
2966 * just add these hmeblks to the per-cpu pending queue.
2967 */ 2968 sfmmu_hblks_list_purge(&list, 1); 2969 2970 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2971 ASSERT(!hmeblkp->hblk_shw_bit); 2972 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2973 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2974 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 2975 2976 return (hmeblkp); 2977 } 2978 2979 /* 2980 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2981 * otherwise. 2982 */ 2983 static int 2984 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2985 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 2986 { 2987 page_t *pp = *pps; 2988 int hmenum, size, remap; 2989 tte_t tteold, flush_tte; 2990 #ifdef DEBUG 2991 tte_t orig_old; 2992 #endif /* DEBUG */ 2993 struct sf_hment *sfhme; 2994 kmutex_t *pml, *pmtx; 2995 hatlock_t *hatlockp; 2996 int myflt; 2997 2998 /* 2999 * remove this panic when we decide to let user virtual address 3000 * space be >= USERLIMIT. 3001 */ 3002 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 3003 panic("user addr %p in kernel space", (void *)vaddr); 3004 #if defined(TTE_IS_GLOBAL) 3005 if (TTE_IS_GLOBAL(ttep)) 3006 panic("sfmmu_tteload: creating global tte"); 3007 #endif 3008 3009 #ifdef DEBUG 3010 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 3011 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 3012 panic("sfmmu_tteload: non cacheable memory tte"); 3013 #endif /* DEBUG */ 3014 3015 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 3016 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 3017 TTE_SET_REF(ttep); 3018 TTE_SET_MOD(ttep); 3019 } 3020 3021 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 3022 !TTE_IS_MOD(ttep)) { 3023 /* 3024 * Don't load TSB for dummy as in ISM. Also don't preload 3025 * the TSB if the TTE isn't writable since we're likely to 3026 * fault on it again -- preloading can be fairly expensive. 3027 */ 3028 flags |= SFMMU_NO_TSBLOAD; 3029 } 3030 3031 size = TTE_CSZ(ttep); 3032 switch (size) { 3033 case TTE8K: 3034 SFMMU_STAT(sf_tteload8k); 3035 break; 3036 case TTE64K: 3037 SFMMU_STAT(sf_tteload64k); 3038 break; 3039 case TTE512K: 3040 SFMMU_STAT(sf_tteload512k); 3041 break; 3042 case TTE4M: 3043 SFMMU_STAT(sf_tteload4m); 3044 break; 3045 case (TTE32M): 3046 SFMMU_STAT(sf_tteload32m); 3047 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3048 break; 3049 case (TTE256M): 3050 SFMMU_STAT(sf_tteload256m); 3051 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3052 break; 3053 } 3054 3055 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 3056 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 3057 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3058 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3059 3060 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3061 3062 /* 3063 * Need to grab mlist lock here so that pageunload 3064 * will not change tte behind us. 3065 */ 3066 if (pp) { 3067 pml = sfmmu_mlist_enter(pp); 3068 } 3069 3070 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3071 /* 3072 * Look for corresponding hment and if valid verify 3073 * pfns are equal. 
3074 */ 3075 remap = TTE_IS_VALID(&tteold); 3076 if (remap) { 3077 pfn_t new_pfn, old_pfn; 3078 3079 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3080 new_pfn = TTE_TO_PFN(vaddr, ttep); 3081 3082 if (flags & HAT_LOAD_REMAP) { 3083 /* make sure we are remapping same type of pages */ 3084 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3085 panic("sfmmu_tteload - tte remap io<->memory"); 3086 } 3087 if (old_pfn != new_pfn && 3088 (pp != NULL || sfhme->hme_page != NULL)) { 3089 panic("sfmmu_tteload - tte remap pp != NULL"); 3090 } 3091 } else if (old_pfn != new_pfn) { 3092 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3093 (void *)hmeblkp); 3094 } 3095 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3096 3097 if (TTE_IS_EXECUTABLE(&tteold) && TTE_IS_SOFTEXEC(ttep)) { 3098 TTE_SET_EXEC(ttep); 3099 } 3100 } 3101 3102 if (pp) { 3103 /* 3104 * If we know that this page will be executed, because 3105 * it was in the past (PP_ISEXEC is already true), or 3106 * if the caller says it will likely be executed 3107 * (HAT_LOAD_TEXT is true), then there is no need to 3108 * dynamically detect execution with a soft exec 3109 * fault. Enable hardware execute permission now. 3110 */ 3111 if ((PP_ISEXEC(pp) || (flags & HAT_LOAD_TEXT)) && 3112 TTE_IS_SOFTEXEC(ttep)) { 3113 TTE_SET_EXEC(ttep); 3114 } 3115 3116 if (size == TTE8K) { 3117 #ifdef VAC 3118 /* 3119 * Handle VAC consistency 3120 */ 3121 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3122 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3123 } 3124 #endif 3125 3126 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3127 pmtx = sfmmu_page_enter(pp); 3128 PP_CLRRO(pp); 3129 sfmmu_page_exit(pmtx); 3130 } else if (!PP_ISMAPPED(pp) && 3131 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3132 pmtx = sfmmu_page_enter(pp); 3133 if (!(PP_ISMOD(pp))) { 3134 PP_SETRO(pp); 3135 } 3136 sfmmu_page_exit(pmtx); 3137 } 3138 3139 if (TTE_EXECUTED(ttep)) { 3140 pmtx = sfmmu_page_enter(pp); 3141 PP_SETEXEC(pp); 3142 sfmmu_page_exit(pmtx); 3143 } 3144 3145 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3146 /* 3147 * sfmmu_pagearray_setup failed so return 3148 */ 3149 sfmmu_mlist_exit(pml); 3150 return (1); 3151 } 3152 3153 } else if (TTE_IS_SOFTEXEC(ttep)) { 3154 TTE_SET_EXEC(ttep); 3155 } 3156 3157 /* 3158 * Make sure hment is not on a mapping list. 3159 */ 3160 ASSERT(remap || (sfhme->hme_page == NULL)); 3161 3162 /* if it is not a remap then hme->next better be NULL */ 3163 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3164 3165 if (flags & HAT_LOAD_LOCK) { 3166 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3167 panic("too high lckcnt-hmeblk %p", 3168 (void *)hmeblkp); 3169 } 3170 atomic_add_32(&hmeblkp->hblk_lckcnt, 1); 3171 3172 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3173 } 3174 3175 #ifdef VAC 3176 if (pp && PP_ISNC(pp)) { 3177 /* 3178 * If the physical page is marked to be uncacheable, like 3179 * by a vac conflict, make sure the new mapping is also 3180 * uncacheable. 
3181 */ 3182 TTE_CLR_VCACHEABLE(ttep); 3183 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3184 } 3185 #endif 3186 ttep->tte_hmenum = hmenum; 3187 3188 #ifdef DEBUG 3189 orig_old = tteold; 3190 #endif /* DEBUG */ 3191 3192 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3193 if ((sfmmup == KHATID) && 3194 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3195 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3196 } 3197 #ifdef DEBUG 3198 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3199 #endif /* DEBUG */ 3200 } 3201 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3202 3203 if (!TTE_IS_VALID(&tteold)) { 3204 3205 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 3206 if (rid == SFMMU_INVALID_SHMERID) { 3207 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 3208 } else { 3209 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3210 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3211 /* 3212 * We already accounted for region ttecnt's in sfmmu 3213 * during hat_join_region() processing. Here we 3214 * only update ttecnt's in region struture. 3215 */ 3216 atomic_add_long(&rgnp->rgn_ttecnt[size], 1); 3217 } 3218 } 3219 3220 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3221 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3222 sfmmup != ksfmmup) { 3223 uchar_t tteflag = 1 << size; 3224 if (rid == SFMMU_INVALID_SHMERID) { 3225 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3226 hatlockp = sfmmu_hat_enter(sfmmup); 3227 sfmmup->sfmmu_tteflags |= tteflag; 3228 if (&mmu_set_pgsz_order) { 3229 mmu_set_pgsz_order(sfmmup, 1); 3230 } 3231 sfmmu_hat_exit(hatlockp); 3232 } 3233 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3234 hatlockp = sfmmu_hat_enter(sfmmup); 3235 sfmmup->sfmmu_rtteflags |= tteflag; 3236 if (&mmu_set_pgsz_order && sfmmup != ksfmmup) { 3237 mmu_set_pgsz_order(sfmmup, 1); 3238 } 3239 sfmmu_hat_exit(hatlockp); 3240 } 3241 /* 3242 * Update the current CPU tsbmiss area, so the current thread 3243 * won't need to take the tsbmiss for the new pagesize. 3244 * The other threads in the process will update their tsb 3245 * miss area lazily in sfmmu_tsbmiss_exception() when they 3246 * fail to find the translation for a newly added pagesize. 3247 */ 3248 if (size > TTE64K && myflt) { 3249 struct tsbmiss *tsbmp; 3250 kpreempt_disable(); 3251 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3252 if (rid == SFMMU_INVALID_SHMERID) { 3253 if (!(tsbmp->uhat_tteflags & tteflag)) { 3254 tsbmp->uhat_tteflags |= tteflag; 3255 } 3256 } else { 3257 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3258 tsbmp->uhat_rtteflags |= tteflag; 3259 } 3260 } 3261 kpreempt_enable(); 3262 } 3263 } 3264 3265 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3266 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3267 hatlockp = sfmmu_hat_enter(sfmmup); 3268 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3269 sfmmu_hat_exit(hatlockp); 3270 } 3271 3272 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3273 hw_tte.tte_intlo; 3274 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3275 hw_tte.tte_inthi; 3276 3277 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3278 /* 3279 * If remap and new tte differs from old tte we need 3280 * to sync the mod bit and flush TLB/TSB. We don't 3281 * need to sync ref bit because we currently always set 3282 * ref bit in tteload. 
3283 */ 3284 ASSERT(TTE_IS_REF(ttep)); 3285 if (TTE_IS_MOD(&tteold) || (TTE_EXECUTED(&tteold) && 3286 !TTE_IS_EXECUTABLE(ttep))) { 3287 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3288 } 3289 /* 3290 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3291 * hmes are only used for read only text. Adding this code for 3292 * completeness and future use of shared hmeblks with writable 3293 * mappings of VMODSORT vnodes. 3294 */ 3295 if (hmeblkp->hblk_shared) { 3296 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3297 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3298 xt_sync(cpuset); 3299 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3300 } else { 3301 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3302 xt_sync(sfmmup->sfmmu_cpusran); 3303 } 3304 } 3305 3306 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3307 /* 3308 * We only preload 8K and 4M mappings into the TSB, since 3309 * 64K and 512K mappings are replicated and hence don't 3310 * have a single, unique TSB entry. Ditto for 32M/256M. 3311 */ 3312 if (size == TTE8K || size == TTE4M) { 3313 sf_scd_t *scdp; 3314 hatlockp = sfmmu_hat_enter(sfmmup); 3315 /* 3316 * Don't preload private TSB if the mapping is used 3317 * by the shctx in the SCD. 3318 */ 3319 scdp = sfmmup->sfmmu_scdp; 3320 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3321 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3322 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3323 size); 3324 } 3325 sfmmu_hat_exit(hatlockp); 3326 } 3327 } 3328 if (pp) { 3329 if (!remap) { 3330 HME_ADD(sfhme, pp); 3331 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 3332 ASSERT(hmeblkp->hblk_hmecnt > 0); 3333 3334 /* 3335 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3336 * see pageunload() for comment. 3337 */ 3338 } 3339 sfmmu_mlist_exit(pml); 3340 } 3341 3342 return (0); 3343 } 3344 /* 3345 * Function unlocks hash bucket. 3346 */ 3347 static void 3348 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3349 { 3350 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3351 SFMMU_HASH_UNLOCK(hmebp); 3352 } 3353 3354 /* 3355 * function which checks and sets up page array for a large 3356 * translation. Will set p_vcolor, p_index, p_ro fields. 3357 * Assumes addr and pfnum of first page are properly aligned. 3358 * Will check for physical contiguity. If check fails it return 3359 * non null. 3360 */ 3361 static int 3362 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3363 { 3364 int i, index, ttesz; 3365 pfn_t pfnum; 3366 pgcnt_t npgs; 3367 page_t *pp, *pp1; 3368 kmutex_t *pmtx; 3369 #ifdef VAC 3370 int osz; 3371 int cflags = 0; 3372 int vac_err = 0; 3373 #endif 3374 int newidx = 0; 3375 3376 ttesz = TTE_CSZ(ttep); 3377 3378 ASSERT(ttesz > TTE8K); 3379 3380 npgs = TTEPAGES(ttesz); 3381 index = PAGESZ_TO_INDEX(ttesz); 3382 3383 pfnum = (*pps)->p_pagenum; 3384 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3385 3386 /* 3387 * Save the first pp so we can do HAT_TMPNC at the end. 3388 */ 3389 pp1 = *pps; 3390 #ifdef VAC 3391 osz = fnd_mapping_sz(pp1); 3392 #endif 3393 3394 for (i = 0; i < npgs; i++, pps++) { 3395 pp = *pps; 3396 ASSERT(PAGE_LOCKED(pp)); 3397 ASSERT(pp->p_szc >= ttesz); 3398 ASSERT(pp->p_szc == pp1->p_szc); 3399 ASSERT(sfmmu_mlist_held(pp)); 3400 3401 /* 3402 * XXX is it possible to maintain P_RO on the root only? 
3403 */
3404 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3405 pmtx = sfmmu_page_enter(pp);
3406 PP_CLRRO(pp);
3407 sfmmu_page_exit(pmtx);
3408 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3409 !PP_ISMOD(pp)) {
3410 pmtx = sfmmu_page_enter(pp);
3411 if (!(PP_ISMOD(pp))) {
3412 PP_SETRO(pp);
3413 }
3414 sfmmu_page_exit(pmtx);
3415 }
3416 
3417 if (TTE_EXECUTED(ttep)) {
3418 pmtx = sfmmu_page_enter(pp);
3419 PP_SETEXEC(pp);
3420 sfmmu_page_exit(pmtx);
3421 }
3422 
3423 /*
3424 * If this is a remap we skip vac & contiguity checks.
3425 */
3426 if (remap)
3427 continue;
3428 
3429 /*
3430 * set p_vcolor and detect any vac conflicts.
3431 */
3432 #ifdef VAC
3433 if (vac_err == 0) {
3434 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3435 
3436 }
3437 #endif
3438 
3439 /*
3440 * Save current index in case we need to undo it.
3441 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3442 * "SFMMU_INDEX_SHIFT 6"
3443 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3444 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3445 *
3446 * So: index = PAGESZ_TO_INDEX(ttesz);
3447 * if ttesz == 1 then index = 0x2
3448 * 2 then index = 0x4
3449 * 3 then index = 0x8
3450 * 4 then index = 0x10
3451 * 5 then index = 0x20
3452 * The code below checks if it's a new pagesize (i.e., newidx)
3453 * in case we need to take it back out of p_index,
3454 * and then or's the new index into the existing index.
3455 */
3456 if ((PP_MAPINDEX(pp) & index) == 0)
3457 newidx = 1;
3458 pp->p_index = (PP_MAPINDEX(pp) | index);
3459 
3460 /*
3461 * contiguity check
3462 */
3463 if (pp->p_pagenum != pfnum) {
3464 /*
3465 * If we fail the contiguity test then
3466 * the only thing we need to fix is the p_index field.
3467 * We might get a few extra flushes but since this
3468 * path is rare that is ok. The p_ro field will
3469 * get automatically fixed on the next tteload to
3470 * the page. NO TNC bit is set yet.
3471 */
3472 while (i >= 0) {
3473 pp = *pps;
3474 if (newidx)
3475 pp->p_index = (PP_MAPINDEX(pp) &
3476 ~index);
3477 pps--;
3478 i--;
3479 }
3480 return (1);
3481 }
3482 pfnum++;
3483 addr += MMU_PAGESIZE;
3484 }
3485 
3486 #ifdef VAC
3487 if (vac_err) {
3488 if (ttesz > osz) {
3489 /*
3490 * There are some smaller mappings that cause vac
3491 * conflicts. Convert all existing small mappings to
3492 * TNC.
3493 */
3494 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3495 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3496 npgs);
3497 } else {
3498 /* EMPTY */
3499 /*
3500 * If there exists a big page mapping,
3501 * that means the whole existing big page
3502 * has the TNC setting already. No need to convert to
3503 * TNC again.
3504 */
3505 ASSERT(PP_ISTNC(pp1));
3506 }
3507 }
3508 #endif /* VAC */
3509 
3510 return (0);
3511 }
3512 
3513 #ifdef VAC
3514 /*
3515 * Routine that checks vac consistency for a large page. It also
3516 * sets virtual color for all pp's for this big mapping.
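 *
 * Background summary (no new behaviour implied): on a virtually indexed
 * cache, two mappings of one physical page whose virtual addresses map
 * to different colors (addr_to_vcolor()) can cache the same data in
 * different cache lines. The routine records the color of the first
 * mapping with PP_SET_VCOLOR(), silently recolors a page that has no
 * remaining mappings after flushing the old color, and returns
 * HAT_TMPNC when a conflicting color is requested while other mappings
 * still exist, so the caller can make the mappings temporarily
 * non-cacheable.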
3517 */
3518 static int
3519 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3520 {
3521 int vcolor, ocolor;
3522 
3523 ASSERT(sfmmu_mlist_held(pp));
3524 
3525 if (PP_ISNC(pp)) {
3526 return (HAT_TMPNC);
3527 }
3528 
3529 vcolor = addr_to_vcolor(addr);
3530 if (PP_NEWPAGE(pp)) {
3531 PP_SET_VCOLOR(pp, vcolor);
3532 return (0);
3533 }
3534 
3535 ocolor = PP_GET_VCOLOR(pp);
3536 if (ocolor == vcolor) {
3537 return (0);
3538 }
3539 
3540 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3541 /*
3542 * Previous user of page had a different color
3543 * but since there are no current users
3544 * we just flush the cache and change the color.
3545 * As an optimization for large pages we flush the
3546 * entire cache of that color and set a flag.
3547 */
3548 SFMMU_STAT(sf_pgcolor_conflict);
3549 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3550 CacheColor_SetFlushed(*cflags, ocolor);
3551 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3552 }
3553 PP_SET_VCOLOR(pp, vcolor);
3554 return (0);
3555 }
3556 
3557 /*
3558 * We got a real conflict with a current mapping.
3559 * Set flags to start uncaching all mappings
3560 * and return failure so we restart looping
3561 * the pp array from the beginning.
3562 */
3563 return (HAT_TMPNC);
3564 }
3565 #endif /* VAC */
3566 
3567 /*
3568 * Creates a large page shadow hmeblk for a tte.
3569 * The purpose of this routine is to allow us to do quick unloads because
3570 * the vm layer can easily pass a very large but sparsely populated range.
3571 */
3572 static struct hme_blk *
3573 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3574 {
3575 struct hmehash_bucket *hmebp;
3576 hmeblk_tag hblktag;
3577 int hmeshift, size, vshift;
3578 uint_t shw_mask, newshw_mask;
3579 struct hme_blk *hmeblkp;
3580 
3581 ASSERT(sfmmup != KHATID);
3582 if (mmu_page_sizes == max_mmu_page_sizes) {
3583 ASSERT(ttesz < TTE256M);
3584 } else {
3585 ASSERT(ttesz < TTE4M);
3586 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3587 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3588 }
3589 
3590 if (ttesz == TTE8K) {
3591 size = TTE512K;
3592 } else {
3593 size = ++ttesz;
3594 }
3595 
3596 hblktag.htag_id = sfmmup;
3597 hmeshift = HME_HASH_SHIFT(size);
3598 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3599 hblktag.htag_rehash = HME_HASH_REHASH(size);
3600 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3601 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3602 
3603 SFMMU_HASH_LOCK(hmebp);
3604 
3605 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3606 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3607 if (hmeblkp == NULL) {
3608 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3609 hblktag, flags, SFMMU_INVALID_SHMERID);
3610 }
3611 ASSERT(hmeblkp);
3612 if (!hmeblkp->hblk_shw_mask) {
3613 /*
3614 * if this is an unused hblk it was just allocated or could
3615 * potentially be a previous large page hblk so we need to
3616 * set the shadow bit.
3617 */
3618 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3619 hmeblkp->hblk_shw_bit = 1;
3620 } else if (hmeblkp->hblk_shw_bit == 0) {
3621 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3622 (void *)hmeblkp);
3623 }
3624 ASSERT(hmeblkp->hblk_shw_bit == 1);
3625 ASSERT(!hmeblkp->hblk_shared);
3626 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3627 ASSERT(vshift < 8);
3628 /*
3629 * Atomically set shw mask bit
3630 */
3631 do {
3632 shw_mask = hmeblkp->hblk_shw_mask;
3633 newshw_mask = shw_mask | (1 << vshift);
3634 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
3635 newshw_mask);
3636 } while (newshw_mask != shw_mask);
3637 
3638 SFMMU_HASH_UNLOCK(hmebp);
3639 
3640 return (hmeblkp);
3641 }
3642 
3643 /*
3644 * This routine cleans up a previous shadow hmeblk and changes it to
3645 * a regular hblk. This happens rarely but it is possible
3646 * when a process wants to use large pages and there are hblks still
3647 * lying around from the previous as that used these hmeblks.
3648 * The alternative was to clean up the shadow hblks at unload time
3649 * but since so few user processes actually use large pages, it is
3650 * better to be lazy and clean up at this time.
3651 */
3652 static void
3653 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3654 struct hmehash_bucket *hmebp)
3655 {
3656 caddr_t addr, endaddr;
3657 int hashno, size;
3658 
3659 ASSERT(hmeblkp->hblk_shw_bit);
3660 ASSERT(!hmeblkp->hblk_shared);
3661 
3662 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3663 
3664 if (!hmeblkp->hblk_shw_mask) {
3665 hmeblkp->hblk_shw_bit = 0;
3666 return;
3667 }
3668 addr = (caddr_t)get_hblk_base(hmeblkp);
3669 endaddr = get_hblk_endaddr(hmeblkp);
3670 size = get_hblk_ttesz(hmeblkp);
3671 hashno = size - 1;
3672 ASSERT(hashno > 0);
3673 SFMMU_HASH_UNLOCK(hmebp);
3674 
3675 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3676 
3677 SFMMU_HASH_LOCK(hmebp);
3678 }
3679 
3680 static void
3681 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3682 int hashno)
3683 {
3684 int hmeshift, shadow = 0;
3685 hmeblk_tag hblktag;
3686 struct hmehash_bucket *hmebp;
3687 struct hme_blk *hmeblkp;
3688 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3689 
3690 ASSERT(hashno > 0);
3691 hblktag.htag_id = sfmmup;
3692 hblktag.htag_rehash = hashno;
3693 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3694 
3695 hmeshift = HME_HASH_SHIFT(hashno);
3696 
3697 while (addr < endaddr) {
3698 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3699 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3700 SFMMU_HASH_LOCK(hmebp);
3701 /* inline HME_HASH_SEARCH */
3702 hmeblkp = hmebp->hmeblkp;
3703 pr_hblk = NULL;
3704 while (hmeblkp) {
3705 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3706 /* found hme_blk */
3707 ASSERT(!hmeblkp->hblk_shared);
3708 if (hmeblkp->hblk_shw_bit) {
3709 if (hmeblkp->hblk_shw_mask) {
3710 shadow = 1;
3711 sfmmu_shadow_hcleanup(sfmmup,
3712 hmeblkp, hmebp);
3713 break;
3714 } else {
3715 hmeblkp->hblk_shw_bit = 0;
3716 }
3717 }
3718 
3719 /*
3720 * Hblk_hmecnt and hblk_vcnt could be non-zero
3721 * since hblk_unload() does not guarantee that.
3722 *
3723 * XXX - this could cause tteload() to spin
3724 * where sfmmu_shadow_hcleanup() is called.
3725 */ 3726 } 3727 3728 nx_hblk = hmeblkp->hblk_next; 3729 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3730 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3731 &list, 0); 3732 } else { 3733 pr_hblk = hmeblkp; 3734 } 3735 hmeblkp = nx_hblk; 3736 } 3737 3738 SFMMU_HASH_UNLOCK(hmebp); 3739 3740 if (shadow) { 3741 /* 3742 * We found another shadow hblk so cleaned its 3743 * children. We need to go back and cleanup 3744 * the original hblk so we don't change the 3745 * addr. 3746 */ 3747 shadow = 0; 3748 } else { 3749 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3750 (1 << hmeshift)); 3751 } 3752 } 3753 sfmmu_hblks_list_purge(&list, 0); 3754 } 3755 3756 /* 3757 * This routine's job is to delete stale invalid shared hmeregions hmeblks that 3758 * may still linger on after pageunload. 3759 */ 3760 static void 3761 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3762 { 3763 int hmeshift; 3764 hmeblk_tag hblktag; 3765 struct hmehash_bucket *hmebp; 3766 struct hme_blk *hmeblkp; 3767 struct hme_blk *pr_hblk; 3768 struct hme_blk *list = NULL; 3769 3770 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3771 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3772 3773 hmeshift = HME_HASH_SHIFT(ttesz); 3774 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3775 hblktag.htag_rehash = ttesz; 3776 hblktag.htag_rid = rid; 3777 hblktag.htag_id = srdp; 3778 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3779 3780 SFMMU_HASH_LOCK(hmebp); 3781 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3782 if (hmeblkp != NULL) { 3783 ASSERT(hmeblkp->hblk_shared); 3784 ASSERT(!hmeblkp->hblk_shw_bit); 3785 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3786 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3787 } 3788 ASSERT(!hmeblkp->hblk_lckcnt); 3789 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3790 &list, 0); 3791 } 3792 SFMMU_HASH_UNLOCK(hmebp); 3793 sfmmu_hblks_list_purge(&list, 0); 3794 } 3795 3796 /* ARGSUSED */ 3797 static void 3798 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3799 size_t r_size, void *r_obj, u_offset_t r_objoff) 3800 { 3801 } 3802 3803 /* 3804 * Searches for an hmeblk which maps addr, then unloads this mapping 3805 * and updates *eaddrp, if the hmeblk is found. 
3806 */ 3807 static void 3808 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3809 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3810 { 3811 int hmeshift; 3812 hmeblk_tag hblktag; 3813 struct hmehash_bucket *hmebp; 3814 struct hme_blk *hmeblkp; 3815 struct hme_blk *pr_hblk; 3816 struct hme_blk *list = NULL; 3817 3818 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3819 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3820 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3821 3822 hmeshift = HME_HASH_SHIFT(ttesz); 3823 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3824 hblktag.htag_rehash = ttesz; 3825 hblktag.htag_rid = rid; 3826 hblktag.htag_id = srdp; 3827 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3828 3829 SFMMU_HASH_LOCK(hmebp); 3830 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3831 if (hmeblkp != NULL) { 3832 ASSERT(hmeblkp->hblk_shared); 3833 ASSERT(!hmeblkp->hblk_lckcnt); 3834 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3835 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3836 eaddr, NULL, HAT_UNLOAD); 3837 ASSERT(*eaddrp > addr); 3838 } 3839 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3840 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3841 &list, 0); 3842 } 3843 SFMMU_HASH_UNLOCK(hmebp); 3844 sfmmu_hblks_list_purge(&list, 0); 3845 } 3846 3847 static void 3848 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3849 { 3850 int ttesz = rgnp->rgn_pgszc; 3851 size_t rsz = rgnp->rgn_size; 3852 caddr_t rsaddr = rgnp->rgn_saddr; 3853 caddr_t readdr = rsaddr + rsz; 3854 caddr_t rhsaddr; 3855 caddr_t va; 3856 uint_t rid = rgnp->rgn_id; 3857 caddr_t cbsaddr; 3858 caddr_t cbeaddr; 3859 hat_rgn_cb_func_t rcbfunc; 3860 ulong_t cnt; 3861 3862 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3863 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3864 3865 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3866 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3867 if (ttesz < HBLK_MIN_TTESZ) { 3868 ttesz = HBLK_MIN_TTESZ; 3869 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3870 } else { 3871 rhsaddr = rsaddr; 3872 } 3873 3874 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) { 3875 rcbfunc = sfmmu_rgn_cb_noop; 3876 } 3877 3878 while (ttesz >= HBLK_MIN_TTESZ) { 3879 cbsaddr = rsaddr; 3880 cbeaddr = rsaddr; 3881 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3882 ttesz--; 3883 continue; 3884 } 3885 cnt = 0; 3886 va = rsaddr; 3887 while (va < readdr) { 3888 ASSERT(va >= rhsaddr); 3889 if (va != cbeaddr) { 3890 if (cbeaddr != cbsaddr) { 3891 ASSERT(cbeaddr > cbsaddr); 3892 (*rcbfunc)(cbsaddr, cbeaddr, 3893 rsaddr, rsz, rgnp->rgn_obj, 3894 rgnp->rgn_objoff); 3895 } 3896 cbsaddr = va; 3897 cbeaddr = va; 3898 } 3899 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3900 ttesz, &cbeaddr); 3901 cnt++; 3902 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3903 } 3904 if (cbeaddr != cbsaddr) { 3905 ASSERT(cbeaddr > cbsaddr); 3906 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3907 rsz, rgnp->rgn_obj, 3908 rgnp->rgn_objoff); 3909 } 3910 ttesz--; 3911 } 3912 } 3913 3914 /* 3915 * Release one hardware address translation lock on the given address range. 
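 *
 * A hedged usage sketch (the hat, address and page pointer names are
 * hypothetical): a caller that earlier pinned a translation with, e.g.,
 *
 *	hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
 *
 * releases that lock with a matching, page-aligned call such as
 *
 *	hat_unlock(hat, addr, MMU_PAGESIZE);
 *
 * Locks are counted per hmeblk, so unlock calls must balance the locked
 * loads made over the same range.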
3916  */
3917 void
3918 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3919 {
3920 	struct hmehash_bucket *hmebp;
3921 	hmeblk_tag hblktag;
3922 	int hmeshift, hashno = 1;
3923 	struct hme_blk *hmeblkp, *list = NULL;
3924 	caddr_t endaddr;
3925 
3926 	ASSERT(sfmmup != NULL);
3927 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3928 
3929 	ASSERT((sfmmup == ksfmmup) ||
3930 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3931 	ASSERT((len & MMU_PAGEOFFSET) == 0);
3932 	endaddr = addr + len;
3933 	hblktag.htag_id = sfmmup;
3934 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3935 
3936 	/*
3937 	 * Spitfire supports 4 page sizes.
3938 	 * Most pages are expected to be of the smallest page size (8K) and
3939 	 * these will not need to be rehashed. 64K pages also don't need to be
3940 	 * rehashed because an hmeblk spans 64K of address space. 512K pages
3941 	 * might need 1 rehash and 4M pages might need 2 rehashes.
3942 	 */
3943 	while (addr < endaddr) {
3944 		hmeshift = HME_HASH_SHIFT(hashno);
3945 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3946 		hblktag.htag_rehash = hashno;
3947 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3948 
3949 		SFMMU_HASH_LOCK(hmebp);
3950 
3951 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3952 		if (hmeblkp != NULL) {
3953 			ASSERT(!hmeblkp->hblk_shared);
3954 			/*
3955 			 * If we encounter a shadow hmeblk then
3956 			 * we know there are no valid hmeblks mapping
3957 			 * this address at this size or larger.
3958 			 * Just increment address by the smallest
3959 			 * page size.
3960 			 */
3961 			if (hmeblkp->hblk_shw_bit) {
3962 				addr += MMU_PAGESIZE;
3963 			} else {
3964 				addr = sfmmu_hblk_unlock(hmeblkp, addr,
3965 				    endaddr);
3966 			}
3967 			SFMMU_HASH_UNLOCK(hmebp);
3968 			hashno = 1;
3969 			continue;
3970 		}
3971 		SFMMU_HASH_UNLOCK(hmebp);
3972 
3973 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3974 			/*
3975 			 * We have traversed the whole list and rehashed
3976 			 * if necessary without finding the address to unlock,
3977 			 * which should never happen.
3978 			 */
3979 			panic("sfmmu_unlock: addr not found. 
" 3980 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3981 } else { 3982 hashno++; 3983 } 3984 } 3985 3986 sfmmu_hblks_list_purge(&list, 0); 3987 } 3988 3989 void 3990 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 3991 hat_region_cookie_t rcookie) 3992 { 3993 sf_srd_t *srdp; 3994 sf_region_t *rgnp; 3995 int ttesz; 3996 uint_t rid; 3997 caddr_t eaddr; 3998 caddr_t va; 3999 int hmeshift; 4000 hmeblk_tag hblktag; 4001 struct hmehash_bucket *hmebp; 4002 struct hme_blk *hmeblkp; 4003 struct hme_blk *pr_hblk; 4004 struct hme_blk *list; 4005 4006 if (rcookie == HAT_INVALID_REGION_COOKIE) { 4007 hat_unlock(sfmmup, addr, len); 4008 return; 4009 } 4010 4011 ASSERT(sfmmup != NULL); 4012 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4013 ASSERT(sfmmup != ksfmmup); 4014 4015 srdp = sfmmup->sfmmu_srdp; 4016 rid = (uint_t)((uint64_t)rcookie); 4017 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 4018 eaddr = addr + len; 4019 va = addr; 4020 list = NULL; 4021 rgnp = srdp->srd_hmergnp[rid]; 4022 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 4023 4024 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 4025 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 4026 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 4027 ttesz = HBLK_MIN_TTESZ; 4028 } else { 4029 ttesz = rgnp->rgn_pgszc; 4030 } 4031 while (va < eaddr) { 4032 while (ttesz < rgnp->rgn_pgszc && 4033 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 4034 ttesz++; 4035 } 4036 while (ttesz >= HBLK_MIN_TTESZ) { 4037 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 4038 ttesz--; 4039 continue; 4040 } 4041 hmeshift = HME_HASH_SHIFT(ttesz); 4042 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 4043 hblktag.htag_rehash = ttesz; 4044 hblktag.htag_rid = rid; 4045 hblktag.htag_id = srdp; 4046 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 4047 SFMMU_HASH_LOCK(hmebp); 4048 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, 4049 &list); 4050 if (hmeblkp == NULL) { 4051 SFMMU_HASH_UNLOCK(hmebp); 4052 ttesz--; 4053 continue; 4054 } 4055 ASSERT(hmeblkp->hblk_shared); 4056 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 4057 ASSERT(va >= eaddr || 4058 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 4059 SFMMU_HASH_UNLOCK(hmebp); 4060 break; 4061 } 4062 if (ttesz < HBLK_MIN_TTESZ) { 4063 panic("hat_unlock_region: addr not found " 4064 "addr %p hat %p", (void *)va, (void *)sfmmup); 4065 } 4066 } 4067 sfmmu_hblks_list_purge(&list, 0); 4068 } 4069 4070 /* 4071 * Function to unlock a range of addresses in an hmeblk. It returns the 4072 * next address that needs to be unlocked. 4073 * Should be called with the hash lock held. 
4074 */ 4075 static caddr_t 4076 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 4077 { 4078 struct sf_hment *sfhme; 4079 tte_t tteold, ttemod; 4080 int ttesz, ret; 4081 4082 ASSERT(in_hblk_range(hmeblkp, addr)); 4083 ASSERT(hmeblkp->hblk_shw_bit == 0); 4084 4085 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4086 ttesz = get_hblk_ttesz(hmeblkp); 4087 4088 HBLKTOHME(sfhme, hmeblkp, addr); 4089 while (addr < endaddr) { 4090 readtte: 4091 sfmmu_copytte(&sfhme->hme_tte, &tteold); 4092 if (TTE_IS_VALID(&tteold)) { 4093 4094 ttemod = tteold; 4095 4096 ret = sfmmu_modifytte_try(&tteold, &ttemod, 4097 &sfhme->hme_tte); 4098 4099 if (ret < 0) 4100 goto readtte; 4101 4102 if (hmeblkp->hblk_lckcnt == 0) 4103 panic("zero hblk lckcnt"); 4104 4105 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4106 (uintptr_t)endaddr) 4107 panic("can't unlock large tte"); 4108 4109 ASSERT(hmeblkp->hblk_lckcnt > 0); 4110 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 4111 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4112 } else { 4113 panic("sfmmu_hblk_unlock: invalid tte"); 4114 } 4115 addr += TTEBYTES(ttesz); 4116 sfhme++; 4117 } 4118 return (addr); 4119 } 4120 4121 /* 4122 * Physical Address Mapping Framework 4123 * 4124 * General rules: 4125 * 4126 * (1) Applies only to seg_kmem memory pages. To make things easier, 4127 * seg_kpm addresses are also accepted by the routines, but nothing 4128 * is done with them since by definition their PA mappings are static. 4129 * (2) hat_add_callback() may only be called while holding the page lock 4130 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4131 * or passing HAC_PAGELOCK flag. 4132 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4133 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4134 * callbacks may not sleep or acquire adaptive mutex locks. 4135 * (4) Either prehandler() or posthandler() (but not both) may be specified 4136 * as being NULL. Specifying an errhandler() is optional. 4137 * 4138 * Details of using the framework: 4139 * 4140 * registering a callback (hat_register_callback()) 4141 * 4142 * Pass prehandler, posthandler, errhandler addresses 4143 * as described below. If capture_cpus argument is nonzero, 4144 * suspend callback to the prehandler will occur with CPUs 4145 * captured and executing xc_loop() and CPUs will remain 4146 * captured until after the posthandler suspend callback 4147 * occurs. 4148 * 4149 * adding a callback (hat_add_callback()) 4150 * 4151 * as_pagelock(); 4152 * hat_add_callback(); 4153 * save returned pfn in private data structures or program registers; 4154 * as_pageunlock(); 4155 * 4156 * prehandler() 4157 * 4158 * Stop all accesses by physical address to this memory page. 4159 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4160 * adaptive locks. The second, SUSPEND, is called at high PIL with 4161 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4162 * locks must be XCALL_PIL or higher locks). 4163 * 4164 * May return the following errors: 4165 * EIO: A fatal error has occurred. This will result in panic. 4166 * EAGAIN: The page cannot be suspended. This will fail the 4167 * relocation. 4168 * 0: Success. 4169 * 4170 * posthandler() 4171 * 4172 * Save new pfn in private data structures or program registers; 4173 * not allowed to fail (non-zero return values will result in panic). 4174 * 4175 * errhandler() 4176 * 4177 * called when an error occurs related to the callback. 
Currently 4178 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4179 * a page is being freed, but there are still outstanding callback(s) 4180 * registered on the page. 4181 * 4182 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4183 * 4184 * stop using physical address 4185 * hat_delete_callback(); 4186 * 4187 */ 4188 4189 /* 4190 * Register a callback class. Each subsystem should do this once and 4191 * cache the id_t returned for use in setting up and tearing down callbacks. 4192 * 4193 * There is no facility for removing callback IDs once they are created; 4194 * the "key" should be unique for each module, so in case a module is unloaded 4195 * and subsequently re-loaded, we can recycle the module's previous entry. 4196 */ 4197 id_t 4198 hat_register_callback(int key, 4199 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4200 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4201 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4202 int capture_cpus) 4203 { 4204 id_t id; 4205 4206 /* 4207 * Search the table for a pre-existing callback associated with 4208 * the identifier "key". If one exists, we re-use that entry in 4209 * the table for this instance, otherwise we assign the next 4210 * available table slot. 4211 */ 4212 for (id = 0; id < sfmmu_max_cb_id; id++) { 4213 if (sfmmu_cb_table[id].key == key) 4214 break; 4215 } 4216 4217 if (id == sfmmu_max_cb_id) { 4218 id = sfmmu_cb_nextid++; 4219 if (id >= sfmmu_max_cb_id) 4220 panic("hat_register_callback: out of callback IDs"); 4221 } 4222 4223 ASSERT(prehandler != NULL || posthandler != NULL); 4224 4225 sfmmu_cb_table[id].key = key; 4226 sfmmu_cb_table[id].prehandler = prehandler; 4227 sfmmu_cb_table[id].posthandler = posthandler; 4228 sfmmu_cb_table[id].errhandler = errhandler; 4229 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4230 4231 return (id); 4232 } 4233 4234 #define HAC_COOKIE_NONE (void *)-1 4235 4236 /* 4237 * Add relocation callbacks to the specified addr/len which will be called 4238 * when relocating the associated page. See the description of pre and 4239 * posthandler above for more details. 4240 * 4241 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4242 * locked internally so the caller must be able to deal with the callback 4243 * running even before this function has returned. If HAC_PAGELOCK is not 4244 * set, it is assumed that the underlying memory pages are locked. 4245 * 4246 * Since the caller must track the individual page boundaries anyway, 4247 * we only allow a callback to be added to a single page (large 4248 * or small). Thus [addr, addr + len) MUST be contained within a single 4249 * page. 4250 * 4251 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4252 * _provided_that_ a unique parameter is specified for each callback. 4253 * If multiple callbacks are registered on the same range the callback will 4254 * be invoked with each unique parameter. Registering the same callback with 4255 * the same argument more than once will result in corrupted kernel state. 4256 * 4257 * Returns the pfn of the underlying kernel page in *rpfn 4258 * on success, or PFN_INVALID on failure. 4259 * 4260 * cookiep (if passed) provides storage space for an opaque cookie 4261 * to return later to hat_delete_callback(). This cookie makes the callback 4262 * deletion significantly quicker by avoiding a potentially lengthy hash 4263 * search. 
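 *
 * A hedged sketch of typical use (my_cb_id and my_pvt are hypothetical;
 * the id would have been obtained earlier from hat_register_callback()):
 *
 *	void	*cookie;
 *	pfn_t	pfn;
 *
 *	if (hat_add_callback(my_cb_id, vaddr, MMU_PAGESIZE,
 *	    HAC_SLEEP | HAC_PAGELOCK, my_pvt, &pfn, &cookie) == 0) {
 *		... program the device with pfn ...
 *	}
 *	...
 *	hat_delete_callback(vaddr, MMU_PAGESIZE, my_pvt, HAC_PAGELOCK,
 *	    cookie);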
4264 * 4265 * Returns values: 4266 * 0: success 4267 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4268 * EINVAL: callback ID is not valid 4269 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4270 * space 4271 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4272 */ 4273 int 4274 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4275 void *pvt, pfn_t *rpfn, void **cookiep) 4276 { 4277 struct hmehash_bucket *hmebp; 4278 hmeblk_tag hblktag; 4279 struct hme_blk *hmeblkp; 4280 int hmeshift, hashno; 4281 caddr_t saddr, eaddr, baseaddr; 4282 struct pa_hment *pahmep; 4283 struct sf_hment *sfhmep, *osfhmep; 4284 kmutex_t *pml; 4285 tte_t tte; 4286 page_t *pp; 4287 vnode_t *vp; 4288 u_offset_t off; 4289 pfn_t pfn; 4290 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4291 int locked = 0; 4292 4293 /* 4294 * For KPM mappings, just return the physical address since we 4295 * don't need to register any callbacks. 4296 */ 4297 if (IS_KPM_ADDR(vaddr)) { 4298 uint64_t paddr; 4299 SFMMU_KPM_VTOP(vaddr, paddr); 4300 *rpfn = btop(paddr); 4301 if (cookiep != NULL) 4302 *cookiep = HAC_COOKIE_NONE; 4303 return (0); 4304 } 4305 4306 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4307 *rpfn = PFN_INVALID; 4308 return (EINVAL); 4309 } 4310 4311 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4312 *rpfn = PFN_INVALID; 4313 return (ENOMEM); 4314 } 4315 4316 sfhmep = &pahmep->sfment; 4317 4318 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4319 eaddr = saddr + len; 4320 4321 rehash: 4322 /* Find the mapping(s) for this page */ 4323 for (hashno = TTE64K, hmeblkp = NULL; 4324 hmeblkp == NULL && hashno <= mmu_hashcnt; 4325 hashno++) { 4326 hmeshift = HME_HASH_SHIFT(hashno); 4327 hblktag.htag_id = ksfmmup; 4328 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4329 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4330 hblktag.htag_rehash = hashno; 4331 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4332 4333 SFMMU_HASH_LOCK(hmebp); 4334 4335 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4336 4337 if (hmeblkp == NULL) 4338 SFMMU_HASH_UNLOCK(hmebp); 4339 } 4340 4341 if (hmeblkp == NULL) { 4342 kmem_cache_free(pa_hment_cache, pahmep); 4343 *rpfn = PFN_INVALID; 4344 return (ENXIO); 4345 } 4346 4347 ASSERT(!hmeblkp->hblk_shared); 4348 4349 HBLKTOHME(osfhmep, hmeblkp, saddr); 4350 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4351 4352 if (!TTE_IS_VALID(&tte)) { 4353 SFMMU_HASH_UNLOCK(hmebp); 4354 kmem_cache_free(pa_hment_cache, pahmep); 4355 *rpfn = PFN_INVALID; 4356 return (ENXIO); 4357 } 4358 4359 /* 4360 * Make sure the boundaries for the callback fall within this 4361 * single mapping. 4362 */ 4363 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4364 ASSERT(saddr >= baseaddr); 4365 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4366 SFMMU_HASH_UNLOCK(hmebp); 4367 kmem_cache_free(pa_hment_cache, pahmep); 4368 *rpfn = PFN_INVALID; 4369 return (ERANGE); 4370 } 4371 4372 pfn = sfmmu_ttetopfn(&tte, vaddr); 4373 4374 /* 4375 * The pfn may not have a page_t underneath in which case we 4376 * just return it. This can happen if we are doing I/O to a 4377 * static portion of the kernel's address space, for instance. 
4378 */ 4379 pp = osfhmep->hme_page; 4380 if (pp == NULL) { 4381 SFMMU_HASH_UNLOCK(hmebp); 4382 kmem_cache_free(pa_hment_cache, pahmep); 4383 *rpfn = pfn; 4384 if (cookiep) 4385 *cookiep = HAC_COOKIE_NONE; 4386 return (0); 4387 } 4388 ASSERT(pp == PP_PAGEROOT(pp)); 4389 4390 vp = pp->p_vnode; 4391 off = pp->p_offset; 4392 4393 pml = sfmmu_mlist_enter(pp); 4394 4395 if (flags & HAC_PAGELOCK) { 4396 if (!page_trylock(pp, SE_SHARED)) { 4397 /* 4398 * Somebody is holding SE_EXCL lock. Might 4399 * even be hat_page_relocate(). Drop all 4400 * our locks, lookup the page in &kvp, and 4401 * retry. If it doesn't exist in &kvp and &zvp, 4402 * then we must be dealing with a kernel mapped 4403 * page which doesn't actually belong to 4404 * segkmem so we punt. 4405 */ 4406 sfmmu_mlist_exit(pml); 4407 SFMMU_HASH_UNLOCK(hmebp); 4408 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4409 4410 /* check zvp before giving up */ 4411 if (pp == NULL) 4412 pp = page_lookup(&zvp, (u_offset_t)saddr, 4413 SE_SHARED); 4414 4415 /* Okay, we didn't find it, give up */ 4416 if (pp == NULL) { 4417 kmem_cache_free(pa_hment_cache, pahmep); 4418 *rpfn = pfn; 4419 if (cookiep) 4420 *cookiep = HAC_COOKIE_NONE; 4421 return (0); 4422 } 4423 page_unlock(pp); 4424 goto rehash; 4425 } 4426 locked = 1; 4427 } 4428 4429 if (!PAGE_LOCKED(pp) && !panicstr) 4430 panic("hat_add_callback: page 0x%p not locked", (void *)pp); 4431 4432 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4433 pp->p_offset != off) { 4434 /* 4435 * The page moved before we got our hands on it. Drop 4436 * all the locks and try again. 4437 */ 4438 ASSERT((flags & HAC_PAGELOCK) != 0); 4439 sfmmu_mlist_exit(pml); 4440 SFMMU_HASH_UNLOCK(hmebp); 4441 page_unlock(pp); 4442 locked = 0; 4443 goto rehash; 4444 } 4445 4446 if (!VN_ISKAS(vp)) { 4447 /* 4448 * This is not a segkmem page but another page which 4449 * has been kernel mapped. It had better have at least 4450 * a share lock on it. Return the pfn. 4451 */ 4452 sfmmu_mlist_exit(pml); 4453 SFMMU_HASH_UNLOCK(hmebp); 4454 if (locked) 4455 page_unlock(pp); 4456 kmem_cache_free(pa_hment_cache, pahmep); 4457 ASSERT(PAGE_LOCKED(pp)); 4458 *rpfn = pfn; 4459 if (cookiep) 4460 *cookiep = HAC_COOKIE_NONE; 4461 return (0); 4462 } 4463 4464 /* 4465 * Setup this pa_hment and link its embedded dummy sf_hment into 4466 * the mapping list. 4467 */ 4468 pp->p_share++; 4469 pahmep->cb_id = callback_id; 4470 pahmep->addr = vaddr; 4471 pahmep->len = len; 4472 pahmep->refcnt = 1; 4473 pahmep->flags = 0; 4474 pahmep->pvt = pvt; 4475 4476 sfhmep->hme_tte.ll = 0; 4477 sfhmep->hme_data = pahmep; 4478 sfhmep->hme_prev = osfhmep; 4479 sfhmep->hme_next = osfhmep->hme_next; 4480 4481 if (osfhmep->hme_next) 4482 osfhmep->hme_next->hme_prev = sfhmep; 4483 4484 osfhmep->hme_next = sfhmep; 4485 4486 sfmmu_mlist_exit(pml); 4487 SFMMU_HASH_UNLOCK(hmebp); 4488 4489 if (locked) 4490 page_unlock(pp); 4491 4492 *rpfn = pfn; 4493 if (cookiep) 4494 *cookiep = (void *)pahmep; 4495 4496 return (0); 4497 } 4498 4499 /* 4500 * Remove the relocation callbacks from the specified addr/len. 
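 * If the cookie returned by hat_add_callback() is supplied, the pa_hment
 * is located directly through it; otherwise the page's mapping list is
 * searched for a pa_hment whose pvt, vaddr and len all match.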
4501 */ 4502 void 4503 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4504 void *cookie) 4505 { 4506 struct hmehash_bucket *hmebp; 4507 hmeblk_tag hblktag; 4508 struct hme_blk *hmeblkp; 4509 int hmeshift, hashno; 4510 caddr_t saddr; 4511 struct pa_hment *pahmep; 4512 struct sf_hment *sfhmep, *osfhmep; 4513 kmutex_t *pml; 4514 tte_t tte; 4515 page_t *pp; 4516 vnode_t *vp; 4517 u_offset_t off; 4518 int locked = 0; 4519 4520 /* 4521 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4522 * remove so just return. 4523 */ 4524 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4525 return; 4526 4527 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4528 4529 rehash: 4530 /* Find the mapping(s) for this page */ 4531 for (hashno = TTE64K, hmeblkp = NULL; 4532 hmeblkp == NULL && hashno <= mmu_hashcnt; 4533 hashno++) { 4534 hmeshift = HME_HASH_SHIFT(hashno); 4535 hblktag.htag_id = ksfmmup; 4536 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4537 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4538 hblktag.htag_rehash = hashno; 4539 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4540 4541 SFMMU_HASH_LOCK(hmebp); 4542 4543 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4544 4545 if (hmeblkp == NULL) 4546 SFMMU_HASH_UNLOCK(hmebp); 4547 } 4548 4549 if (hmeblkp == NULL) 4550 return; 4551 4552 ASSERT(!hmeblkp->hblk_shared); 4553 4554 HBLKTOHME(osfhmep, hmeblkp, saddr); 4555 4556 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4557 if (!TTE_IS_VALID(&tte)) { 4558 SFMMU_HASH_UNLOCK(hmebp); 4559 return; 4560 } 4561 4562 pp = osfhmep->hme_page; 4563 if (pp == NULL) { 4564 SFMMU_HASH_UNLOCK(hmebp); 4565 ASSERT(cookie == NULL); 4566 return; 4567 } 4568 4569 vp = pp->p_vnode; 4570 off = pp->p_offset; 4571 4572 pml = sfmmu_mlist_enter(pp); 4573 4574 if (flags & HAC_PAGELOCK) { 4575 if (!page_trylock(pp, SE_SHARED)) { 4576 /* 4577 * Somebody is holding SE_EXCL lock. Might 4578 * even be hat_page_relocate(). Drop all 4579 * our locks, lookup the page in &kvp, and 4580 * retry. If it doesn't exist in &kvp and &zvp, 4581 * then we must be dealing with a kernel mapped 4582 * page which doesn't actually belong to 4583 * segkmem so we punt. 4584 */ 4585 sfmmu_mlist_exit(pml); 4586 SFMMU_HASH_UNLOCK(hmebp); 4587 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4588 /* check zvp before giving up */ 4589 if (pp == NULL) 4590 pp = page_lookup(&zvp, (u_offset_t)saddr, 4591 SE_SHARED); 4592 4593 if (pp == NULL) { 4594 ASSERT(cookie == NULL); 4595 return; 4596 } 4597 page_unlock(pp); 4598 goto rehash; 4599 } 4600 locked = 1; 4601 } 4602 4603 ASSERT(PAGE_LOCKED(pp)); 4604 4605 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4606 pp->p_offset != off) { 4607 /* 4608 * The page moved before we got our hands on it. Drop 4609 * all the locks and try again. 4610 */ 4611 ASSERT((flags & HAC_PAGELOCK) != 0); 4612 sfmmu_mlist_exit(pml); 4613 SFMMU_HASH_UNLOCK(hmebp); 4614 page_unlock(pp); 4615 locked = 0; 4616 goto rehash; 4617 } 4618 4619 if (!VN_ISKAS(vp)) { 4620 /* 4621 * This is not a segkmem page but another page which 4622 * has been kernel mapped. 
4623 */ 4624 sfmmu_mlist_exit(pml); 4625 SFMMU_HASH_UNLOCK(hmebp); 4626 if (locked) 4627 page_unlock(pp); 4628 ASSERT(cookie == NULL); 4629 return; 4630 } 4631 4632 if (cookie != NULL) { 4633 pahmep = (struct pa_hment *)cookie; 4634 sfhmep = &pahmep->sfment; 4635 } else { 4636 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4637 sfhmep = sfhmep->hme_next) { 4638 4639 /* 4640 * skip va<->pa mappings 4641 */ 4642 if (!IS_PAHME(sfhmep)) 4643 continue; 4644 4645 pahmep = sfhmep->hme_data; 4646 ASSERT(pahmep != NULL); 4647 4648 /* 4649 * if pa_hment matches, remove it 4650 */ 4651 if ((pahmep->pvt == pvt) && 4652 (pahmep->addr == vaddr) && 4653 (pahmep->len == len)) { 4654 break; 4655 } 4656 } 4657 } 4658 4659 if (sfhmep == NULL) { 4660 if (!panicstr) { 4661 panic("hat_delete_callback: pa_hment not found, pp %p", 4662 (void *)pp); 4663 } 4664 return; 4665 } 4666 4667 /* 4668 * Note: at this point a valid kernel mapping must still be 4669 * present on this page. 4670 */ 4671 pp->p_share--; 4672 if (pp->p_share <= 0) 4673 panic("hat_delete_callback: zero p_share"); 4674 4675 if (--pahmep->refcnt == 0) { 4676 if (pahmep->flags != 0) 4677 panic("hat_delete_callback: pa_hment is busy"); 4678 4679 /* 4680 * Remove sfhmep from the mapping list for the page. 4681 */ 4682 if (sfhmep->hme_prev) { 4683 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4684 } else { 4685 pp->p_mapping = sfhmep->hme_next; 4686 } 4687 4688 if (sfhmep->hme_next) 4689 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4690 4691 sfmmu_mlist_exit(pml); 4692 SFMMU_HASH_UNLOCK(hmebp); 4693 4694 if (locked) 4695 page_unlock(pp); 4696 4697 kmem_cache_free(pa_hment_cache, pahmep); 4698 return; 4699 } 4700 4701 sfmmu_mlist_exit(pml); 4702 SFMMU_HASH_UNLOCK(hmebp); 4703 if (locked) 4704 page_unlock(pp); 4705 } 4706 4707 /* 4708 * hat_probe returns 1 if the translation for the address 'addr' is 4709 * loaded, zero otherwise. 4710 * 4711 * hat_probe should be used only for advisorary purposes because it may 4712 * occasionally return the wrong value. The implementation must guarantee that 4713 * returning the wrong value is a very rare event. hat_probe is used 4714 * to implement optimizations in the segment drivers. 
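 *
 * A hedged illustration (the surrounding segment-driver logic is
 * hypothetical): a driver pre-loading a range may skip addresses that
 * already appear to be mapped,
 *
 *	if (hat_probe(as->a_hat, addr) == 0)
 *		hat_memload(as->a_hat, addr, pp, prot, HAT_LOAD);
 *
 * Since the result is only a hint, a stale 0 merely costs a redundant
 * load, and a stale 1 is corrected by the normal fault path.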
4715 * 4716 */ 4717 int 4718 hat_probe(struct hat *sfmmup, caddr_t addr) 4719 { 4720 pfn_t pfn; 4721 tte_t tte; 4722 4723 ASSERT(sfmmup != NULL); 4724 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4725 4726 ASSERT((sfmmup == ksfmmup) || 4727 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4728 4729 if (sfmmup == ksfmmup) { 4730 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4731 == PFN_SUSPENDED) { 4732 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4733 } 4734 } else { 4735 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4736 } 4737 4738 if (pfn != PFN_INVALID) 4739 return (1); 4740 else 4741 return (0); 4742 } 4743 4744 ssize_t 4745 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4746 { 4747 tte_t tte; 4748 4749 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4750 4751 if (sfmmup == ksfmmup) { 4752 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4753 return (-1); 4754 } 4755 } else { 4756 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4757 return (-1); 4758 } 4759 } 4760 4761 ASSERT(TTE_IS_VALID(&tte)); 4762 return (TTEBYTES(TTE_CSZ(&tte))); 4763 } 4764 4765 uint_t 4766 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4767 { 4768 tte_t tte; 4769 4770 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4771 4772 if (sfmmup == ksfmmup) { 4773 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4774 tte.ll = 0; 4775 } 4776 } else { 4777 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4778 tte.ll = 0; 4779 } 4780 } 4781 if (TTE_IS_VALID(&tte)) { 4782 *attr = sfmmu_ptov_attr(&tte); 4783 return (0); 4784 } 4785 *attr = 0; 4786 return ((uint_t)0xffffffff); 4787 } 4788 4789 /* 4790 * Enables more attributes on specified address range (ie. logical OR) 4791 */ 4792 void 4793 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4794 { 4795 if (hat->sfmmu_xhat_provider) { 4796 XHAT_SETATTR(hat, addr, len, attr); 4797 return; 4798 } else { 4799 /* 4800 * This must be a CPU HAT. If the address space has 4801 * XHATs attached, change attributes for all of them, 4802 * just in case 4803 */ 4804 ASSERT(hat->sfmmu_as != NULL); 4805 if (hat->sfmmu_as->a_xhat != NULL) 4806 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4807 } 4808 4809 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4810 } 4811 4812 /* 4813 * Assigns attributes to the specified address range. All the attributes 4814 * are specified. 4815 */ 4816 void 4817 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4818 { 4819 if (hat->sfmmu_xhat_provider) { 4820 XHAT_CHGATTR(hat, addr, len, attr); 4821 return; 4822 } else { 4823 /* 4824 * This must be a CPU HAT. If the address space has 4825 * XHATs attached, change attributes for all of them, 4826 * just in case 4827 */ 4828 ASSERT(hat->sfmmu_as != NULL); 4829 if (hat->sfmmu_as->a_xhat != NULL) 4830 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4831 } 4832 4833 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4834 } 4835 4836 /* 4837 * Remove attributes on the specified address range (ie. loginal NAND) 4838 */ 4839 void 4840 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4841 { 4842 if (hat->sfmmu_xhat_provider) { 4843 XHAT_CLRATTR(hat, addr, len, attr); 4844 return; 4845 } else { 4846 /* 4847 * This must be a CPU HAT. 
If the address space has 4848 * XHATs attached, change attributes for all of them, 4849 * just in case 4850 */ 4851 ASSERT(hat->sfmmu_as != NULL); 4852 if (hat->sfmmu_as->a_xhat != NULL) 4853 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4854 } 4855 4856 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4857 } 4858 4859 /* 4860 * Change attributes on an address range to that specified by attr and mode. 4861 */ 4862 static void 4863 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4864 int mode) 4865 { 4866 struct hmehash_bucket *hmebp; 4867 hmeblk_tag hblktag; 4868 int hmeshift, hashno = 1; 4869 struct hme_blk *hmeblkp, *list = NULL; 4870 caddr_t endaddr; 4871 cpuset_t cpuset; 4872 demap_range_t dmr; 4873 4874 CPUSET_ZERO(cpuset); 4875 4876 ASSERT((sfmmup == ksfmmup) || 4877 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4878 ASSERT((len & MMU_PAGEOFFSET) == 0); 4879 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4880 4881 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4882 ((addr + len) > (caddr_t)USERLIMIT)) { 4883 panic("user addr %p in kernel space", 4884 (void *)addr); 4885 } 4886 4887 endaddr = addr + len; 4888 hblktag.htag_id = sfmmup; 4889 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4890 DEMAP_RANGE_INIT(sfmmup, &dmr); 4891 4892 while (addr < endaddr) { 4893 hmeshift = HME_HASH_SHIFT(hashno); 4894 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4895 hblktag.htag_rehash = hashno; 4896 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4897 4898 SFMMU_HASH_LOCK(hmebp); 4899 4900 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4901 if (hmeblkp != NULL) { 4902 ASSERT(!hmeblkp->hblk_shared); 4903 /* 4904 * We've encountered a shadow hmeblk so skip the range 4905 * of the next smaller mapping size. 4906 */ 4907 if (hmeblkp->hblk_shw_bit) { 4908 ASSERT(sfmmup != ksfmmup); 4909 ASSERT(hashno > 1); 4910 addr = (caddr_t)P2END((uintptr_t)addr, 4911 TTEBYTES(hashno - 1)); 4912 } else { 4913 addr = sfmmu_hblk_chgattr(sfmmup, 4914 hmeblkp, addr, endaddr, &dmr, attr, mode); 4915 } 4916 SFMMU_HASH_UNLOCK(hmebp); 4917 hashno = 1; 4918 continue; 4919 } 4920 SFMMU_HASH_UNLOCK(hmebp); 4921 4922 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4923 /* 4924 * We have traversed the whole list and rehashed 4925 * if necessary without finding the address to chgattr. 4926 * This is ok, so we increment the address by the 4927 * smallest hmeblk range for kernel mappings or for 4928 * user mappings with no large pages, and the largest 4929 * hmeblk range, to account for shadow hmeblks, for 4930 * user mappings with large pages and continue. 4931 */ 4932 if (sfmmup == ksfmmup) 4933 addr = (caddr_t)P2END((uintptr_t)addr, 4934 TTEBYTES(1)); 4935 else 4936 addr = (caddr_t)P2END((uintptr_t)addr, 4937 TTEBYTES(hashno)); 4938 hashno = 1; 4939 } else { 4940 hashno++; 4941 } 4942 } 4943 4944 sfmmu_hblks_list_purge(&list, 0); 4945 DEMAP_RANGE_FLUSH(&dmr); 4946 cpuset = sfmmup->sfmmu_cpusran; 4947 xt_sync(cpuset); 4948 } 4949 4950 /* 4951 * This function chgattr on a range of addresses in an hmeblk. It returns the 4952 * next addres that needs to be chgattr. 4953 * It should be called with the hash lock held. 4954 * XXX It should be possible to optimize chgattr by not flushing every time but 4955 * on the other hand: 4956 * 1. do one flush crosscall. 4957 * 2. 
only flush if we are increasing permissions (make sure this will work) 4958 */ 4959 static caddr_t 4960 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4961 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4962 { 4963 tte_t tte, tteattr, tteflags, ttemod; 4964 struct sf_hment *sfhmep; 4965 int ttesz; 4966 struct page *pp = NULL; 4967 kmutex_t *pml, *pmtx; 4968 int ret; 4969 int use_demap_range; 4970 #if defined(SF_ERRATA_57) 4971 int check_exec; 4972 #endif 4973 4974 ASSERT(in_hblk_range(hmeblkp, addr)); 4975 ASSERT(hmeblkp->hblk_shw_bit == 0); 4976 ASSERT(!hmeblkp->hblk_shared); 4977 4978 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4979 ttesz = get_hblk_ttesz(hmeblkp); 4980 4981 /* 4982 * Flush the current demap region if addresses have been 4983 * skipped or the page size doesn't match. 4984 */ 4985 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4986 if (use_demap_range) { 4987 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4988 } else { 4989 DEMAP_RANGE_FLUSH(dmrp); 4990 } 4991 4992 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4993 #if defined(SF_ERRATA_57) 4994 check_exec = (sfmmup != ksfmmup) && 4995 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4996 TTE_IS_EXECUTABLE(&tteattr); 4997 #endif 4998 HBLKTOHME(sfhmep, hmeblkp, addr); 4999 while (addr < endaddr) { 5000 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5001 if (TTE_IS_VALID(&tte)) { 5002 if ((tte.ll & tteflags.ll) == tteattr.ll) { 5003 /* 5004 * if the new attr is the same as old 5005 * continue 5006 */ 5007 goto next_addr; 5008 } 5009 if (!TTE_IS_WRITABLE(&tteattr)) { 5010 /* 5011 * make sure we clear hw modify bit if we 5012 * removing write protections 5013 */ 5014 tteflags.tte_intlo |= TTE_HWWR_INT; 5015 } 5016 5017 pml = NULL; 5018 pp = sfhmep->hme_page; 5019 if (pp) { 5020 pml = sfmmu_mlist_enter(pp); 5021 } 5022 5023 if (pp != sfhmep->hme_page) { 5024 /* 5025 * tte must have been unloaded. 5026 */ 5027 ASSERT(pml); 5028 sfmmu_mlist_exit(pml); 5029 continue; 5030 } 5031 5032 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5033 5034 ttemod = tte; 5035 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 5036 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 5037 5038 #if defined(SF_ERRATA_57) 5039 if (check_exec && addr < errata57_limit) 5040 ttemod.tte_exec_perm = 0; 5041 #endif 5042 ret = sfmmu_modifytte_try(&tte, &ttemod, 5043 &sfhmep->hme_tte); 5044 5045 if (ret < 0) { 5046 /* tte changed underneath us */ 5047 if (pml) { 5048 sfmmu_mlist_exit(pml); 5049 } 5050 continue; 5051 } 5052 5053 if ((tteflags.tte_intlo & TTE_HWWR_INT) || 5054 (TTE_EXECUTED(&tte) && 5055 !TTE_IS_EXECUTABLE(&ttemod))) { 5056 /* 5057 * need to sync if clearing modify/exec bit. 5058 */ 5059 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5060 } 5061 5062 if (pp && PP_ISRO(pp)) { 5063 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 5064 pmtx = sfmmu_page_enter(pp); 5065 PP_CLRRO(pp); 5066 sfmmu_page_exit(pmtx); 5067 } 5068 } 5069 5070 if (ret > 0 && use_demap_range) { 5071 DEMAP_RANGE_MARKPG(dmrp, addr); 5072 } else if (ret > 0) { 5073 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5074 } 5075 5076 if (pml) { 5077 sfmmu_mlist_exit(pml); 5078 } 5079 } 5080 next_addr: 5081 addr += TTEBYTES(ttesz); 5082 sfhmep++; 5083 DEMAP_RANGE_NEXTPG(dmrp); 5084 } 5085 return (addr); 5086 } 5087 5088 /* 5089 * This routine converts virtual attributes to physical ones. It will 5090 * update the tteflags field with the tte mask corresponding to the attributes 5091 * affected and it returns the new attributes. 
It will also clear the modify 5092 * bit if we are taking away write permission. This is necessary since the 5093 * modify bit is the hardware permission bit and we need to clear it in order 5094 * to detect write faults. 5095 */ 5096 static uint64_t 5097 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 5098 { 5099 tte_t ttevalue; 5100 5101 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5102 5103 switch (mode) { 5104 case SFMMU_CHGATTR: 5105 /* all attributes specified */ 5106 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5107 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5108 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5109 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5110 if (!icache_is_coherent) { 5111 if (!(attr & PROT_EXEC)) { 5112 TTE_SET_SOFTEXEC(ttemaskp); 5113 } else { 5114 TTE_CLR_EXEC(ttemaskp); 5115 TTE_SET_SOFTEXEC(&ttevalue); 5116 } 5117 } 5118 break; 5119 case SFMMU_SETATTR: 5120 ASSERT(!(attr & ~HAT_PROT_MASK)); 5121 ttemaskp->ll = 0; 5122 ttevalue.ll = 0; 5123 /* 5124 * a valid tte implies exec and read for sfmmu 5125 * so no need to do anything about them. 5126 * since priviledged access implies user access 5127 * PROT_USER doesn't make sense either. 5128 */ 5129 if (attr & PROT_WRITE) { 5130 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5131 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5132 } 5133 break; 5134 case SFMMU_CLRATTR: 5135 /* attributes will be nand with current ones */ 5136 if (attr & ~(PROT_WRITE | PROT_USER)) { 5137 panic("sfmmu: attr %x not supported", attr); 5138 } 5139 ttemaskp->ll = 0; 5140 ttevalue.ll = 0; 5141 if (attr & PROT_WRITE) { 5142 /* clear both writable and modify bit */ 5143 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5144 } 5145 if (attr & PROT_USER) { 5146 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5147 ttevalue.tte_intlo |= TTE_PRIV_INT; 5148 } 5149 break; 5150 default: 5151 panic("sfmmu_vtop_attr: bad mode %x", mode); 5152 } 5153 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5154 return (ttevalue.ll); 5155 } 5156 5157 static uint_t 5158 sfmmu_ptov_attr(tte_t *ttep) 5159 { 5160 uint_t attr; 5161 5162 ASSERT(TTE_IS_VALID(ttep)); 5163 5164 attr = PROT_READ; 5165 5166 if (TTE_IS_WRITABLE(ttep)) { 5167 attr |= PROT_WRITE; 5168 } 5169 if (TTE_IS_EXECUTABLE(ttep)) { 5170 attr |= PROT_EXEC; 5171 } 5172 if (TTE_IS_SOFTEXEC(ttep)) { 5173 attr |= PROT_EXEC; 5174 } 5175 if (!TTE_IS_PRIVILEGED(ttep)) { 5176 attr |= PROT_USER; 5177 } 5178 if (TTE_IS_NFO(ttep)) { 5179 attr |= HAT_NOFAULT; 5180 } 5181 if (TTE_IS_NOSYNC(ttep)) { 5182 attr |= HAT_NOSYNC; 5183 } 5184 if (TTE_IS_SIDEFFECT(ttep)) { 5185 attr |= SFMMU_SIDEFFECT; 5186 } 5187 if (!TTE_IS_VCACHEABLE(ttep)) { 5188 attr |= SFMMU_UNCACHEVTTE; 5189 } 5190 if (!TTE_IS_PCACHEABLE(ttep)) { 5191 attr |= SFMMU_UNCACHEPTTE; 5192 } 5193 return (attr); 5194 } 5195 5196 /* 5197 * hat_chgprot is a deprecated hat call. New segment drivers 5198 * should store all attributes and use hat_*attr calls. 5199 * 5200 * Change the protections in the virtual address range 5201 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5202 * then remove write permission, leaving the other 5203 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 
5204 * 5205 */ 5206 void 5207 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5208 { 5209 struct hmehash_bucket *hmebp; 5210 hmeblk_tag hblktag; 5211 int hmeshift, hashno = 1; 5212 struct hme_blk *hmeblkp, *list = NULL; 5213 caddr_t endaddr; 5214 cpuset_t cpuset; 5215 demap_range_t dmr; 5216 5217 ASSERT((len & MMU_PAGEOFFSET) == 0); 5218 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5219 5220 if (sfmmup->sfmmu_xhat_provider) { 5221 XHAT_CHGPROT(sfmmup, addr, len, vprot); 5222 return; 5223 } else { 5224 /* 5225 * This must be a CPU HAT. If the address space has 5226 * XHATs attached, change attributes for all of them, 5227 * just in case 5228 */ 5229 ASSERT(sfmmup->sfmmu_as != NULL); 5230 if (sfmmup->sfmmu_as->a_xhat != NULL) 5231 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 5232 } 5233 5234 CPUSET_ZERO(cpuset); 5235 5236 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 5237 ((addr + len) > (caddr_t)USERLIMIT)) { 5238 panic("user addr %p vprot %x in kernel space", 5239 (void *)addr, vprot); 5240 } 5241 endaddr = addr + len; 5242 hblktag.htag_id = sfmmup; 5243 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5244 DEMAP_RANGE_INIT(sfmmup, &dmr); 5245 5246 while (addr < endaddr) { 5247 hmeshift = HME_HASH_SHIFT(hashno); 5248 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5249 hblktag.htag_rehash = hashno; 5250 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5251 5252 SFMMU_HASH_LOCK(hmebp); 5253 5254 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5255 if (hmeblkp != NULL) { 5256 ASSERT(!hmeblkp->hblk_shared); 5257 /* 5258 * We've encountered a shadow hmeblk so skip the range 5259 * of the next smaller mapping size. 5260 */ 5261 if (hmeblkp->hblk_shw_bit) { 5262 ASSERT(sfmmup != ksfmmup); 5263 ASSERT(hashno > 1); 5264 addr = (caddr_t)P2END((uintptr_t)addr, 5265 TTEBYTES(hashno - 1)); 5266 } else { 5267 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 5268 addr, endaddr, &dmr, vprot); 5269 } 5270 SFMMU_HASH_UNLOCK(hmebp); 5271 hashno = 1; 5272 continue; 5273 } 5274 SFMMU_HASH_UNLOCK(hmebp); 5275 5276 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5277 /* 5278 * We have traversed the whole list and rehashed 5279 * if necessary without finding the address to chgprot. 5280 * This is ok so we increment the address by the 5281 * smallest hmeblk range for kernel mappings and the 5282 * largest hmeblk range, to account for shadow hmeblks, 5283 * for user mappings and continue. 5284 */ 5285 if (sfmmup == ksfmmup) 5286 addr = (caddr_t)P2END((uintptr_t)addr, 5287 TTEBYTES(1)); 5288 else 5289 addr = (caddr_t)P2END((uintptr_t)addr, 5290 TTEBYTES(hashno)); 5291 hashno = 1; 5292 } else { 5293 hashno++; 5294 } 5295 } 5296 5297 sfmmu_hblks_list_purge(&list, 0); 5298 DEMAP_RANGE_FLUSH(&dmr); 5299 cpuset = sfmmup->sfmmu_cpusran; 5300 xt_sync(cpuset); 5301 } 5302 5303 /* 5304 * This function chgprots a range of addresses in an hmeblk. It returns the 5305 * next addres that needs to be chgprot. 5306 * It should be called with the hash lock held. 5307 * XXX It shold be possible to optimize chgprot by not flushing every time but 5308 * on the other hand: 5309 * 1. do one flush crosscall. 5310 * 2. 
only flush if we are increasing permissions (make sure this will work) 5311 */ 5312 static caddr_t 5313 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5314 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5315 { 5316 uint_t pprot; 5317 tte_t tte, ttemod; 5318 struct sf_hment *sfhmep; 5319 uint_t tteflags; 5320 int ttesz; 5321 struct page *pp = NULL; 5322 kmutex_t *pml, *pmtx; 5323 int ret; 5324 int use_demap_range; 5325 #if defined(SF_ERRATA_57) 5326 int check_exec; 5327 #endif 5328 5329 ASSERT(in_hblk_range(hmeblkp, addr)); 5330 ASSERT(hmeblkp->hblk_shw_bit == 0); 5331 ASSERT(!hmeblkp->hblk_shared); 5332 5333 #ifdef DEBUG 5334 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5335 (endaddr < get_hblk_endaddr(hmeblkp))) { 5336 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5337 } 5338 #endif /* DEBUG */ 5339 5340 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5341 ttesz = get_hblk_ttesz(hmeblkp); 5342 5343 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5344 #if defined(SF_ERRATA_57) 5345 check_exec = (sfmmup != ksfmmup) && 5346 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5347 ((vprot & PROT_EXEC) == PROT_EXEC); 5348 #endif 5349 HBLKTOHME(sfhmep, hmeblkp, addr); 5350 5351 /* 5352 * Flush the current demap region if addresses have been 5353 * skipped or the page size doesn't match. 5354 */ 5355 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5356 if (use_demap_range) { 5357 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5358 } else { 5359 DEMAP_RANGE_FLUSH(dmrp); 5360 } 5361 5362 while (addr < endaddr) { 5363 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5364 if (TTE_IS_VALID(&tte)) { 5365 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5366 /* 5367 * if the new protection is the same as old 5368 * continue 5369 */ 5370 goto next_addr; 5371 } 5372 pml = NULL; 5373 pp = sfhmep->hme_page; 5374 if (pp) { 5375 pml = sfmmu_mlist_enter(pp); 5376 } 5377 if (pp != sfhmep->hme_page) { 5378 /* 5379 * tte most have been unloaded 5380 * underneath us. Recheck 5381 */ 5382 ASSERT(pml); 5383 sfmmu_mlist_exit(pml); 5384 continue; 5385 } 5386 5387 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5388 5389 ttemod = tte; 5390 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5391 ASSERT(TTE_IS_SOFTEXEC(&tte) == 5392 TTE_IS_SOFTEXEC(&ttemod)); 5393 ASSERT(TTE_IS_EXECUTABLE(&tte) == 5394 TTE_IS_EXECUTABLE(&ttemod)); 5395 5396 #if defined(SF_ERRATA_57) 5397 if (check_exec && addr < errata57_limit) 5398 ttemod.tte_exec_perm = 0; 5399 #endif 5400 ret = sfmmu_modifytte_try(&tte, &ttemod, 5401 &sfhmep->hme_tte); 5402 5403 if (ret < 0) { 5404 /* tte changed underneath us */ 5405 if (pml) { 5406 sfmmu_mlist_exit(pml); 5407 } 5408 continue; 5409 } 5410 5411 if (tteflags & TTE_HWWR_INT) { 5412 /* 5413 * need to sync if we are clearing modify bit. 5414 */ 5415 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5416 } 5417 5418 if (pp && PP_ISRO(pp)) { 5419 if (pprot & TTE_WRPRM_INT) { 5420 pmtx = sfmmu_page_enter(pp); 5421 PP_CLRRO(pp); 5422 sfmmu_page_exit(pmtx); 5423 } 5424 } 5425 5426 if (ret > 0 && use_demap_range) { 5427 DEMAP_RANGE_MARKPG(dmrp, addr); 5428 } else if (ret > 0) { 5429 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5430 } 5431 5432 if (pml) { 5433 sfmmu_mlist_exit(pml); 5434 } 5435 } 5436 next_addr: 5437 addr += TTEBYTES(ttesz); 5438 sfhmep++; 5439 DEMAP_RANGE_NEXTPG(dmrp); 5440 } 5441 return (addr); 5442 } 5443 5444 /* 5445 * This routine is deprecated and should only be used by hat_chgprot. 5446 * The correct routine is sfmmu_vtop_attr. 
5447 * This routine converts virtual page protections to physical ones. It will 5448 * update the tteflags field with the tte mask corresponding to the protections 5449 * affected and it returns the new protections. It will also clear the modify 5450 * bit if we are taking away write permission. This is necessary since the 5451 * modify bit is the hardware permission bit and we need to clear it in order 5452 * to detect write faults. 5453 * It accepts the following special protections: 5454 * ~PROT_WRITE = remove write permissions. 5455 * ~PROT_USER = remove user permissions. 5456 */ 5457 static uint_t 5458 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5459 { 5460 if (vprot == (uint_t)~PROT_WRITE) { 5461 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5462 return (0); /* will cause wrprm to be cleared */ 5463 } 5464 if (vprot == (uint_t)~PROT_USER) { 5465 *tteflagsp = TTE_PRIV_INT; 5466 return (0); /* will cause privprm to be cleared */ 5467 } 5468 if ((vprot == 0) || (vprot == PROT_USER) || 5469 ((vprot & PROT_ALL) != vprot)) { 5470 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5471 } 5472 5473 switch (vprot) { 5474 case (PROT_READ): 5475 case (PROT_EXEC): 5476 case (PROT_EXEC | PROT_READ): 5477 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5478 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5479 case (PROT_WRITE): 5480 case (PROT_WRITE | PROT_READ): 5481 case (PROT_EXEC | PROT_WRITE): 5482 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5483 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5484 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5485 case (PROT_USER | PROT_READ): 5486 case (PROT_USER | PROT_EXEC): 5487 case (PROT_USER | PROT_EXEC | PROT_READ): 5488 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5489 return (0); /* clr prv and wrt */ 5490 case (PROT_USER | PROT_WRITE): 5491 case (PROT_USER | PROT_WRITE | PROT_READ): 5492 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5493 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5494 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5495 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5496 default: 5497 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5498 } 5499 return (0); 5500 } 5501 5502 /* 5503 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5504 * the normal algorithm would take too long for a very large VA range with 5505 * few real mappings. This routine just walks thru all HMEs in the global 5506 * hash table to find and remove mappings. 5507 */ 5508 static void 5509 hat_unload_large_virtual( 5510 struct hat *sfmmup, 5511 caddr_t startaddr, 5512 size_t len, 5513 uint_t flags, 5514 hat_callback_t *callback) 5515 { 5516 struct hmehash_bucket *hmebp; 5517 struct hme_blk *hmeblkp; 5518 struct hme_blk *pr_hblk = NULL; 5519 struct hme_blk *nx_hblk; 5520 struct hme_blk *list = NULL; 5521 int i; 5522 demap_range_t dmr, *dmrp; 5523 cpuset_t cpuset; 5524 caddr_t endaddr = startaddr + len; 5525 caddr_t sa; 5526 caddr_t ea; 5527 caddr_t cb_sa[MAX_CB_ADDR]; 5528 caddr_t cb_ea[MAX_CB_ADDR]; 5529 int addr_cnt = 0; 5530 int a = 0; 5531 5532 if (sfmmup->sfmmu_free) { 5533 dmrp = NULL; 5534 } else { 5535 dmrp = &dmr; 5536 DEMAP_RANGE_INIT(sfmmup, dmrp); 5537 } 5538 5539 /* 5540 * Loop through all the hash buckets of HME blocks looking for matches. 
5541 */ 5542 for (i = 0; i <= UHMEHASH_SZ; i++) { 5543 hmebp = &uhme_hash[i]; 5544 SFMMU_HASH_LOCK(hmebp); 5545 hmeblkp = hmebp->hmeblkp; 5546 pr_hblk = NULL; 5547 while (hmeblkp) { 5548 nx_hblk = hmeblkp->hblk_next; 5549 5550 /* 5551 * skip if not this context, if a shadow block or 5552 * if the mapping is not in the requested range 5553 */ 5554 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5555 hmeblkp->hblk_shw_bit || 5556 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5557 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5558 pr_hblk = hmeblkp; 5559 goto next_block; 5560 } 5561 5562 ASSERT(!hmeblkp->hblk_shared); 5563 /* 5564 * unload if there are any current valid mappings 5565 */ 5566 if (hmeblkp->hblk_vcnt != 0 || 5567 hmeblkp->hblk_hmecnt != 0) 5568 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5569 sa, ea, dmrp, flags); 5570 5571 /* 5572 * on unmap we also release the HME block itself, once 5573 * all mappings are gone. 5574 */ 5575 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5576 !hmeblkp->hblk_vcnt && 5577 !hmeblkp->hblk_hmecnt) { 5578 ASSERT(!hmeblkp->hblk_lckcnt); 5579 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5580 &list, 0); 5581 } else { 5582 pr_hblk = hmeblkp; 5583 } 5584 5585 if (callback == NULL) 5586 goto next_block; 5587 5588 /* 5589 * HME blocks may span more than one page, but we may be 5590 * unmapping only one page, so check for a smaller range 5591 * for the callback 5592 */ 5593 if (sa < startaddr) 5594 sa = startaddr; 5595 if (--ea > endaddr) 5596 ea = endaddr - 1; 5597 5598 cb_sa[addr_cnt] = sa; 5599 cb_ea[addr_cnt] = ea; 5600 if (++addr_cnt == MAX_CB_ADDR) { 5601 if (dmrp != NULL) { 5602 DEMAP_RANGE_FLUSH(dmrp); 5603 cpuset = sfmmup->sfmmu_cpusran; 5604 xt_sync(cpuset); 5605 } 5606 5607 for (a = 0; a < MAX_CB_ADDR; ++a) { 5608 callback->hcb_start_addr = cb_sa[a]; 5609 callback->hcb_end_addr = cb_ea[a]; 5610 callback->hcb_function(callback); 5611 } 5612 addr_cnt = 0; 5613 } 5614 5615 next_block: 5616 hmeblkp = nx_hblk; 5617 } 5618 SFMMU_HASH_UNLOCK(hmebp); 5619 } 5620 5621 sfmmu_hblks_list_purge(&list, 0); 5622 if (dmrp != NULL) { 5623 DEMAP_RANGE_FLUSH(dmrp); 5624 cpuset = sfmmup->sfmmu_cpusran; 5625 xt_sync(cpuset); 5626 } 5627 5628 for (a = 0; a < addr_cnt; ++a) { 5629 callback->hcb_start_addr = cb_sa[a]; 5630 callback->hcb_end_addr = cb_ea[a]; 5631 callback->hcb_function(callback); 5632 } 5633 5634 /* 5635 * Check TSB and TLB page sizes if the process isn't exiting. 5636 */ 5637 if (!sfmmup->sfmmu_free) 5638 sfmmu_check_page_sizes(sfmmup, 0); 5639 } 5640 5641 /* 5642 * Unload all the mappings in the range [addr..addr+len). addr and len must 5643 * be MMU_PAGESIZE aligned. 
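 *
 * A hedged sketch for callers that need to know which chunks were
 * actually unloaded (my_unload_notify is a hypothetical notification
 * routine):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 *
 * my_unload_notify() is invoked with hcb_start_addr and hcb_end_addr set
 * for each contiguous range removed; passing a NULL callback behaves like
 * plain hat_unload().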
5644 */ 5645 5646 extern struct seg *segkmap; 5647 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5648 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5649 5650 5651 void 5652 hat_unload_callback( 5653 struct hat *sfmmup, 5654 caddr_t addr, 5655 size_t len, 5656 uint_t flags, 5657 hat_callback_t *callback) 5658 { 5659 struct hmehash_bucket *hmebp; 5660 hmeblk_tag hblktag; 5661 int hmeshift, hashno, iskernel; 5662 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5663 caddr_t endaddr; 5664 cpuset_t cpuset; 5665 int addr_count = 0; 5666 int a; 5667 caddr_t cb_start_addr[MAX_CB_ADDR]; 5668 caddr_t cb_end_addr[MAX_CB_ADDR]; 5669 int issegkmap = ISSEGKMAP(sfmmup, addr); 5670 demap_range_t dmr, *dmrp; 5671 5672 if (sfmmup->sfmmu_xhat_provider) { 5673 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 5674 return; 5675 } else { 5676 /* 5677 * This must be a CPU HAT. If the address space has 5678 * XHATs attached, unload the mappings for all of them, 5679 * just in case 5680 */ 5681 ASSERT(sfmmup->sfmmu_as != NULL); 5682 if (sfmmup->sfmmu_as->a_xhat != NULL) 5683 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 5684 len, flags, callback); 5685 } 5686 5687 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5688 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5689 5690 ASSERT(sfmmup != NULL); 5691 ASSERT((len & MMU_PAGEOFFSET) == 0); 5692 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5693 5694 /* 5695 * Probing through a large VA range (say 63 bits) will be slow, even 5696 * at 4 Meg steps between the probes. So, when the virtual address range 5697 * is very large, search the HME entries for what to unload. 5698 * 5699 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5700 * 5701 * UHMEHASH_SZ is number of hash buckets to examine 5702 * 5703 */ 5704 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5705 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5706 return; 5707 } 5708 5709 CPUSET_ZERO(cpuset); 5710 5711 /* 5712 * If the process is exiting, we can save a lot of fuss since 5713 * we'll flush the TLB when we free the ctx anyway. 5714 */ 5715 if (sfmmup->sfmmu_free) 5716 dmrp = NULL; 5717 else 5718 dmrp = &dmr; 5719 5720 DEMAP_RANGE_INIT(sfmmup, dmrp); 5721 endaddr = addr + len; 5722 hblktag.htag_id = sfmmup; 5723 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5724 5725 /* 5726 * It is likely for the vm to call unload over a wide range of 5727 * addresses that are actually very sparsely populated by 5728 * translations. In order to speed this up the sfmmu hat supports 5729 * the concept of shadow hmeblks. Dummy large page hmeblks that 5730 * correspond to actual small translations are allocated at tteload 5731 * time and are referred to as shadow hmeblks. Now, during unload 5732 * time, we first check if we have a shadow hmeblk for that 5733 * translation. The absence of one means the corresponding address 5734 * range is empty and can be skipped. 5735 * 5736 * The kernel is an exception to above statement and that is why 5737 * we don't use shadow hmeblks and hash starting from the smallest 5738 * page size. 
5739 */ 5740 if (sfmmup == KHATID) { 5741 iskernel = 1; 5742 hashno = TTE64K; 5743 } else { 5744 iskernel = 0; 5745 if (mmu_page_sizes == max_mmu_page_sizes) { 5746 hashno = TTE256M; 5747 } else { 5748 hashno = TTE4M; 5749 } 5750 } 5751 while (addr < endaddr) { 5752 hmeshift = HME_HASH_SHIFT(hashno); 5753 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5754 hblktag.htag_rehash = hashno; 5755 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5756 5757 SFMMU_HASH_LOCK(hmebp); 5758 5759 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 5760 if (hmeblkp == NULL) { 5761 /* 5762 * didn't find an hmeblk. skip the appropiate 5763 * address range. 5764 */ 5765 SFMMU_HASH_UNLOCK(hmebp); 5766 if (iskernel) { 5767 if (hashno < mmu_hashcnt) { 5768 hashno++; 5769 continue; 5770 } else { 5771 hashno = TTE64K; 5772 addr = (caddr_t)roundup((uintptr_t)addr 5773 + 1, MMU_PAGESIZE64K); 5774 continue; 5775 } 5776 } 5777 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5778 (1 << hmeshift)); 5779 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5780 ASSERT(hashno == TTE64K); 5781 continue; 5782 } 5783 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5784 hashno = TTE512K; 5785 continue; 5786 } 5787 if (mmu_page_sizes == max_mmu_page_sizes) { 5788 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5789 hashno = TTE4M; 5790 continue; 5791 } 5792 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5793 hashno = TTE32M; 5794 continue; 5795 } 5796 hashno = TTE256M; 5797 continue; 5798 } else { 5799 hashno = TTE4M; 5800 continue; 5801 } 5802 } 5803 ASSERT(hmeblkp); 5804 ASSERT(!hmeblkp->hblk_shared); 5805 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5806 /* 5807 * If the valid count is zero we can skip the range 5808 * mapped by this hmeblk. 5809 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5810 * is used by segment drivers as a hint 5811 * that the mapping resource won't be used any longer. 5812 * The best example of this is during exit(). 5813 */ 5814 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5815 get_hblk_span(hmeblkp)); 5816 if ((flags & HAT_UNLOAD_UNMAP) || 5817 (iskernel && !issegkmap)) { 5818 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5819 &list, 0); 5820 } 5821 SFMMU_HASH_UNLOCK(hmebp); 5822 5823 if (iskernel) { 5824 hashno = TTE64K; 5825 continue; 5826 } 5827 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5828 ASSERT(hashno == TTE64K); 5829 continue; 5830 } 5831 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5832 hashno = TTE512K; 5833 continue; 5834 } 5835 if (mmu_page_sizes == max_mmu_page_sizes) { 5836 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5837 hashno = TTE4M; 5838 continue; 5839 } 5840 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5841 hashno = TTE32M; 5842 continue; 5843 } 5844 hashno = TTE256M; 5845 continue; 5846 } else { 5847 hashno = TTE4M; 5848 continue; 5849 } 5850 } 5851 if (hmeblkp->hblk_shw_bit) { 5852 /* 5853 * If we encounter a shadow hmeblk we know there is 5854 * smaller sized hmeblks mapping the same address space. 5855 * Decrement the hash size and rehash. 5856 */ 5857 ASSERT(sfmmup != KHATID); 5858 hashno--; 5859 SFMMU_HASH_UNLOCK(hmebp); 5860 continue; 5861 } 5862 5863 /* 5864 * track callback address ranges. 
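 * we batch up to MAX_CB_ADDR (start, end) pairs before invoking the
 * caller's hcb_function, and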
5865 * only start a new range when it's not contiguous 5866 */ 5867 if (callback != NULL) { 5868 if (addr_count > 0 && 5869 addr == cb_end_addr[addr_count - 1]) 5870 --addr_count; 5871 else 5872 cb_start_addr[addr_count] = addr; 5873 } 5874 5875 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5876 dmrp, flags); 5877 5878 if (callback != NULL) 5879 cb_end_addr[addr_count++] = addr; 5880 5881 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5882 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5883 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0); 5884 } 5885 SFMMU_HASH_UNLOCK(hmebp); 5886 5887 /* 5888 * Notify our caller as to exactly which pages 5889 * have been unloaded. We do these in clumps, 5890 * to minimize the number of xt_sync()s that need to occur. 5891 */ 5892 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5893 DEMAP_RANGE_FLUSH(dmrp); 5894 if (dmrp != NULL) { 5895 cpuset = sfmmup->sfmmu_cpusran; 5896 xt_sync(cpuset); 5897 } 5898 5899 for (a = 0; a < MAX_CB_ADDR; ++a) { 5900 callback->hcb_start_addr = cb_start_addr[a]; 5901 callback->hcb_end_addr = cb_end_addr[a]; 5902 callback->hcb_function(callback); 5903 } 5904 addr_count = 0; 5905 } 5906 if (iskernel) { 5907 hashno = TTE64K; 5908 continue; 5909 } 5910 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5911 ASSERT(hashno == TTE64K); 5912 continue; 5913 } 5914 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5915 hashno = TTE512K; 5916 continue; 5917 } 5918 if (mmu_page_sizes == max_mmu_page_sizes) { 5919 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5920 hashno = TTE4M; 5921 continue; 5922 } 5923 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5924 hashno = TTE32M; 5925 continue; 5926 } 5927 hashno = TTE256M; 5928 } else { 5929 hashno = TTE4M; 5930 } 5931 } 5932 5933 sfmmu_hblks_list_purge(&list, 0); 5934 DEMAP_RANGE_FLUSH(dmrp); 5935 if (dmrp != NULL) { 5936 cpuset = sfmmup->sfmmu_cpusran; 5937 xt_sync(cpuset); 5938 } 5939 if (callback && addr_count != 0) { 5940 for (a = 0; a < addr_count; ++a) { 5941 callback->hcb_start_addr = cb_start_addr[a]; 5942 callback->hcb_end_addr = cb_end_addr[a]; 5943 callback->hcb_function(callback); 5944 } 5945 } 5946 5947 /* 5948 * Check TSB and TLB page sizes if the process isn't exiting. 5949 */ 5950 if (!sfmmup->sfmmu_free) 5951 sfmmu_check_page_sizes(sfmmup, 0); 5952 } 5953 5954 /* 5955 * Unload all the mappings in the range [addr..addr+len). addr and len must 5956 * be MMU_PAGESIZE aligned. 5957 */ 5958 void 5959 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5960 { 5961 if (sfmmup->sfmmu_xhat_provider) { 5962 XHAT_UNLOAD(sfmmup, addr, len, flags); 5963 return; 5964 } 5965 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5966 } 5967 5968 5969 /* 5970 * Find the largest mapping size for this page. 5971 */ 5972 int 5973 fnd_mapping_sz(page_t *pp) 5974 { 5975 int sz; 5976 int p_index; 5977 5978 p_index = PP_MAPINDEX(pp); 5979 5980 sz = 0; 5981 p_index >>= 1; /* don't care about 8K bit */ 5982 for (; p_index; p_index >>= 1) { 5983 sz++; 5984 } 5985 5986 return (sz); 5987 } 5988 5989 /* 5990 * This function unloads a range of addresses for an hmeblk. 5991 * It returns the next address to be unloaded. 5992 * It should be called with the hash lock held. 
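 *
 * A minimal sketch of the calling pattern (hash locking and hmeblk
 * lookup elided; see hat_unload_callback() above for the real loop):
 *
 *	while (addr < endaddr) {
 *		...find hmeblkp covering addr, take SFMMU_HASH_LOCK...
 *		addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
 *		    dmrp, flags);
 *		...SFMMU_HASH_UNLOCK...
 *	}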
5993 */ 5994 static caddr_t 5995 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5996 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5997 { 5998 tte_t tte, ttemod; 5999 struct sf_hment *sfhmep; 6000 int ttesz; 6001 long ttecnt; 6002 page_t *pp; 6003 kmutex_t *pml; 6004 int ret; 6005 int use_demap_range; 6006 6007 ASSERT(in_hblk_range(hmeblkp, addr)); 6008 ASSERT(!hmeblkp->hblk_shw_bit); 6009 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 6010 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 6011 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 6012 6013 #ifdef DEBUG 6014 if (get_hblk_ttesz(hmeblkp) != TTE8K && 6015 (endaddr < get_hblk_endaddr(hmeblkp))) { 6016 panic("sfmmu_hblk_unload: partial unload of large page"); 6017 } 6018 #endif /* DEBUG */ 6019 6020 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6021 ttesz = get_hblk_ttesz(hmeblkp); 6022 6023 use_demap_range = ((dmrp == NULL) || 6024 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 6025 6026 if (use_demap_range) { 6027 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 6028 } else { 6029 DEMAP_RANGE_FLUSH(dmrp); 6030 } 6031 ttecnt = 0; 6032 HBLKTOHME(sfhmep, hmeblkp, addr); 6033 6034 while (addr < endaddr) { 6035 pml = NULL; 6036 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6037 if (TTE_IS_VALID(&tte)) { 6038 pp = sfhmep->hme_page; 6039 if (pp != NULL) { 6040 pml = sfmmu_mlist_enter(pp); 6041 } 6042 6043 /* 6044 * Verify if hme still points to 'pp' now that 6045 * we have p_mapping lock. 6046 */ 6047 if (sfhmep->hme_page != pp) { 6048 if (pp != NULL && sfhmep->hme_page != NULL) { 6049 ASSERT(pml != NULL); 6050 sfmmu_mlist_exit(pml); 6051 /* Re-start this iteration. */ 6052 continue; 6053 } 6054 ASSERT((pp != NULL) && 6055 (sfhmep->hme_page == NULL)); 6056 goto tte_unloaded; 6057 } 6058 6059 /* 6060 * This point on we have both HASH and p_mapping 6061 * lock. 6062 */ 6063 ASSERT(pp == sfhmep->hme_page); 6064 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6065 6066 /* 6067 * We need to loop on modify tte because it is 6068 * possible for pagesync to come along and 6069 * change the software bits beneath us. 6070 * 6071 * Page_unload can also invalidate the tte after 6072 * we read tte outside of p_mapping lock. 6073 */ 6074 again: 6075 ttemod = tte; 6076 6077 TTE_SET_INVALID(&ttemod); 6078 ret = sfmmu_modifytte_try(&tte, &ttemod, 6079 &sfhmep->hme_tte); 6080 6081 if (ret <= 0) { 6082 if (TTE_IS_VALID(&tte)) { 6083 ASSERT(ret < 0); 6084 goto again; 6085 } 6086 if (pp != NULL) { 6087 panic("sfmmu_hblk_unload: pp = 0x%p " 6088 "tte became invalid under mlist" 6089 " lock = 0x%p", (void *)pp, 6090 (void *)pml); 6091 } 6092 continue; 6093 } 6094 6095 if (!(flags & HAT_UNLOAD_NOSYNC) || 6096 (pp != NULL && TTE_EXECUTED(&tte))) { 6097 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6098 } 6099 6100 /* 6101 * Ok- we invalidated the tte. Do the rest of the job. 6102 */ 6103 ttecnt++; 6104 6105 if (flags & HAT_UNLOAD_UNLOCK) { 6106 ASSERT(hmeblkp->hblk_lckcnt > 0); 6107 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 6108 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 6109 } 6110 6111 /* 6112 * Normally we would need to flush the page 6113 * from the virtual cache at this point in 6114 * order to prevent a potential cache alias 6115 * inconsistency. 6116 * The particular scenario we need to worry 6117 * about is: 6118 * Given: va1 and va2 are two virtual address 6119 * that alias and map the same physical 6120 * address. 6121 * 1. mapping exists from va1 to pa and data 6122 * has been read into the cache. 6123 * 2. unload va1. 6124 * 3. 
load va2 and modify data using va2. 6125 * 4 unload va2. 6126 * 5. load va1 and reference data. Unless we 6127 * flush the data cache when we unload we will 6128 * get stale data. 6129 * Fortunately, page coloring eliminates the 6130 * above scenario by remembering the color a 6131 * physical page was last or is currently 6132 * mapped to. Now, we delay the flush until 6133 * the loading of translations. Only when the 6134 * new translation is of a different color 6135 * are we forced to flush. 6136 */ 6137 if (use_demap_range) { 6138 /* 6139 * Mark this page as needing a demap. 6140 */ 6141 DEMAP_RANGE_MARKPG(dmrp, addr); 6142 } else { 6143 ASSERT(sfmmup != NULL); 6144 ASSERT(!hmeblkp->hblk_shared); 6145 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6146 sfmmup->sfmmu_free, 0); 6147 } 6148 6149 if (pp) { 6150 /* 6151 * Remove the hment from the mapping list 6152 */ 6153 ASSERT(hmeblkp->hblk_hmecnt > 0); 6154 6155 /* 6156 * Again, we cannot 6157 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6158 */ 6159 HME_SUB(sfhmep, pp); 6160 membar_stst(); 6161 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6162 } 6163 6164 ASSERT(hmeblkp->hblk_vcnt > 0); 6165 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6166 6167 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6168 !hmeblkp->hblk_lckcnt); 6169 6170 #ifdef VAC 6171 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6172 if (PP_ISTNC(pp)) { 6173 /* 6174 * If page was temporary 6175 * uncached, try to recache 6176 * it. Note that HME_SUB() was 6177 * called above so p_index and 6178 * mlist had been updated. 6179 */ 6180 conv_tnc(pp, ttesz); 6181 } else if (pp->p_mapping == NULL) { 6182 ASSERT(kpm_enable); 6183 /* 6184 * Page is marked to be in VAC conflict 6185 * to an existing kpm mapping and/or is 6186 * kpm mapped using only the regular 6187 * pagesize. 6188 */ 6189 sfmmu_kpm_hme_unload(pp); 6190 } 6191 } 6192 #endif /* VAC */ 6193 } else if ((pp = sfhmep->hme_page) != NULL) { 6194 /* 6195 * TTE is invalid but the hme 6196 * still exists. let pageunload 6197 * complete its job. 6198 */ 6199 ASSERT(pml == NULL); 6200 pml = sfmmu_mlist_enter(pp); 6201 if (sfhmep->hme_page != NULL) { 6202 sfmmu_mlist_exit(pml); 6203 continue; 6204 } 6205 ASSERT(sfhmep->hme_page == NULL); 6206 } else if (hmeblkp->hblk_hmecnt != 0) { 6207 /* 6208 * pageunload may have not finished decrementing 6209 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6210 * wait for pageunload to finish. Rely on pageunload 6211 * to decrement hblk_hmecnt after hblk_vcnt. 6212 */ 6213 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6214 ASSERT(pml == NULL); 6215 if (pf_is_memory(pfn)) { 6216 pp = page_numtopp_nolock(pfn); 6217 if (pp != NULL) { 6218 pml = sfmmu_mlist_enter(pp); 6219 sfmmu_mlist_exit(pml); 6220 pml = NULL; 6221 } 6222 } 6223 } 6224 6225 tte_unloaded: 6226 /* 6227 * At this point, the tte we are looking at 6228 * should be unloaded, and hme has been unlinked 6229 * from page too. This is important because in 6230 * pageunload, it does ttesync() then HME_SUB. 6231 * We need to make sure HME_SUB has been completed 6232 * so we know ttesync() has been completed. Otherwise, 6233 * at exit time, after return from hat layer, VM will 6234 * release as structure which hat_setstat() (called 6235 * by ttesync()) needs. 
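 * In other words, once we return from the hat layer the VM may free
 * the address space structure, so every hat_setstat() issued via
 * ttesync() must have completed by then.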
6236 */ 6237 #ifdef DEBUG 6238 { 6239 tte_t dtte; 6240 6241 ASSERT(sfhmep->hme_page == NULL); 6242 6243 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6244 ASSERT(!TTE_IS_VALID(&dtte)); 6245 } 6246 #endif 6247 6248 if (pml) { 6249 sfmmu_mlist_exit(pml); 6250 } 6251 6252 addr += TTEBYTES(ttesz); 6253 sfhmep++; 6254 DEMAP_RANGE_NEXTPG(dmrp); 6255 } 6256 /* 6257 * For shared hmeblks this routine is only called when region is freed 6258 * and no longer referenced. So no need to decrement ttecnt 6259 * in the region structure here. 6260 */ 6261 if (ttecnt > 0 && sfmmup != NULL) { 6262 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6263 } 6264 return (addr); 6265 } 6266 6267 /* 6268 * Synchronize all the mappings in the range [addr..addr+len). 6269 * Can be called with clearflag having two states: 6270 * HAT_SYNC_DONTZERO means just return the rm stats 6271 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6272 */ 6273 void 6274 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6275 { 6276 struct hmehash_bucket *hmebp; 6277 hmeblk_tag hblktag; 6278 int hmeshift, hashno = 1; 6279 struct hme_blk *hmeblkp, *list = NULL; 6280 caddr_t endaddr; 6281 cpuset_t cpuset; 6282 6283 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 6284 ASSERT((sfmmup == ksfmmup) || 6285 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 6286 ASSERT((len & MMU_PAGEOFFSET) == 0); 6287 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6288 (clearflag == HAT_SYNC_ZERORM)); 6289 6290 CPUSET_ZERO(cpuset); 6291 6292 endaddr = addr + len; 6293 hblktag.htag_id = sfmmup; 6294 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6295 6296 /* 6297 * Spitfire supports 4 page sizes. 6298 * Most pages are expected to be of the smallest page 6299 * size (8K) and these will not need to be rehashed. 64K 6300 * pages also don't need to be rehashed because the an hmeblk 6301 * spans 64K of address space. 512K pages might need 1 rehash and 6302 * and 4M pages 2 rehashes. 6303 */ 6304 while (addr < endaddr) { 6305 hmeshift = HME_HASH_SHIFT(hashno); 6306 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6307 hblktag.htag_rehash = hashno; 6308 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6309 6310 SFMMU_HASH_LOCK(hmebp); 6311 6312 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6313 if (hmeblkp != NULL) { 6314 ASSERT(!hmeblkp->hblk_shared); 6315 /* 6316 * We've encountered a shadow hmeblk so skip the range 6317 * of the next smaller mapping size. 6318 */ 6319 if (hmeblkp->hblk_shw_bit) { 6320 ASSERT(sfmmup != ksfmmup); 6321 ASSERT(hashno > 1); 6322 addr = (caddr_t)P2END((uintptr_t)addr, 6323 TTEBYTES(hashno - 1)); 6324 } else { 6325 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6326 addr, endaddr, clearflag); 6327 } 6328 SFMMU_HASH_UNLOCK(hmebp); 6329 hashno = 1; 6330 continue; 6331 } 6332 SFMMU_HASH_UNLOCK(hmebp); 6333 6334 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6335 /* 6336 * We have traversed the whole list and rehashed 6337 * if necessary without finding the address to sync. 6338 * This is ok so we increment the address by the 6339 * smallest hmeblk range for kernel mappings and the 6340 * largest hmeblk range, to account for shadow hmeblks, 6341 * for user mappings and continue. 
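 * For example, the kernel case advances to the next 64K boundary via
 * P2END(addr, TTEBYTES(1)), while a user hat skips the whole span of
 * the current hashno, TTEBYTES(hashno), in a single step.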
6342 */ 6343 if (sfmmup == ksfmmup) 6344 addr = (caddr_t)P2END((uintptr_t)addr, 6345 TTEBYTES(1)); 6346 else 6347 addr = (caddr_t)P2END((uintptr_t)addr, 6348 TTEBYTES(hashno)); 6349 hashno = 1; 6350 } else { 6351 hashno++; 6352 } 6353 } 6354 sfmmu_hblks_list_purge(&list, 0); 6355 cpuset = sfmmup->sfmmu_cpusran; 6356 xt_sync(cpuset); 6357 } 6358 6359 static caddr_t 6360 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6361 caddr_t endaddr, int clearflag) 6362 { 6363 tte_t tte, ttemod; 6364 struct sf_hment *sfhmep; 6365 int ttesz; 6366 struct page *pp; 6367 kmutex_t *pml; 6368 int ret; 6369 6370 ASSERT(hmeblkp->hblk_shw_bit == 0); 6371 ASSERT(!hmeblkp->hblk_shared); 6372 6373 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6374 6375 ttesz = get_hblk_ttesz(hmeblkp); 6376 HBLKTOHME(sfhmep, hmeblkp, addr); 6377 6378 while (addr < endaddr) { 6379 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6380 if (TTE_IS_VALID(&tte)) { 6381 pml = NULL; 6382 pp = sfhmep->hme_page; 6383 if (pp) { 6384 pml = sfmmu_mlist_enter(pp); 6385 } 6386 if (pp != sfhmep->hme_page) { 6387 /* 6388 * tte most have been unloaded 6389 * underneath us. Recheck 6390 */ 6391 ASSERT(pml); 6392 sfmmu_mlist_exit(pml); 6393 continue; 6394 } 6395 6396 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6397 6398 if (clearflag == HAT_SYNC_ZERORM) { 6399 ttemod = tte; 6400 TTE_CLR_RM(&ttemod); 6401 ret = sfmmu_modifytte_try(&tte, &ttemod, 6402 &sfhmep->hme_tte); 6403 if (ret < 0) { 6404 if (pml) { 6405 sfmmu_mlist_exit(pml); 6406 } 6407 continue; 6408 } 6409 6410 if (ret > 0) { 6411 sfmmu_tlb_demap(addr, sfmmup, 6412 hmeblkp, 0, 0); 6413 } 6414 } 6415 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6416 if (pml) { 6417 sfmmu_mlist_exit(pml); 6418 } 6419 } 6420 addr += TTEBYTES(ttesz); 6421 sfhmep++; 6422 } 6423 return (addr); 6424 } 6425 6426 /* 6427 * This function will sync a tte to the page struct and it will 6428 * update the hat stats. Currently it allows us to pass a NULL pp 6429 * and we will simply update the stats. We may want to change this 6430 * so we only keep stats for pages backed by pp's. 6431 */ 6432 static void 6433 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6434 { 6435 uint_t rm = 0; 6436 int sz = TTE_CSZ(ttep); 6437 pgcnt_t npgs; 6438 6439 ASSERT(TTE_IS_VALID(ttep)); 6440 6441 if (!TTE_IS_NOSYNC(ttep)) { 6442 6443 if (TTE_IS_REF(ttep)) 6444 rm |= P_REF; 6445 6446 if (TTE_IS_MOD(ttep)) 6447 rm |= P_MOD; 6448 6449 if (rm != 0) { 6450 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6451 int i; 6452 caddr_t vaddr = addr; 6453 6454 for (i = 0; i < TTEPAGES(sz); i++) { 6455 hat_setstat(sfmmup->sfmmu_as, vaddr, 6456 MMU_PAGESIZE, rm); 6457 vaddr += MMU_PAGESIZE; 6458 } 6459 } 6460 } 6461 } 6462 6463 if (!pp) 6464 return; 6465 6466 /* 6467 * If software says this page is executable, and the page was 6468 * in fact executed (indicated by hardware exec permission 6469 * being enabled), then set P_EXEC on the page to remember 6470 * that it was executed. The I$ will be flushed when the page 6471 * is reassigned. 6472 */ 6473 if (TTE_EXECUTED(ttep)) { 6474 rm |= P_EXEC; 6475 } else if (rm == 0) { 6476 return; 6477 } 6478 6479 /* 6480 * XXX I want to use cas to update nrm bits but they 6481 * currently belong in common/vm and not in hat where 6482 * they should be. 6483 * The nrm bits are protected by the same mutex as 6484 * the one that protects the page's mapping list. 
6485 */ 6486 ASSERT(sfmmu_mlist_held(pp)); 6487 /* 6488 * If the tte is for a large page, we need to sync all the 6489 * pages covered by the tte. 6490 */ 6491 if (sz != TTE8K) { 6492 ASSERT(pp->p_szc != 0); 6493 pp = PP_GROUPLEADER(pp, sz); 6494 ASSERT(sfmmu_mlist_held(pp)); 6495 } 6496 6497 /* Get number of pages from tte size. */ 6498 npgs = TTEPAGES(sz); 6499 6500 do { 6501 ASSERT(pp); 6502 ASSERT(sfmmu_mlist_held(pp)); 6503 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6504 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)) || 6505 ((rm & P_EXEC) != 0 && !PP_ISEXEC(pp))) 6506 hat_page_setattr(pp, rm); 6507 6508 /* 6509 * Are we done? If not, we must have a large mapping. 6510 * For large mappings we need to sync the rest of the pages 6511 * covered by this tte; goto the next page. 6512 */ 6513 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6514 } 6515 6516 /* 6517 * Execute pre-callback handler of each pa_hment linked to pp 6518 * 6519 * Inputs: 6520 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6521 * capture_cpus: pointer to return value (below) 6522 * 6523 * Returns: 6524 * Propagates the subsystem callback return values back to the caller; 6525 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6526 * is zero if all of the pa_hments are of a type that do not require 6527 * capturing CPUs prior to suspending the mapping, else it is 1. 6528 */ 6529 static int 6530 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6531 { 6532 struct sf_hment *sfhmep; 6533 struct pa_hment *pahmep; 6534 int (*f)(caddr_t, uint_t, uint_t, void *); 6535 int ret; 6536 id_t id; 6537 int locked = 0; 6538 kmutex_t *pml; 6539 6540 ASSERT(PAGE_EXCL(pp)); 6541 if (!sfmmu_mlist_held(pp)) { 6542 pml = sfmmu_mlist_enter(pp); 6543 locked = 1; 6544 } 6545 6546 if (capture_cpus) 6547 *capture_cpus = 0; 6548 6549 top: 6550 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6551 /* 6552 * skip sf_hments corresponding to VA<->PA mappings; 6553 * for pa_hment's, hme_tte.ll is zero 6554 */ 6555 if (!IS_PAHME(sfhmep)) 6556 continue; 6557 6558 pahmep = sfhmep->hme_data; 6559 ASSERT(pahmep != NULL); 6560 6561 /* 6562 * skip if pre-handler has been called earlier in this loop 6563 */ 6564 if (pahmep->flags & flag) 6565 continue; 6566 6567 id = pahmep->cb_id; 6568 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6569 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6570 *capture_cpus = 1; 6571 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6572 pahmep->flags |= flag; 6573 continue; 6574 } 6575 6576 /* 6577 * Drop the mapping list lock to avoid locking order issues. 6578 */ 6579 if (locked) 6580 sfmmu_mlist_exit(pml); 6581 6582 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6583 if (ret != 0) 6584 return (ret); /* caller must do the cleanup */ 6585 6586 if (locked) { 6587 pml = sfmmu_mlist_enter(pp); 6588 pahmep->flags |= flag; 6589 goto top; 6590 } 6591 6592 pahmep->flags |= flag; 6593 } 6594 6595 if (locked) 6596 sfmmu_mlist_exit(pml); 6597 6598 return (0); 6599 } 6600 6601 /* 6602 * Execute post-callback handler of each pa_hment linked to pp 6603 * 6604 * Same overall assumptions and restrictions apply as for 6605 * hat_pageprocess_precallbacks(). 
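 *
 * A hedged sketch of the handler shapes a subsystem supplies (the
 * names are hypothetical; the signatures mirror the prehandler and
 * posthandler entries of sfmmu_cb_table used by these routines):
 *
 *	int xx_pre(caddr_t addr, uint_t len, uint_t flag, void *pvt);
 *	int xx_post(caddr_t addr, uint_t len, uint_t flag, void *pvt,
 *	    pfn_t newpfn);
 *
 * The pre-handler is invoked with HAT_PRESUSPEND and later HAT_SUSPEND
 * and must return 0 for the relocation to proceed; the post-handler is
 * invoked with HAT_UNSUSPEND (or HAT_POSTUNSUSPEND on failure), is
 * passed the constituent pfn computed below, and must return 0 or the
 * system panics.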
6606 */ 6607 static void 6608 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6609 { 6610 pfn_t pgpfn = pp->p_pagenum; 6611 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6612 pfn_t newpfn; 6613 struct sf_hment *sfhmep; 6614 struct pa_hment *pahmep; 6615 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6616 id_t id; 6617 int locked = 0; 6618 kmutex_t *pml; 6619 6620 ASSERT(PAGE_EXCL(pp)); 6621 if (!sfmmu_mlist_held(pp)) { 6622 pml = sfmmu_mlist_enter(pp); 6623 locked = 1; 6624 } 6625 6626 top: 6627 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6628 /* 6629 * skip sf_hments corresponding to VA<->PA mappings; 6630 * for pa_hment's, hme_tte.ll is zero 6631 */ 6632 if (!IS_PAHME(sfhmep)) 6633 continue; 6634 6635 pahmep = sfhmep->hme_data; 6636 ASSERT(pahmep != NULL); 6637 6638 if ((pahmep->flags & flag) == 0) 6639 continue; 6640 6641 pahmep->flags &= ~flag; 6642 6643 id = pahmep->cb_id; 6644 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6645 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6646 continue; 6647 6648 /* 6649 * Convert the base page PFN into the constituent PFN 6650 * which is needed by the callback handler. 6651 */ 6652 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6653 6654 /* 6655 * Drop the mapping list lock to avoid locking order issues. 6656 */ 6657 if (locked) 6658 sfmmu_mlist_exit(pml); 6659 6660 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6661 != 0) 6662 panic("sfmmu: posthandler failed"); 6663 6664 if (locked) { 6665 pml = sfmmu_mlist_enter(pp); 6666 goto top; 6667 } 6668 } 6669 6670 if (locked) 6671 sfmmu_mlist_exit(pml); 6672 } 6673 6674 /* 6675 * Suspend locked kernel mapping 6676 */ 6677 void 6678 hat_pagesuspend(struct page *pp) 6679 { 6680 struct sf_hment *sfhmep; 6681 sfmmu_t *sfmmup; 6682 tte_t tte, ttemod; 6683 struct hme_blk *hmeblkp; 6684 caddr_t addr; 6685 int index, cons; 6686 cpuset_t cpuset; 6687 6688 ASSERT(PAGE_EXCL(pp)); 6689 ASSERT(sfmmu_mlist_held(pp)); 6690 6691 mutex_enter(&kpr_suspendlock); 6692 6693 /* 6694 * We're about to suspend a kernel mapping so mark this thread as 6695 * non-traceable by DTrace. This prevents us from running into issues 6696 * with probe context trying to touch a suspended page 6697 * in the relocation codepath itself. 6698 */ 6699 curthread->t_flag |= T_DONTDTRACE; 6700 6701 index = PP_MAPINDEX(pp); 6702 cons = TTE8K; 6703 6704 retry: 6705 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6706 6707 if (IS_PAHME(sfhmep)) 6708 continue; 6709 6710 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6711 continue; 6712 6713 /* 6714 * Loop until we successfully set the suspend bit in 6715 * the TTE. 6716 */ 6717 again: 6718 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6719 ASSERT(TTE_IS_VALID(&tte)); 6720 6721 ttemod = tte; 6722 TTE_SET_SUSPEND(&ttemod); 6723 if (sfmmu_modifytte_try(&tte, &ttemod, 6724 &sfhmep->hme_tte) < 0) 6725 goto again; 6726 6727 /* 6728 * Invalidate TSB entry 6729 */ 6730 hmeblkp = sfmmu_hmetohblk(sfhmep); 6731 6732 sfmmup = hblktosfmmu(hmeblkp); 6733 ASSERT(sfmmup == ksfmmup); 6734 ASSERT(!hmeblkp->hblk_shared); 6735 6736 addr = tte_to_vaddr(hmeblkp, tte); 6737 6738 /* 6739 * No need to make sure that the TSB for this sfmmu is 6740 * not being relocated since it is ksfmmup and thus it 6741 * will never be relocated. 
6742 */ 6743 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6744 6745 /* 6746 * Update xcall stats 6747 */ 6748 cpuset = cpu_ready_set; 6749 CPUSET_DEL(cpuset, CPU->cpu_id); 6750 6751 /* LINTED: constant in conditional context */ 6752 SFMMU_XCALL_STATS(ksfmmup); 6753 6754 /* 6755 * Flush TLB entry on remote CPU's 6756 */ 6757 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6758 (uint64_t)ksfmmup); 6759 xt_sync(cpuset); 6760 6761 /* 6762 * Flush TLB entry on local CPU 6763 */ 6764 vtag_flushpage(addr, (uint64_t)ksfmmup); 6765 } 6766 6767 while (index != 0) { 6768 index = index >> 1; 6769 if (index != 0) 6770 cons++; 6771 if (index & 0x1) { 6772 pp = PP_GROUPLEADER(pp, cons); 6773 goto retry; 6774 } 6775 } 6776 } 6777 6778 #ifdef DEBUG 6779 6780 #define N_PRLE 1024 6781 struct prle { 6782 page_t *targ; 6783 page_t *repl; 6784 int status; 6785 int pausecpus; 6786 hrtime_t whence; 6787 }; 6788 6789 static struct prle page_relocate_log[N_PRLE]; 6790 static int prl_entry; 6791 static kmutex_t prl_mutex; 6792 6793 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6794 mutex_enter(&prl_mutex); \ 6795 page_relocate_log[prl_entry].targ = *(t); \ 6796 page_relocate_log[prl_entry].repl = *(r); \ 6797 page_relocate_log[prl_entry].status = (s); \ 6798 page_relocate_log[prl_entry].pausecpus = (p); \ 6799 page_relocate_log[prl_entry].whence = gethrtime(); \ 6800 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6801 mutex_exit(&prl_mutex); 6802 6803 #else /* !DEBUG */ 6804 #define PAGE_RELOCATE_LOG(t, r, s, p) 6805 #endif 6806 6807 /* 6808 * Core Kernel Page Relocation Algorithm 6809 * 6810 * Input: 6811 * 6812 * target : constituent pages are SE_EXCL locked. 6813 * replacement: constituent pages are SE_EXCL locked. 6814 * 6815 * Output: 6816 * 6817 * nrelocp: number of pages relocated 6818 */ 6819 int 6820 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6821 { 6822 page_t *targ, *repl; 6823 page_t *tpp, *rpp; 6824 kmutex_t *low, *high; 6825 spgcnt_t npages, i; 6826 page_t *pl = NULL; 6827 uint_t ppattr; 6828 int old_pil; 6829 cpuset_t cpuset; 6830 int cap_cpus; 6831 int ret; 6832 #ifdef VAC 6833 int cflags = 0; 6834 #endif 6835 6836 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6837 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6838 return (EAGAIN); 6839 } 6840 6841 mutex_enter(&kpr_mutex); 6842 kreloc_thread = curthread; 6843 6844 targ = *target; 6845 repl = *replacement; 6846 ASSERT(repl != NULL); 6847 ASSERT(targ->p_szc == repl->p_szc); 6848 6849 npages = page_get_pagecnt(targ->p_szc); 6850 6851 /* 6852 * unload VA<->PA mappings that are not locked 6853 */ 6854 tpp = targ; 6855 for (i = 0; i < npages; i++) { 6856 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6857 tpp++; 6858 } 6859 6860 /* 6861 * Do "presuspend" callbacks, in a context from which we can still 6862 * block as needed. Note that we don't hold the mapping list lock 6863 * of "targ" at this point due to potential locking order issues; 6864 * we assume that between the hat_pageunload() above and holding 6865 * the SE_EXCL lock that the mapping list *cannot* change at this 6866 * point. 6867 */ 6868 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6869 if (ret != 0) { 6870 /* 6871 * EIO translates to fatal error, for all others cleanup 6872 * and return EAGAIN. 
6873 */ 6874 ASSERT(ret != EIO); 6875 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6876 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6877 kreloc_thread = NULL; 6878 mutex_exit(&kpr_mutex); 6879 return (EAGAIN); 6880 } 6881 6882 /* 6883 * acquire p_mapping list lock for both the target and replacement 6884 * root pages. 6885 * 6886 * low and high refer to the need to grab the mlist locks in a 6887 * specific order in order to prevent race conditions. Thus the 6888 * lower lock must be grabbed before the higher lock. 6889 * 6890 * This will block hat_unload's accessing p_mapping list. Since 6891 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6892 * blocked. Thus, no one else will be accessing the p_mapping list 6893 * while we suspend and reload the locked mapping below. 6894 */ 6895 tpp = targ; 6896 rpp = repl; 6897 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6898 6899 kpreempt_disable(); 6900 6901 /* 6902 * We raise our PIL to 13 so that we don't get captured by 6903 * another CPU or pinned by an interrupt thread. We can't go to 6904 * PIL 14 since the nexus driver(s) may need to interrupt at 6905 * that level in the case of IOMMU pseudo mappings. 6906 */ 6907 cpuset = cpu_ready_set; 6908 CPUSET_DEL(cpuset, CPU->cpu_id); 6909 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6910 old_pil = splr(XCALL_PIL); 6911 } else { 6912 old_pil = -1; 6913 xc_attention(cpuset); 6914 } 6915 ASSERT(getpil() == XCALL_PIL); 6916 6917 /* 6918 * Now do suspend callbacks. In the case of an IOMMU mapping 6919 * this will suspend all DMA activity to the page while it is 6920 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6921 * may be captured at this point we should have acquired any needed 6922 * locks in the presuspend callback. 6923 */ 6924 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6925 if (ret != 0) { 6926 repl = targ; 6927 goto suspend_fail; 6928 } 6929 6930 /* 6931 * Raise the PIL yet again, this time to block all high-level 6932 * interrupts on this CPU. This is necessary to prevent an 6933 * interrupt routine from pinning the thread which holds the 6934 * mapping suspended and then touching the suspended page. 6935 * 6936 * Once the page is suspended we also need to be careful to 6937 * avoid calling any functions which touch any seg_kmem memory 6938 * since that memory may be backed by the very page we are 6939 * relocating in here! 6940 */ 6941 hat_pagesuspend(targ); 6942 6943 /* 6944 * Now that we are confident everybody has stopped using this page, 6945 * copy the page contents. Note we use a physical copy to prevent 6946 * locking issues and to avoid fpRAS because we can't handle it in 6947 * this context. 6948 */ 6949 for (i = 0; i < npages; i++, tpp++, rpp++) { 6950 #ifdef VAC 6951 /* 6952 * If the replacement has a different vcolor than 6953 * the one being replacd, we need to handle VAC 6954 * consistency for it just as we were setting up 6955 * a new mapping to it. 6956 */ 6957 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) && 6958 (tpp->p_vcolor != rpp->p_vcolor) && 6959 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) { 6960 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp)); 6961 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6962 rpp->p_pagenum); 6963 } 6964 #endif 6965 /* 6966 * Copy the contents of the page. 6967 */ 6968 ppcopy_kernel(tpp, rpp); 6969 } 6970 6971 tpp = targ; 6972 rpp = repl; 6973 for (i = 0; i < npages; i++, tpp++, rpp++) { 6974 /* 6975 * Copy attributes. VAC consistency was handled above, 6976 * if required. 
6977 */ 6978 ppattr = hat_page_getattr(tpp, (P_MOD | P_REF | P_RO)); 6979 page_clr_all_props(rpp, 0); 6980 page_set_props(rpp, ppattr); 6981 rpp->p_index = tpp->p_index; 6982 tpp->p_index = 0; 6983 #ifdef VAC 6984 rpp->p_vcolor = tpp->p_vcolor; 6985 #endif 6986 } 6987 6988 /* 6989 * First, unsuspend the page, if we set the suspend bit, and transfer 6990 * the mapping list from the target page to the replacement page. 6991 * Next process postcallbacks; since pa_hment's are linked only to the 6992 * p_mapping list of root page, we don't iterate over the constituent 6993 * pages. 6994 */ 6995 hat_pagereload(targ, repl); 6996 6997 suspend_fail: 6998 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6999 7000 /* 7001 * Now lower our PIL and release any captured CPUs since we 7002 * are out of the "danger zone". After this it will again be 7003 * safe to acquire adaptive mutex locks, or to drop them... 7004 */ 7005 if (old_pil != -1) { 7006 splx(old_pil); 7007 } else { 7008 xc_dismissed(cpuset); 7009 } 7010 7011 kpreempt_enable(); 7012 7013 sfmmu_mlist_reloc_exit(low, high); 7014 7015 /* 7016 * Postsuspend callbacks should drop any locks held across 7017 * the suspend callbacks. As before, we don't hold the mapping 7018 * list lock at this point.. our assumption is that the mapping 7019 * list still can't change due to our holding SE_EXCL lock and 7020 * there being no unlocked mappings left. Hence the restriction 7021 * on calling context to hat_delete_callback() 7022 */ 7023 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 7024 if (ret != 0) { 7025 /* 7026 * The second presuspend call failed: we got here through 7027 * the suspend_fail label above. 7028 */ 7029 ASSERT(ret != EIO); 7030 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 7031 kreloc_thread = NULL; 7032 mutex_exit(&kpr_mutex); 7033 return (EAGAIN); 7034 } 7035 7036 /* 7037 * Now that we're out of the performance critical section we can 7038 * take care of updating the hash table, since we still 7039 * hold all the pages locked SE_EXCL at this point we 7040 * needn't worry about things changing out from under us. 7041 */ 7042 tpp = targ; 7043 rpp = repl; 7044 for (i = 0; i < npages; i++, tpp++, rpp++) { 7045 7046 /* 7047 * replace targ with replacement in page_hash table 7048 */ 7049 targ = tpp; 7050 page_relocate_hash(rpp, targ); 7051 7052 /* 7053 * concatenate target; caller of platform_page_relocate() 7054 * expects target to be concatenated after returning. 7055 */ 7056 ASSERT(targ->p_next == targ); 7057 ASSERT(targ->p_prev == targ); 7058 page_list_concat(&pl, &targ); 7059 } 7060 7061 ASSERT(*target == pl); 7062 *nrelocp = npages; 7063 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 7064 kreloc_thread = NULL; 7065 mutex_exit(&kpr_mutex); 7066 return (0); 7067 } 7068 7069 /* 7070 * Called when stray pa_hments are found attached to a page which is 7071 * being freed. Notify the subsystem which attached the pa_hment of 7072 * the error if it registered a suitable handler, else panic. 7073 */ 7074 static void 7075 sfmmu_pahment_leaked(struct pa_hment *pahmep) 7076 { 7077 id_t cb_id = pahmep->cb_id; 7078 7079 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 7080 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 7081 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 7082 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 7083 return; /* non-fatal */ 7084 } 7085 panic("pa_hment leaked: 0x%p", (void *)pahmep); 7086 } 7087 7088 /* 7089 * Remove all mappings to page 'pp'. 
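 * hat_page_relocate() above calls this with forceflag set to
 * SFMMU_KERNEL_RELOC, in which case locked kernel mappings are not
 * unloaded here; they are left in place to be suspended by
 * hat_pagesuspend() instead.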
7090 */ 7091 int 7092 hat_pageunload(struct page *pp, uint_t forceflag) 7093 { 7094 struct page *origpp = pp; 7095 struct sf_hment *sfhme, *tmphme; 7096 struct hme_blk *hmeblkp; 7097 kmutex_t *pml; 7098 #ifdef VAC 7099 kmutex_t *pmtx; 7100 #endif 7101 cpuset_t cpuset, tset; 7102 int index, cons; 7103 int xhme_blks; 7104 int pa_hments; 7105 7106 ASSERT(PAGE_EXCL(pp)); 7107 7108 retry_xhat: 7109 tmphme = NULL; 7110 xhme_blks = 0; 7111 pa_hments = 0; 7112 CPUSET_ZERO(cpuset); 7113 7114 pml = sfmmu_mlist_enter(pp); 7115 7116 #ifdef VAC 7117 if (pp->p_kpmref) 7118 sfmmu_kpm_pageunload(pp); 7119 ASSERT(!PP_ISMAPPED_KPM(pp)); 7120 #endif 7121 7122 index = PP_MAPINDEX(pp); 7123 cons = TTE8K; 7124 retry: 7125 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7126 tmphme = sfhme->hme_next; 7127 7128 if (IS_PAHME(sfhme)) { 7129 ASSERT(sfhme->hme_data != NULL); 7130 pa_hments++; 7131 continue; 7132 } 7133 7134 hmeblkp = sfmmu_hmetohblk(sfhme); 7135 if (hmeblkp->hblk_xhat_bit) { 7136 struct xhat_hme_blk *xblk = 7137 (struct xhat_hme_blk *)hmeblkp; 7138 7139 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 7140 pp, forceflag, XBLK2PROVBLK(xblk)); 7141 7142 xhme_blks = 1; 7143 continue; 7144 } 7145 7146 /* 7147 * If there are kernel mappings don't unload them, they will 7148 * be suspended. 7149 */ 7150 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7151 hmeblkp->hblk_tag.htag_id == ksfmmup) 7152 continue; 7153 7154 tset = sfmmu_pageunload(pp, sfhme, cons); 7155 CPUSET_OR(cpuset, tset); 7156 } 7157 7158 while (index != 0) { 7159 index = index >> 1; 7160 if (index != 0) 7161 cons++; 7162 if (index & 0x1) { 7163 /* Go to leading page */ 7164 pp = PP_GROUPLEADER(pp, cons); 7165 ASSERT(sfmmu_mlist_held(pp)); 7166 goto retry; 7167 } 7168 } 7169 7170 /* 7171 * cpuset may be empty if the page was only mapped by segkpm, 7172 * in which case we won't actually cross-trap. 7173 */ 7174 xt_sync(cpuset); 7175 7176 /* 7177 * The page should have no mappings at this point, unless 7178 * we were called from hat_page_relocate() in which case we 7179 * leave the locked mappings which will be suspended later. 7180 */ 7181 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 7182 (forceflag == SFMMU_KERNEL_RELOC)); 7183 7184 #ifdef VAC 7185 if (PP_ISTNC(pp)) { 7186 if (cons == TTE8K) { 7187 pmtx = sfmmu_page_enter(pp); 7188 PP_CLRTNC(pp); 7189 sfmmu_page_exit(pmtx); 7190 } else { 7191 conv_tnc(pp, cons); 7192 } 7193 } 7194 #endif /* VAC */ 7195 7196 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7197 /* 7198 * Unlink any pa_hments and free them, calling back 7199 * the responsible subsystem to notify it of the error. 7200 * This can occur in situations such as drivers leaking 7201 * DMA handles: naughty, but common enough that we'd like 7202 * to keep the system running rather than bringing it 7203 * down with an obscure error like "pa_hment leaked" 7204 * which doesn't aid the user in debugging their driver. 7205 */ 7206 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7207 tmphme = sfhme->hme_next; 7208 if (IS_PAHME(sfhme)) { 7209 struct pa_hment *pahmep = sfhme->hme_data; 7210 sfmmu_pahment_leaked(pahmep); 7211 HME_SUB(sfhme, pp); 7212 kmem_cache_free(pa_hment_cache, pahmep); 7213 } 7214 } 7215 7216 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 7217 } 7218 7219 sfmmu_mlist_exit(pml); 7220 7221 /* 7222 * XHAT may not have finished unloading pages 7223 * because some other thread was waiting for 7224 * mlist lock and XHAT_PAGEUNLOAD let it do 7225 * the job. 
7226 */ 7227 if (xhme_blks) { 7228 pp = origpp; 7229 goto retry_xhat; 7230 } 7231 7232 return (0); 7233 } 7234 7235 cpuset_t 7236 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7237 { 7238 struct hme_blk *hmeblkp; 7239 sfmmu_t *sfmmup; 7240 tte_t tte, ttemod; 7241 #ifdef DEBUG 7242 tte_t orig_old; 7243 #endif /* DEBUG */ 7244 caddr_t addr; 7245 int ttesz; 7246 int ret; 7247 cpuset_t cpuset; 7248 7249 ASSERT(pp != NULL); 7250 ASSERT(sfmmu_mlist_held(pp)); 7251 ASSERT(!PP_ISKAS(pp)); 7252 7253 CPUSET_ZERO(cpuset); 7254 7255 hmeblkp = sfmmu_hmetohblk(sfhme); 7256 7257 readtte: 7258 sfmmu_copytte(&sfhme->hme_tte, &tte); 7259 if (TTE_IS_VALID(&tte)) { 7260 sfmmup = hblktosfmmu(hmeblkp); 7261 ttesz = get_hblk_ttesz(hmeblkp); 7262 /* 7263 * Only unload mappings of 'cons' size. 7264 */ 7265 if (ttesz != cons) 7266 return (cpuset); 7267 7268 /* 7269 * Note that we have p_mapping lock, but no hash lock here. 7270 * hblk_unload() has to have both hash lock AND p_mapping 7271 * lock before it tries to modify tte. So, the tte could 7272 * not become invalid in the sfmmu_modifytte_try() below. 7273 */ 7274 ttemod = tte; 7275 #ifdef DEBUG 7276 orig_old = tte; 7277 #endif /* DEBUG */ 7278 7279 TTE_SET_INVALID(&ttemod); 7280 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7281 if (ret < 0) { 7282 #ifdef DEBUG 7283 /* only R/M bits can change. */ 7284 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7285 #endif /* DEBUG */ 7286 goto readtte; 7287 } 7288 7289 if (ret == 0) { 7290 panic("pageunload: cas failed?"); 7291 } 7292 7293 addr = tte_to_vaddr(hmeblkp, tte); 7294 7295 if (hmeblkp->hblk_shared) { 7296 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7297 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7298 sf_region_t *rgnp; 7299 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7300 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7301 ASSERT(srdp != NULL); 7302 rgnp = srdp->srd_hmergnp[rid]; 7303 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7304 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7305 sfmmu_ttesync(NULL, addr, &tte, pp); 7306 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7307 atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1); 7308 } else { 7309 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7310 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 7311 7312 /* 7313 * We need to flush the page from the virtual cache 7314 * in order to prevent a virtual cache alias 7315 * inconsistency. The particular scenario we need 7316 * to worry about is: 7317 * Given: va1 and va2 are two virtual address that 7318 * alias and will map the same physical address. 7319 * 1. mapping exists from va1 to pa and data has 7320 * been read into the cache. 7321 * 2. unload va1. 7322 * 3. load va2 and modify data using va2. 7323 * 4 unload va2. 7324 * 5. load va1 and reference data. Unless we flush 7325 * the data cache when we unload we will get 7326 * stale data. 7327 * This scenario is taken care of by using virtual 7328 * page coloring. 7329 */ 7330 if (sfmmup->sfmmu_ismhat) { 7331 /* 7332 * Flush TSBs, TLBs and caches 7333 * of every process 7334 * sharing this ism segment. 
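 * Any hat attached to the ISM segment may have this translation
 * cached, so the cpuset returned to the caller is widened to
 * cpu_ready_set below and the caller's xt_sync() covers every ready
 * CPU.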
7335 */ 7336 sfmmu_hat_lock_all(); 7337 mutex_enter(&ism_mlist_lock); 7338 kpreempt_disable(); 7339 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7340 pp->p_pagenum, CACHE_NO_FLUSH); 7341 kpreempt_enable(); 7342 mutex_exit(&ism_mlist_lock); 7343 sfmmu_hat_unlock_all(); 7344 cpuset = cpu_ready_set; 7345 } else { 7346 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7347 cpuset = sfmmup->sfmmu_cpusran; 7348 } 7349 } 7350 7351 /* 7352 * Hme_sub has to run after ttesync() and a_rss update. 7353 * See hblk_unload(). 7354 */ 7355 HME_SUB(sfhme, pp); 7356 membar_stst(); 7357 7358 /* 7359 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7360 * since pteload may have done a HME_ADD() right after 7361 * we did the HME_SUB() above. Hmecnt is now maintained 7362 * by cas only. no lock guranteed its value. The only 7363 * gurantee we have is the hmecnt should not be less than 7364 * what it should be so the hblk will not be taken away. 7365 * It's also important that we decremented the hmecnt after 7366 * we are done with hmeblkp so that this hmeblk won't be 7367 * stolen. 7368 */ 7369 ASSERT(hmeblkp->hblk_hmecnt > 0); 7370 ASSERT(hmeblkp->hblk_vcnt > 0); 7371 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 7372 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 7373 /* 7374 * This is bug 4063182. 7375 * XXX: fixme 7376 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7377 * !hmeblkp->hblk_lckcnt); 7378 */ 7379 } else { 7380 panic("invalid tte? pp %p &tte %p", 7381 (void *)pp, (void *)&tte); 7382 } 7383 7384 return (cpuset); 7385 } 7386 7387 /* 7388 * While relocating a kernel page, this function will move the mappings 7389 * from tpp to dpp and modify any associated data with these mappings. 7390 * It also unsuspends the suspended kernel mapping. 7391 */ 7392 static void 7393 hat_pagereload(struct page *tpp, struct page *dpp) 7394 { 7395 struct sf_hment *sfhme; 7396 tte_t tte, ttemod; 7397 int index, cons; 7398 7399 ASSERT(getpil() == PIL_MAX); 7400 ASSERT(sfmmu_mlist_held(tpp)); 7401 ASSERT(sfmmu_mlist_held(dpp)); 7402 7403 index = PP_MAPINDEX(tpp); 7404 cons = TTE8K; 7405 7406 /* Update real mappings to the page */ 7407 retry: 7408 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7409 if (IS_PAHME(sfhme)) 7410 continue; 7411 sfmmu_copytte(&sfhme->hme_tte, &tte); 7412 ttemod = tte; 7413 7414 /* 7415 * replace old pfn with new pfn in TTE 7416 */ 7417 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7418 7419 /* 7420 * clear suspend bit 7421 */ 7422 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7423 TTE_CLR_SUSPEND(&ttemod); 7424 7425 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7426 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7427 7428 /* 7429 * set hme_page point to new page 7430 */ 7431 sfhme->hme_page = dpp; 7432 } 7433 7434 /* 7435 * move p_mapping list from old page to new page 7436 */ 7437 dpp->p_mapping = tpp->p_mapping; 7438 tpp->p_mapping = NULL; 7439 dpp->p_share = tpp->p_share; 7440 tpp->p_share = 0; 7441 7442 while (index != 0) { 7443 index = index >> 1; 7444 if (index != 0) 7445 cons++; 7446 if (index & 0x1) { 7447 tpp = PP_GROUPLEADER(tpp, cons); 7448 dpp = PP_GROUPLEADER(dpp, cons); 7449 goto retry; 7450 } 7451 } 7452 7453 curthread->t_flag &= ~T_DONTDTRACE; 7454 mutex_exit(&kpr_suspendlock); 7455 } 7456 7457 uint_t 7458 hat_pagesync(struct page *pp, uint_t clearflag) 7459 { 7460 struct sf_hment *sfhme, *tmphme = NULL; 7461 struct hme_blk *hmeblkp; 7462 kmutex_t *pml; 7463 cpuset_t cpuset, tset; 7464 int index, cons; 7465 extern ulong_t po_share; 7466 page_t 
*save_pp = pp; 7467 int stop_on_sh = 0; 7468 uint_t shcnt; 7469 7470 CPUSET_ZERO(cpuset); 7471 7472 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7473 return (PP_GENERIC_ATTR(pp)); 7474 } 7475 7476 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7477 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7478 return (PP_GENERIC_ATTR(pp)); 7479 } 7480 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7481 return (PP_GENERIC_ATTR(pp)); 7482 } 7483 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7484 if (pp->p_share > po_share) { 7485 hat_page_setattr(pp, P_REF); 7486 return (PP_GENERIC_ATTR(pp)); 7487 } 7488 stop_on_sh = 1; 7489 shcnt = 0; 7490 } 7491 } 7492 7493 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7494 pml = sfmmu_mlist_enter(pp); 7495 index = PP_MAPINDEX(pp); 7496 cons = TTE8K; 7497 retry: 7498 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7499 /* 7500 * We need to save the next hment on the list since 7501 * it is possible for pagesync to remove an invalid hment 7502 * from the list. 7503 */ 7504 tmphme = sfhme->hme_next; 7505 if (IS_PAHME(sfhme)) 7506 continue; 7507 /* 7508 * If we are looking for large mappings and this hme doesn't 7509 * reach the range we are seeking, just ignore it. 7510 */ 7511 hmeblkp = sfmmu_hmetohblk(sfhme); 7512 if (hmeblkp->hblk_xhat_bit) 7513 continue; 7514 7515 if (hme_size(sfhme) < cons) 7516 continue; 7517 7518 if (stop_on_sh) { 7519 if (hmeblkp->hblk_shared) { 7520 sf_srd_t *srdp = hblktosrd(hmeblkp); 7521 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7522 sf_region_t *rgnp; 7523 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7524 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7525 ASSERT(srdp != NULL); 7526 rgnp = srdp->srd_hmergnp[rid]; 7527 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7528 rgnp, rid); 7529 shcnt += rgnp->rgn_refcnt; 7530 } else { 7531 shcnt++; 7532 } 7533 if (shcnt > po_share) { 7534 /* 7535 * tell the pager to spare the page this time 7536 * around. 7537 */ 7538 hat_page_setattr(save_pp, P_REF); 7539 index = 0; 7540 break; 7541 } 7542 } 7543 tset = sfmmu_pagesync(pp, sfhme, 7544 clearflag & ~HAT_SYNC_STOPON_RM); 7545 CPUSET_OR(cpuset, tset); 7546 7547 /* 7548 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7549 * as the "ref" or "mod" is set or share cnt exceeds po_share. 
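 * For example, a caller that only needs to learn whether the page has
 * been modified can pass (HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) and
 * the scan stops as soon as a modified translation has been folded
 * into the page's attributes.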
7550 */ 7551 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7552 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7553 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7554 index = 0; 7555 break; 7556 } 7557 } 7558 7559 while (index) { 7560 index = index >> 1; 7561 cons++; 7562 if (index & 0x1) { 7563 /* Go to leading page */ 7564 pp = PP_GROUPLEADER(pp, cons); 7565 goto retry; 7566 } 7567 } 7568 7569 xt_sync(cpuset); 7570 sfmmu_mlist_exit(pml); 7571 return (PP_GENERIC_ATTR(save_pp)); 7572 } 7573 7574 /* 7575 * Get all the hardware dependent attributes for a page struct 7576 */ 7577 static cpuset_t 7578 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7579 uint_t clearflag) 7580 { 7581 caddr_t addr; 7582 tte_t tte, ttemod; 7583 struct hme_blk *hmeblkp; 7584 int ret; 7585 sfmmu_t *sfmmup; 7586 cpuset_t cpuset; 7587 7588 ASSERT(pp != NULL); 7589 ASSERT(sfmmu_mlist_held(pp)); 7590 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7591 (clearflag == HAT_SYNC_ZERORM)); 7592 7593 SFMMU_STAT(sf_pagesync); 7594 7595 CPUSET_ZERO(cpuset); 7596 7597 sfmmu_pagesync_retry: 7598 7599 sfmmu_copytte(&sfhme->hme_tte, &tte); 7600 if (TTE_IS_VALID(&tte)) { 7601 hmeblkp = sfmmu_hmetohblk(sfhme); 7602 sfmmup = hblktosfmmu(hmeblkp); 7603 addr = tte_to_vaddr(hmeblkp, tte); 7604 if (clearflag == HAT_SYNC_ZERORM) { 7605 ttemod = tte; 7606 TTE_CLR_RM(&ttemod); 7607 ret = sfmmu_modifytte_try(&tte, &ttemod, 7608 &sfhme->hme_tte); 7609 if (ret < 0) { 7610 /* 7611 * cas failed and the new value is not what 7612 * we want. 7613 */ 7614 goto sfmmu_pagesync_retry; 7615 } 7616 7617 if (ret > 0) { 7618 /* we win the cas */ 7619 if (hmeblkp->hblk_shared) { 7620 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7621 uint_t rid = 7622 hmeblkp->hblk_tag.htag_rid; 7623 sf_region_t *rgnp; 7624 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7625 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7626 ASSERT(srdp != NULL); 7627 rgnp = srdp->srd_hmergnp[rid]; 7628 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7629 srdp, rgnp, rid); 7630 cpuset = sfmmu_rgntlb_demap(addr, 7631 rgnp, hmeblkp, 1); 7632 } else { 7633 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7634 0, 0); 7635 cpuset = sfmmup->sfmmu_cpusran; 7636 } 7637 } 7638 } 7639 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7640 &tte, pp); 7641 } 7642 return (cpuset); 7643 } 7644 7645 /* 7646 * Remove write permission from a mappings to a page, so that 7647 * we can detect the next modification of it. This requires modifying 7648 * the TTE then invalidating (demap) any TLB entry using that TTE. 7649 * This code is similar to sfmmu_pagesync(). 7650 */ 7651 static cpuset_t 7652 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7653 { 7654 caddr_t addr; 7655 tte_t tte; 7656 tte_t ttemod; 7657 struct hme_blk *hmeblkp; 7658 int ret; 7659 sfmmu_t *sfmmup; 7660 cpuset_t cpuset; 7661 7662 ASSERT(pp != NULL); 7663 ASSERT(sfmmu_mlist_held(pp)); 7664 7665 CPUSET_ZERO(cpuset); 7666 SFMMU_STAT(sf_clrwrt); 7667 7668 retry: 7669 7670 sfmmu_copytte(&sfhme->hme_tte, &tte); 7671 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7672 hmeblkp = sfmmu_hmetohblk(sfhme); 7673 7674 /* 7675 * xhat mappings should never be to a VMODSORT page. 
7676 */ 7677 ASSERT(hmeblkp->hblk_xhat_bit == 0); 7678 7679 sfmmup = hblktosfmmu(hmeblkp); 7680 addr = tte_to_vaddr(hmeblkp, tte); 7681 7682 ttemod = tte; 7683 TTE_CLR_WRT(&ttemod); 7684 TTE_CLR_MOD(&ttemod); 7685 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7686 7687 /* 7688 * if cas failed and the new value is not what 7689 * we want retry 7690 */ 7691 if (ret < 0) 7692 goto retry; 7693 7694 /* we win the cas */ 7695 if (ret > 0) { 7696 if (hmeblkp->hblk_shared) { 7697 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7698 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7699 sf_region_t *rgnp; 7700 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7701 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7702 ASSERT(srdp != NULL); 7703 rgnp = srdp->srd_hmergnp[rid]; 7704 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7705 srdp, rgnp, rid); 7706 cpuset = sfmmu_rgntlb_demap(addr, 7707 rgnp, hmeblkp, 1); 7708 } else { 7709 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7710 cpuset = sfmmup->sfmmu_cpusran; 7711 } 7712 } 7713 } 7714 7715 return (cpuset); 7716 } 7717 7718 /* 7719 * Walk all mappings of a page, removing write permission and clearing the 7720 * ref/mod bits. This code is similar to hat_pagesync() 7721 */ 7722 static void 7723 hat_page_clrwrt(page_t *pp) 7724 { 7725 struct sf_hment *sfhme; 7726 struct sf_hment *tmphme = NULL; 7727 kmutex_t *pml; 7728 cpuset_t cpuset; 7729 cpuset_t tset; 7730 int index; 7731 int cons; 7732 7733 CPUSET_ZERO(cpuset); 7734 7735 pml = sfmmu_mlist_enter(pp); 7736 index = PP_MAPINDEX(pp); 7737 cons = TTE8K; 7738 retry: 7739 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7740 tmphme = sfhme->hme_next; 7741 7742 /* 7743 * If we are looking for large mappings and this hme doesn't 7744 * reach the range we are seeking, just ignore its. 7745 */ 7746 7747 if (hme_size(sfhme) < cons) 7748 continue; 7749 7750 tset = sfmmu_pageclrwrt(pp, sfhme); 7751 CPUSET_OR(cpuset, tset); 7752 } 7753 7754 while (index) { 7755 index = index >> 1; 7756 cons++; 7757 if (index & 0x1) { 7758 /* Go to leading page */ 7759 pp = PP_GROUPLEADER(pp, cons); 7760 goto retry; 7761 } 7762 } 7763 7764 xt_sync(cpuset); 7765 sfmmu_mlist_exit(pml); 7766 } 7767 7768 /* 7769 * Set the given REF/MOD/RO bits for the given page. 7770 * For a vnode with a sorted v_pages list, we need to change 7771 * the attributes and the v_pages list together under page_vnode_mutex. 7772 */ 7773 void 7774 hat_page_setattr(page_t *pp, uint_t flag) 7775 { 7776 vnode_t *vp = pp->p_vnode; 7777 page_t **listp; 7778 kmutex_t *pmtx; 7779 kmutex_t *vphm = NULL; 7780 int noshuffle; 7781 7782 noshuffle = flag & P_NSH; 7783 flag &= ~P_NSH; 7784 7785 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO | P_EXEC))); 7786 7787 /* 7788 * nothing to do if attribute already set 7789 */ 7790 if ((pp->p_nrm & flag) == flag) 7791 return; 7792 7793 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7794 !noshuffle) { 7795 vphm = page_vnode_mutex(vp); 7796 mutex_enter(vphm); 7797 } 7798 7799 pmtx = sfmmu_page_enter(pp); 7800 pp->p_nrm |= flag; 7801 sfmmu_page_exit(pmtx); 7802 7803 if (vphm != NULL) { 7804 /* 7805 * Some File Systems examine v_pages for NULL w/o 7806 * grabbing the vphm mutex. Must not let it become NULL when 7807 * pp is the only page on the list. 
7808 */ 7809 if (pp->p_vpnext != pp) { 7810 page_vpsub(&vp->v_pages, pp); 7811 if (vp->v_pages != NULL) 7812 listp = &vp->v_pages->p_vpprev->p_vpnext; 7813 else 7814 listp = &vp->v_pages; 7815 page_vpadd(listp, pp); 7816 } 7817 mutex_exit(vphm); 7818 } 7819 } 7820 7821 void 7822 hat_page_clrattr(page_t *pp, uint_t flag) 7823 { 7824 vnode_t *vp = pp->p_vnode; 7825 kmutex_t *pmtx; 7826 7827 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7828 7829 pmtx = sfmmu_page_enter(pp); 7830 7831 /* 7832 * Caller is expected to hold page's io lock for VMODSORT to work 7833 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7834 * bit is cleared. 7835 * We don't have assert to avoid tripping some existing third party 7836 * code. The dirty page is moved back to top of the v_page list 7837 * after IO is done in pvn_write_done(). 7838 */ 7839 pp->p_nrm &= ~flag; 7840 sfmmu_page_exit(pmtx); 7841 7842 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7843 7844 /* 7845 * VMODSORT works by removing write permissions and getting 7846 * a fault when a page is made dirty. At this point 7847 * we need to remove write permission from all mappings 7848 * to this page. 7849 */ 7850 hat_page_clrwrt(pp); 7851 } 7852 } 7853 7854 uint_t 7855 hat_page_getattr(page_t *pp, uint_t flag) 7856 { 7857 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7858 return ((uint_t)(pp->p_nrm & flag)); 7859 } 7860 7861 /* 7862 * DEBUG kernels: verify that a kernel va<->pa translation 7863 * is safe by checking the underlying page_t is in a page 7864 * relocation-safe state. 7865 */ 7866 #ifdef DEBUG 7867 void 7868 sfmmu_check_kpfn(pfn_t pfn) 7869 { 7870 page_t *pp; 7871 int index, cons; 7872 7873 if (hat_check_vtop == 0) 7874 return; 7875 7876 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7877 return; 7878 7879 pp = page_numtopp_nolock(pfn); 7880 if (!pp) 7881 return; 7882 7883 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7884 return; 7885 7886 /* 7887 * Handed a large kernel page, we dig up the root page since we 7888 * know the root page might have the lock also. 7889 */ 7890 if (pp->p_szc != 0) { 7891 index = PP_MAPINDEX(pp); 7892 cons = TTE8K; 7893 again: 7894 while (index != 0) { 7895 index >>= 1; 7896 if (index != 0) 7897 cons++; 7898 if (index & 0x1) { 7899 pp = PP_GROUPLEADER(pp, cons); 7900 goto again; 7901 } 7902 } 7903 } 7904 7905 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7906 return; 7907 7908 /* 7909 * Pages need to be locked or allocated "permanent" (either from 7910 * static_arena arena or explicitly setting PG_NORELOC when calling 7911 * page_create_va()) for VA->PA translations to be valid. 7912 */ 7913 if (!PP_ISNORELOC(pp)) 7914 panic("Illegal VA->PA translation, pp 0x%p not permanent", 7915 (void *)pp); 7916 else 7917 panic("Illegal VA->PA translation, pp 0x%p not locked", 7918 (void *)pp); 7919 } 7920 #endif /* DEBUG */ 7921 7922 /* 7923 * Returns a page frame number for a given virtual address. 7924 * Returns PFN_INVALID to indicate an invalid mapping 7925 */ 7926 pfn_t 7927 hat_getpfnum(struct hat *hat, caddr_t addr) 7928 { 7929 pfn_t pfn; 7930 tte_t tte; 7931 7932 /* 7933 * We would like to 7934 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7935 * but we can't because the iommu driver will call this 7936 * routine at interrupt time and it can't grab the as lock 7937 * or it will deadlock: A thread could have the as lock 7938 * and be waiting for io. The io can't complete 7939 * because the interrupt thread is blocked trying to grab 7940 * the as lock. 
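 *
 * A hedged usage sketch (the kaddr name is illustrative): a driver
 * translating a kernel virtual address simply does
 *
 *	pfn = hat_getpfnum(kas.a_hat, kaddr);
 *
 * and checks the result against PFN_INVALID; user addresses go
 * through the owning process's hat instead.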
7941 */ 7942 7943 ASSERT(hat->sfmmu_xhat_provider == NULL); 7944 7945 if (hat == ksfmmup) { 7946 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7947 ASSERT(segkmem_lpszc > 0); 7948 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7949 if (pfn != PFN_INVALID) { 7950 sfmmu_check_kpfn(pfn); 7951 return (pfn); 7952 } 7953 } else if (segkpm && IS_KPM_ADDR(addr)) { 7954 return (sfmmu_kpm_vatopfn(addr)); 7955 } 7956 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7957 == PFN_SUSPENDED) { 7958 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7959 } 7960 sfmmu_check_kpfn(pfn); 7961 return (pfn); 7962 } else { 7963 return (sfmmu_uvatopfn(addr, hat, NULL)); 7964 } 7965 } 7966 7967 /* 7968 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7969 * Use hat_getpfnum(kas.a_hat, ...) instead. 7970 * 7971 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7972 * but can't right now due to the fact that some software has grown to use 7973 * this interface incorrectly. So for now when the interface is misused, 7974 * return a warning to the user that in the future it won't work in the 7975 * way they're abusing it, and carry on (after disabling page relocation). 7976 */ 7977 pfn_t 7978 hat_getkpfnum(caddr_t addr) 7979 { 7980 pfn_t pfn; 7981 tte_t tte; 7982 int badcaller = 0; 7983 extern int segkmem_reloc; 7984 7985 if (segkpm && IS_KPM_ADDR(addr)) { 7986 badcaller = 1; 7987 pfn = sfmmu_kpm_vatopfn(addr); 7988 } else { 7989 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7990 == PFN_SUSPENDED) { 7991 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7992 } 7993 badcaller = pf_is_memory(pfn); 7994 } 7995 7996 if (badcaller) { 7997 /* 7998 * We can't return PFN_INVALID or the caller may panic 7999 * or corrupt the system. The only alternative is to 8000 * disable page relocation at this point for all kernel 8001 * memory. This will impact any callers of page_relocate() 8002 * such as FMA or DR. 8003 * 8004 * RFE: Add junk here to spit out an ereport so the sysadmin 8005 * can be advised that he should upgrade his device driver 8006 * so that this doesn't happen. 8007 */ 8008 hat_getkpfnum_badcall(caller()); 8009 if (hat_kpr_enabled && segkmem_reloc) { 8010 hat_kpr_enabled = 0; 8011 segkmem_reloc = 0; 8012 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 8013 } 8014 } 8015 return (pfn); 8016 } 8017 8018 /* 8019 * This routine will return both pfn and tte for the vaddr. 8020 */ 8021 static pfn_t 8022 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 8023 { 8024 struct hmehash_bucket *hmebp; 8025 hmeblk_tag hblktag; 8026 int hmeshift, hashno = 1; 8027 struct hme_blk *hmeblkp = NULL; 8028 tte_t tte; 8029 8030 struct sf_hment *sfhmep; 8031 pfn_t pfn; 8032 8033 /* support for ISM */ 8034 ism_map_t *ism_map; 8035 ism_blk_t *ism_blkp; 8036 int i; 8037 sfmmu_t *ism_hatid = NULL; 8038 sfmmu_t *locked_hatid = NULL; 8039 sfmmu_t *sv_sfmmup = sfmmup; 8040 caddr_t sv_vaddr = vaddr; 8041 sf_srd_t *srdp; 8042 8043 if (ttep == NULL) { 8044 ttep = &tte; 8045 } else { 8046 ttep->ll = 0; 8047 } 8048 8049 ASSERT(sfmmup != ksfmmup); 8050 SFMMU_STAT(sf_user_vtop); 8051 /* 8052 * Set ism_hatid if vaddr falls in a ISM segment. 
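 *
 * Illustrative example (the addresses are made up): if an ISM segment
 * is attached at process address 0x10000000 and vaddr is 0x10042000,
 * the loop below switches sfmmup to that segment's ISM hat and rebases
 * vaddr to offset 0x42000 before the hash lookup.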
8053 */ 8054 ism_blkp = sfmmup->sfmmu_iblk; 8055 if (ism_blkp != NULL) { 8056 sfmmu_ismhat_enter(sfmmup, 0); 8057 locked_hatid = sfmmup; 8058 } 8059 while (ism_blkp != NULL && ism_hatid == NULL) { 8060 ism_map = ism_blkp->iblk_maps; 8061 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 8062 if (vaddr >= ism_start(ism_map[i]) && 8063 vaddr < ism_end(ism_map[i])) { 8064 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 8065 vaddr = (caddr_t)(vaddr - 8066 ism_start(ism_map[i])); 8067 break; 8068 } 8069 } 8070 ism_blkp = ism_blkp->iblk_next; 8071 } 8072 if (locked_hatid) { 8073 sfmmu_ismhat_exit(locked_hatid, 0); 8074 } 8075 8076 hblktag.htag_id = sfmmup; 8077 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 8078 do { 8079 hmeshift = HME_HASH_SHIFT(hashno); 8080 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 8081 hblktag.htag_rehash = hashno; 8082 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 8083 8084 SFMMU_HASH_LOCK(hmebp); 8085 8086 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 8087 if (hmeblkp != NULL) { 8088 ASSERT(!hmeblkp->hblk_shared); 8089 HBLKTOHME(sfhmep, hmeblkp, vaddr); 8090 sfmmu_copytte(&sfhmep->hme_tte, ttep); 8091 SFMMU_HASH_UNLOCK(hmebp); 8092 if (TTE_IS_VALID(ttep)) { 8093 pfn = TTE_TO_PFN(vaddr, ttep); 8094 return (pfn); 8095 } 8096 break; 8097 } 8098 SFMMU_HASH_UNLOCK(hmebp); 8099 hashno++; 8100 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 8101 8102 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 8103 return (PFN_INVALID); 8104 } 8105 srdp = sv_sfmmup->sfmmu_srdp; 8106 ASSERT(srdp != NULL); 8107 ASSERT(srdp->srd_refcnt != 0); 8108 hblktag.htag_id = srdp; 8109 hashno = 1; 8110 do { 8111 hmeshift = HME_HASH_SHIFT(hashno); 8112 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 8113 hblktag.htag_rehash = hashno; 8114 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 8115 8116 SFMMU_HASH_LOCK(hmebp); 8117 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 8118 hmeblkp = hmeblkp->hblk_next) { 8119 uint_t rid; 8120 sf_region_t *rgnp; 8121 caddr_t rsaddr; 8122 caddr_t readdr; 8123 8124 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 8125 sv_sfmmup->sfmmu_hmeregion_map)) { 8126 continue; 8127 } 8128 ASSERT(hmeblkp->hblk_shared); 8129 rid = hmeblkp->hblk_tag.htag_rid; 8130 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8131 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8132 rgnp = srdp->srd_hmergnp[rid]; 8133 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 8134 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 8135 sfmmu_copytte(&sfhmep->hme_tte, ttep); 8136 rsaddr = rgnp->rgn_saddr; 8137 readdr = rsaddr + rgnp->rgn_size; 8138 #ifdef DEBUG 8139 if (TTE_IS_VALID(ttep) || 8140 get_hblk_ttesz(hmeblkp) > TTE8K) { 8141 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 8142 ASSERT(eva > sv_vaddr); 8143 ASSERT(sv_vaddr >= rsaddr); 8144 ASSERT(sv_vaddr < readdr); 8145 ASSERT(eva <= readdr); 8146 } 8147 #endif /* DEBUG */ 8148 /* 8149 * Continue the search if we 8150 * found an invalid 8K tte outside of the area 8151 * covered by this hmeblk's region. 
8152 */ 8153 if (TTE_IS_VALID(ttep)) { 8154 SFMMU_HASH_UNLOCK(hmebp); 8155 pfn = TTE_TO_PFN(sv_vaddr, ttep); 8156 return (pfn); 8157 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8158 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8159 SFMMU_HASH_UNLOCK(hmebp); 8160 pfn = PFN_INVALID; 8161 return (pfn); 8162 } 8163 } 8164 SFMMU_HASH_UNLOCK(hmebp); 8165 hashno++; 8166 } while (hashno <= mmu_hashcnt); 8167 return (PFN_INVALID); 8168 } 8169 8170 8171 /* 8172 * For compatibility with AT&T and later optimizations 8173 */ 8174 /* ARGSUSED */ 8175 void 8176 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8177 { 8178 ASSERT(hat != NULL); 8179 ASSERT(hat->sfmmu_xhat_provider == NULL); 8180 } 8181 8182 /* 8183 * Return the number of mappings to a particular page. This number is an 8184 * approximation of the number of people sharing the page. 8185 * 8186 * shared hmeblks or ism hmeblks are counted as 1 mapping here. 8187 * hat_page_checkshare() can be used to compare threshold to share 8188 * count that reflects the number of region sharers albeit at higher cost. 8189 */ 8190 ulong_t 8191 hat_page_getshare(page_t *pp) 8192 { 8193 page_t *spp = pp; /* start page */ 8194 kmutex_t *pml; 8195 ulong_t cnt; 8196 int index, sz = TTE64K; 8197 8198 /* 8199 * We need to grab the mlist lock to make sure any outstanding 8200 * load/unloads complete. Otherwise we could return zero 8201 * even though the unloads haven't finished yet. 8202 */ 8203 pml = sfmmu_mlist_enter(spp); 8204 cnt = spp->p_share; 8205 8206 #ifdef VAC 8207 if (kpm_enable) 8208 cnt += spp->p_kpmref; 8209 #endif 8210 8211 /* 8212 * If we have any large mappings, we count the number of 8213 * mappings that this large page is part of. 8214 */ 8215 index = PP_MAPINDEX(spp); 8216 index >>= 1; 8217 while (index) { 8218 pp = PP_GROUPLEADER(spp, sz); 8219 if ((index & 0x1) && pp != spp) { 8220 cnt += pp->p_share; 8221 spp = pp; 8222 } 8223 index >>= 1; 8224 sz++; 8225 } 8226 sfmmu_mlist_exit(pml); 8227 return (cnt); 8228 } 8229 8230 /* 8231 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8232 * otherwise. Count shared hmeblks by region's refcnt.
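 *
 * A rough usage sketch (the threshold value is illustrative):
 *
 *	if (hat_page_checkshare(pp, 1))
 *		... the page has more than one sharer ...
 *
 * Unlike hat_page_getshare(), the walk below stops as soon as the
 * threshold is exceeded, and shared hmeblks contribute their region's
 * refcnt rather than counting as a single mapping.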
8233 */ 8234 int 8235 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 8236 { 8237 kmutex_t *pml; 8238 ulong_t cnt = 0; 8239 int index, sz = TTE8K; 8240 struct sf_hment *sfhme, *tmphme = NULL; 8241 struct hme_blk *hmeblkp; 8242 8243 pml = sfmmu_mlist_enter(pp); 8244 8245 if (kpm_enable) 8246 cnt = pp->p_kpmref; 8247 8248 if (pp->p_share + cnt > sh_thresh) { 8249 sfmmu_mlist_exit(pml); 8250 return (1); 8251 } 8252 8253 index = PP_MAPINDEX(pp); 8254 8255 again: 8256 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 8257 tmphme = sfhme->hme_next; 8258 if (IS_PAHME(sfhme)) { 8259 continue; 8260 } 8261 8262 hmeblkp = sfmmu_hmetohblk(sfhme); 8263 if (hmeblkp->hblk_xhat_bit) { 8264 cnt++; 8265 if (cnt > sh_thresh) { 8266 sfmmu_mlist_exit(pml); 8267 return (1); 8268 } 8269 continue; 8270 } 8271 if (hme_size(sfhme) != sz) { 8272 continue; 8273 } 8274 8275 if (hmeblkp->hblk_shared) { 8276 sf_srd_t *srdp = hblktosrd(hmeblkp); 8277 uint_t rid = hmeblkp->hblk_tag.htag_rid; 8278 sf_region_t *rgnp; 8279 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8280 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8281 ASSERT(srdp != NULL); 8282 rgnp = srdp->srd_hmergnp[rid]; 8283 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 8284 rgnp, rid); 8285 cnt += rgnp->rgn_refcnt; 8286 } else { 8287 cnt++; 8288 } 8289 if (cnt > sh_thresh) { 8290 sfmmu_mlist_exit(pml); 8291 return (1); 8292 } 8293 } 8294 8295 index >>= 1; 8296 sz++; 8297 while (index) { 8298 pp = PP_GROUPLEADER(pp, sz); 8299 ASSERT(sfmmu_mlist_held(pp)); 8300 if (index & 0x1) { 8301 goto again; 8302 } 8303 index >>= 1; 8304 sz++; 8305 } 8306 sfmmu_mlist_exit(pml); 8307 return (0); 8308 } 8309 8310 /* 8311 * Unload all large mappings to the pp and reset the p_szc field of every 8312 * constituent page according to the remaining mappings. 8313 * 8314 * pp must be locked SE_EXCL. Even though no other constituent pages are 8315 * locked it's legal to unload the large mappings to the pp because all 8316 * constituent pages of large locked mappings have to be locked SE_SHARED. 8317 * This means if we have SE_EXCL lock on one of constituent pages none of the 8318 * large mappings to pp are locked. 8319 * 8320 * Decrease p_szc field starting from the last constituent page and ending 8321 * with the root page. This method is used because other threads rely on the 8322 * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc 8323 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 8324 * ensures that p_szc changes of the constituent pages appears atomic for all 8325 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 8326 * 8327 * This mechanism is only used for file system pages where it's not always 8328 * possible to get SE_EXCL locks on all constituent pages to demote the size 8329 * code (as is done for anonymous or kernel large pages). 8330 * 8331 * See more comments in front of sfmmu_mlspl_enter(). 
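 *
 * As an illustration of the ordering described above (the size is made
 * up): demoting a 4M page built from 512 8K constituents rewrites the
 * p_szc of the constituents from the last one backwards, issues
 * membar_producer(), and only then lowers the root's p_szc, so a
 * thread that still sees the large size in the root finds consistent
 * constituent fields.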
8332 */ 8333 void 8334 hat_page_demote(page_t *pp) 8335 { 8336 int index; 8337 int sz; 8338 cpuset_t cpuset; 8339 int sync = 0; 8340 page_t *rootpp; 8341 struct sf_hment *sfhme; 8342 struct sf_hment *tmphme = NULL; 8343 struct hme_blk *hmeblkp; 8344 uint_t pszc; 8345 page_t *lastpp; 8346 cpuset_t tset; 8347 pgcnt_t npgs; 8348 kmutex_t *pml; 8349 kmutex_t *pmtx = NULL; 8350 8351 ASSERT(PAGE_EXCL(pp)); 8352 ASSERT(!PP_ISFREE(pp)); 8353 ASSERT(!PP_ISKAS(pp)); 8354 ASSERT(page_szc_lock_assert(pp)); 8355 pml = sfmmu_mlist_enter(pp); 8356 8357 pszc = pp->p_szc; 8358 if (pszc == 0) { 8359 goto out; 8360 } 8361 8362 index = PP_MAPINDEX(pp) >> 1; 8363 8364 if (index) { 8365 CPUSET_ZERO(cpuset); 8366 sz = TTE64K; 8367 sync = 1; 8368 } 8369 8370 while (index) { 8371 if (!(index & 0x1)) { 8372 index >>= 1; 8373 sz++; 8374 continue; 8375 } 8376 ASSERT(sz <= pszc); 8377 rootpp = PP_GROUPLEADER(pp, sz); 8378 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8379 tmphme = sfhme->hme_next; 8380 ASSERT(!IS_PAHME(sfhme)); 8381 hmeblkp = sfmmu_hmetohblk(sfhme); 8382 if (hme_size(sfhme) != sz) { 8383 continue; 8384 } 8385 if (hmeblkp->hblk_xhat_bit) { 8386 cmn_err(CE_PANIC, 8387 "hat_page_demote: xhat hmeblk"); 8388 } 8389 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8390 CPUSET_OR(cpuset, tset); 8391 } 8392 if (index >>= 1) { 8393 sz++; 8394 } 8395 } 8396 8397 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8398 8399 if (sync) { 8400 xt_sync(cpuset); 8401 #ifdef VAC 8402 if (PP_ISTNC(pp)) { 8403 conv_tnc(rootpp, sz); 8404 } 8405 #endif /* VAC */ 8406 } 8407 8408 pmtx = sfmmu_page_enter(pp); 8409 8410 ASSERT(pp->p_szc == pszc); 8411 rootpp = PP_PAGEROOT(pp); 8412 ASSERT(rootpp->p_szc == pszc); 8413 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8414 8415 while (lastpp != rootpp) { 8416 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8417 ASSERT(sz < pszc); 8418 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8419 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8420 while (--npgs > 0) { 8421 lastpp->p_szc = (uchar_t)sz; 8422 lastpp = PP_PAGEPREV(lastpp); 8423 } 8424 if (sz) { 8425 /* 8426 * make sure before current root's pszc 8427 * is updated all updates to constituent pages pszc 8428 * fields are globally visible. 8429 */ 8430 membar_producer(); 8431 } 8432 lastpp->p_szc = sz; 8433 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8434 if (lastpp != rootpp) { 8435 lastpp = PP_PAGEPREV(lastpp); 8436 } 8437 } 8438 if (sz == 0) { 8439 /* the loop above doesn't cover this case */ 8440 rootpp->p_szc = 0; 8441 } 8442 out: 8443 ASSERT(pp->p_szc == 0); 8444 if (pmtx != NULL) { 8445 sfmmu_page_exit(pmtx); 8446 } 8447 sfmmu_mlist_exit(pml); 8448 } 8449 8450 /* 8451 * Refresh the HAT ismttecnt[] element for size szc. 8452 * Caller must have set ISM busy flag to prevent mapping 8453 * lists from changing while we're traversing them. 
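 *
 * Callers refresh the counts for every relevant page size while the
 * ISM busy flag is held, as hat_share()/hat_unshare() do below:
 *
 *	for (i = 0; i <= ismszc; i++) {
 *		if (!(disable_ism_large_pages & (1 << i)))
 *			(void) ism_tsb_entries(sfmmup, i);
 *	}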
8454 */ 8455 pgcnt_t 8456 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8457 { 8458 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8459 ism_map_t *ism_map; 8460 pgcnt_t npgs = 0; 8461 pgcnt_t npgs_scd = 0; 8462 int j; 8463 sf_scd_t *scdp; 8464 uchar_t rid; 8465 hatlock_t *hatlockp; 8466 int ismnotinscd = 0; 8467 8468 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8469 scdp = sfmmup->sfmmu_scdp; 8470 8471 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8472 ism_map = ism_blkp->iblk_maps; 8473 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8474 rid = ism_map[j].imap_rid; 8475 ASSERT(rid == SFMMU_INVALID_ISMRID || 8476 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8477 8478 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8479 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8480 /* ISM is in sfmmup's SCD */ 8481 npgs_scd += 8482 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8483 } else { 8484 /* ISMs is not in SCD */ 8485 npgs += 8486 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8487 ismnotinscd = 1; 8488 } 8489 } 8490 } 8491 8492 if (&mmu_set_pgsz_order) { 8493 hatlockp = sfmmu_hat_enter(sfmmup); 8494 if (ismnotinscd) { 8495 SFMMU_FLAGS_SET(sfmmup, HAT_ISMNOTINSCD); 8496 } else { 8497 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMNOTINSCD); 8498 } 8499 sfmmu_hat_exit(hatlockp); 8500 } 8501 8502 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8503 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8504 return (npgs); 8505 } 8506 8507 /* 8508 * Yield the memory claim requirement for an address space. 8509 * 8510 * This is currently implemented as the number of bytes that have active 8511 * hardware translations that have page structures. Therefore, it can 8512 * underestimate the traditional resident set size, eg, if the 8513 * physical page is present and the hardware translation is missing; 8514 * and it can overestimate the rss, eg, if there are active 8515 * translations to a frame buffer with page structs. 8516 * Also, it does not take sharing into account. 8517 * 8518 * Note that we don't acquire locks here since this function is most often 8519 * called from the clock thread. 8520 */ 8521 size_t 8522 hat_get_mapped_size(struct hat *hat) 8523 { 8524 size_t assize = 0; 8525 int i; 8526 8527 if (hat == NULL) 8528 return (0); 8529 8530 ASSERT(hat->sfmmu_xhat_provider == NULL); 8531 8532 for (i = 0; i < mmu_page_sizes; i++) 8533 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8534 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8535 8536 if (hat->sfmmu_iblk == NULL) 8537 return (assize); 8538 8539 for (i = 0; i < mmu_page_sizes; i++) 8540 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8541 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8542 8543 return (assize); 8544 } 8545 8546 int 8547 hat_stats_enable(struct hat *hat) 8548 { 8549 hatlock_t *hatlockp; 8550 8551 ASSERT(hat->sfmmu_xhat_provider == NULL); 8552 8553 hatlockp = sfmmu_hat_enter(hat); 8554 hat->sfmmu_rmstat++; 8555 sfmmu_hat_exit(hatlockp); 8556 return (1); 8557 } 8558 8559 void 8560 hat_stats_disable(struct hat *hat) 8561 { 8562 hatlock_t *hatlockp; 8563 8564 ASSERT(hat->sfmmu_xhat_provider == NULL); 8565 8566 hatlockp = sfmmu_hat_enter(hat); 8567 hat->sfmmu_rmstat--; 8568 sfmmu_hat_exit(hatlockp); 8569 } 8570 8571 /* 8572 * Routines for entering or removing ourselves from the 8573 * ism_hat's mapping list. This is used for both private and 8574 * SCD hats. 
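 *
 * Callers must hold ism_mlist_lock across these list operations, for
 * example (from hat_share() below):
 *
 *	mutex_enter(&ism_mlist_lock);
 *	iment_add(ism_ment, ism_hatid);
 *	mutex_exit(&ism_mlist_lock);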
8575 */ 8576 static void 8577 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8578 { 8579 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8580 8581 iment->iment_prev = NULL; 8582 iment->iment_next = ism_hat->sfmmu_iment; 8583 if (ism_hat->sfmmu_iment) { 8584 ism_hat->sfmmu_iment->iment_prev = iment; 8585 } 8586 ism_hat->sfmmu_iment = iment; 8587 } 8588 8589 static void 8590 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8591 { 8592 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8593 8594 if (ism_hat->sfmmu_iment == NULL) { 8595 panic("ism map entry remove - no entries"); 8596 } 8597 8598 if (iment->iment_prev) { 8599 ASSERT(ism_hat->sfmmu_iment != iment); 8600 iment->iment_prev->iment_next = iment->iment_next; 8601 } else { 8602 ASSERT(ism_hat->sfmmu_iment == iment); 8603 ism_hat->sfmmu_iment = iment->iment_next; 8604 } 8605 8606 if (iment->iment_next) { 8607 iment->iment_next->iment_prev = iment->iment_prev; 8608 } 8609 8610 /* 8611 * zero out the entry 8612 */ 8613 iment->iment_next = NULL; 8614 iment->iment_prev = NULL; 8615 iment->iment_hat = NULL; 8616 } 8617 8618 /* 8619 * Hat_share()/unshare() return an (non-zero) error 8620 * when saddr and daddr are not properly aligned. 8621 * 8622 * The top level mapping element determines the alignment 8623 * requirement for saddr and daddr, depending on different 8624 * architectures. 8625 * 8626 * When hat_share()/unshare() are not supported, 8627 * HATOP_SHARE()/UNSHARE() return 0 8628 */ 8629 int 8630 hat_share(struct hat *sfmmup, caddr_t addr, 8631 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 8632 { 8633 ism_blk_t *ism_blkp; 8634 ism_blk_t *new_iblk; 8635 ism_map_t *ism_map; 8636 ism_ment_t *ism_ment; 8637 int i, added; 8638 hatlock_t *hatlockp; 8639 int reload_mmu = 0; 8640 uint_t ismshift = page_get_shift(ismszc); 8641 size_t ismpgsz = page_get_pagesize(ismszc); 8642 uint_t ismmask = (uint_t)ismpgsz - 1; 8643 size_t sh_size = ISM_SHIFT(ismshift, len); 8644 ushort_t ismhatflag; 8645 hat_region_cookie_t rcookie; 8646 sf_scd_t *old_scdp; 8647 8648 #ifdef DEBUG 8649 caddr_t eaddr = addr + len; 8650 #endif /* DEBUG */ 8651 8652 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8653 ASSERT(sptaddr == ISMID_STARTADDR); 8654 /* 8655 * Check the alignment. 8656 */ 8657 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8658 return (EINVAL); 8659 8660 /* 8661 * Check size alignment. 8662 */ 8663 if (!ISM_ALIGNED(ismshift, len)) 8664 return (EINVAL); 8665 8666 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 8667 8668 /* 8669 * Allocate ism_ment for the ism_hat's mapping list, and an 8670 * ism map blk in case we need one. We must do our 8671 * allocations before acquiring locks to prevent a deadlock 8672 * in the kmem allocator on the mapping list lock. 8673 */ 8674 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8675 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8676 8677 /* 8678 * Serialize ISM mappings with the ISM busy flag, and also the 8679 * trap handlers. 8680 */ 8681 sfmmu_ismhat_enter(sfmmup, 0); 8682 8683 /* 8684 * Allocate an ism map blk if necessary. 8685 */ 8686 if (sfmmup->sfmmu_iblk == NULL) { 8687 sfmmup->sfmmu_iblk = new_iblk; 8688 bzero(new_iblk, sizeof (*new_iblk)); 8689 new_iblk->iblk_nextpa = (uint64_t)-1; 8690 membar_stst(); /* make sure next ptr visible to all CPUs */ 8691 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8692 reload_mmu = 1; 8693 new_iblk = NULL; 8694 } 8695 8696 #ifdef DEBUG 8697 /* 8698 * Make sure mapping does not already exist. 
8699 */ 8700 ism_blkp = sfmmup->sfmmu_iblk; 8701 while (ism_blkp != NULL) { 8702 ism_map = ism_blkp->iblk_maps; 8703 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8704 if ((addr >= ism_start(ism_map[i]) && 8705 addr < ism_end(ism_map[i])) || 8706 eaddr > ism_start(ism_map[i]) && 8707 eaddr <= ism_end(ism_map[i])) { 8708 panic("sfmmu_share: Already mapped!"); 8709 } 8710 } 8711 ism_blkp = ism_blkp->iblk_next; 8712 } 8713 #endif /* DEBUG */ 8714 8715 ASSERT(ismszc >= TTE4M); 8716 if (ismszc == TTE4M) { 8717 ismhatflag = HAT_4M_FLAG; 8718 } else if (ismszc == TTE32M) { 8719 ismhatflag = HAT_32M_FLAG; 8720 } else if (ismszc == TTE256M) { 8721 ismhatflag = HAT_256M_FLAG; 8722 } 8723 /* 8724 * Add mapping to first available mapping slot. 8725 */ 8726 ism_blkp = sfmmup->sfmmu_iblk; 8727 added = 0; 8728 while (!added) { 8729 ism_map = ism_blkp->iblk_maps; 8730 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8731 if (ism_map[i].imap_ismhat == NULL) { 8732 8733 ism_map[i].imap_ismhat = ism_hatid; 8734 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8735 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8736 ism_map[i].imap_hatflags = ismhatflag; 8737 ism_map[i].imap_sz_mask = ismmask; 8738 /* 8739 * imap_seg is checked in ISM_CHECK to see if 8740 * non-NULL, then other info assumed valid. 8741 */ 8742 membar_stst(); 8743 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8744 ism_map[i].imap_ment = ism_ment; 8745 8746 /* 8747 * Now add ourselves to the ism_hat's 8748 * mapping list. 8749 */ 8750 ism_ment->iment_hat = sfmmup; 8751 ism_ment->iment_base_va = addr; 8752 ism_hatid->sfmmu_ismhat = 1; 8753 mutex_enter(&ism_mlist_lock); 8754 iment_add(ism_ment, ism_hatid); 8755 mutex_exit(&ism_mlist_lock); 8756 added = 1; 8757 break; 8758 } 8759 } 8760 if (!added && ism_blkp->iblk_next == NULL) { 8761 ism_blkp->iblk_next = new_iblk; 8762 new_iblk = NULL; 8763 bzero(ism_blkp->iblk_next, 8764 sizeof (*ism_blkp->iblk_next)); 8765 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8766 membar_stst(); 8767 ism_blkp->iblk_nextpa = 8768 va_to_pa((caddr_t)ism_blkp->iblk_next); 8769 } 8770 ism_blkp = ism_blkp->iblk_next; 8771 } 8772 8773 /* 8774 * After calling hat_join_region, sfmmup may join a new SCD or 8775 * move from the old scd to a new scd, in which case, we want to 8776 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8777 * sfmmu_check_page_sizes at the end of this routine. 8778 */ 8779 old_scdp = sfmmup->sfmmu_scdp; 8780 8781 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8782 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8783 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8784 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8785 } 8786 /* 8787 * Update our counters for this sfmmup's ism mappings. 8788 */ 8789 for (i = 0; i <= ismszc; i++) { 8790 if (!(disable_ism_large_pages & (1 << i))) 8791 (void) ism_tsb_entries(sfmmup, i); 8792 } 8793 8794 /* 8795 * For ISM and DISM we do not support 512K pages, so we only only 8796 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8797 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8798 * 8799 * Need to set 32M/256M ISM flags to make sure 8800 * sfmmu_check_page_sizes() enables them on Panther. 
8801 */ 8802 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8803 8804 switch (ismszc) { 8805 case TTE256M: 8806 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8807 hatlockp = sfmmu_hat_enter(sfmmup); 8808 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8809 sfmmu_hat_exit(hatlockp); 8810 } 8811 break; 8812 case TTE32M: 8813 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8814 hatlockp = sfmmu_hat_enter(sfmmup); 8815 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8816 sfmmu_hat_exit(hatlockp); 8817 } 8818 break; 8819 default: 8820 break; 8821 } 8822 8823 /* 8824 * If we updated the ismblkpa for this HAT we must make 8825 * sure all CPUs running this process reload their tsbmiss area. 8826 * Otherwise they will fail to load the mappings in the tsbmiss 8827 * handler and will loop calling pagefault(). 8828 */ 8829 if (reload_mmu) { 8830 hatlockp = sfmmu_hat_enter(sfmmup); 8831 sfmmu_sync_mmustate(sfmmup); 8832 sfmmu_hat_exit(hatlockp); 8833 } 8834 8835 if (&mmu_set_pgsz_order) { 8836 hatlockp = sfmmu_hat_enter(sfmmup); 8837 mmu_set_pgsz_order(sfmmup, 1); 8838 sfmmu_hat_exit(hatlockp); 8839 } 8840 sfmmu_ismhat_exit(sfmmup, 0); 8841 8842 /* 8843 * Free up ismblk if we didn't use it. 8844 */ 8845 if (new_iblk != NULL) 8846 kmem_cache_free(ism_blk_cache, new_iblk); 8847 8848 /* 8849 * Check TSB and TLB page sizes. 8850 */ 8851 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8852 sfmmu_check_page_sizes(sfmmup, 0); 8853 } else { 8854 sfmmu_check_page_sizes(sfmmup, 1); 8855 } 8856 return (0); 8857 } 8858 8859 /* 8860 * hat_unshare removes exactly one ism_map from 8861 * this process's as. It expects multiple calls 8862 * to hat_unshare for multiple shm segments. 8863 */ 8864 void 8865 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8866 { 8867 ism_map_t *ism_map; 8868 ism_ment_t *free_ment = NULL; 8869 ism_blk_t *ism_blkp; 8870 struct hat *ism_hatid; 8871 int found, i; 8872 hatlock_t *hatlockp; 8873 struct tsb_info *tsbinfo; 8874 uint_t ismshift = page_get_shift(ismszc); 8875 size_t sh_size = ISM_SHIFT(ismshift, len); 8876 uchar_t ism_rid; 8877 sf_scd_t *old_scdp; 8878 8879 ASSERT(ISM_ALIGNED(ismshift, addr)); 8880 ASSERT(ISM_ALIGNED(ismshift, len)); 8881 ASSERT(sfmmup != NULL); 8882 ASSERT(sfmmup != ksfmmup); 8883 8884 if (sfmmup->sfmmu_xhat_provider) { 8885 XHAT_UNSHARE(sfmmup, addr, len); 8886 return; 8887 } else { 8888 /* 8889 * This must be a CPU HAT. If the address space has 8890 * XHATs attached, inform all XHATs that ISM segment 8891 * is going away 8892 */ 8893 ASSERT(sfmmup->sfmmu_as != NULL); 8894 if (sfmmup->sfmmu_as->a_xhat != NULL) 8895 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 8896 } 8897 8898 /* 8899 * Make sure that during the entire time ISM mappings are removed, 8900 * the trap handlers serialize behind us, and that no one else 8901 * can be mucking with ISM mappings. This also lets us get away 8902 * with not doing expensive cross calls to flush the TLB -- we 8903 * just discard the context, flush the entire TSB, and call it 8904 * a day. 8905 */ 8906 sfmmu_ismhat_enter(sfmmup, 0); 8907 8908 /* 8909 * Remove the mapping. 8910 * 8911 * We can't have any holes in the ism map. 8912 * The tsb miss code while searching the ism map will 8913 * stop on an empty map slot. So we must move 8914 * everyone past the hole up 1 if any. 8915 * 8916 * Also empty ism map blks are not freed until the 8917 * process exits. This is to prevent a MT race condition 8918 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 
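 *
 * As an illustration of the compaction described above (slot numbers
 * are made up): with maps in slots 0-3 and slot 1 being removed, slots
 * 2 and 3 are copied down one place and the first map of the next ism
 * block (if there is one) is pulled into slot 3, so the tsb miss code
 * never encounters an empty slot in the middle of a block.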
8919 */ 8920 found = 0; 8921 ism_blkp = sfmmup->sfmmu_iblk; 8922 while (!found && ism_blkp != NULL) { 8923 ism_map = ism_blkp->iblk_maps; 8924 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8925 if (addr == ism_start(ism_map[i]) && 8926 sh_size == (size_t)(ism_size(ism_map[i]))) { 8927 found = 1; 8928 break; 8929 } 8930 } 8931 if (!found) 8932 ism_blkp = ism_blkp->iblk_next; 8933 } 8934 8935 if (found) { 8936 ism_hatid = ism_map[i].imap_ismhat; 8937 ism_rid = ism_map[i].imap_rid; 8938 ASSERT(ism_hatid != NULL); 8939 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8940 8941 /* 8942 * After hat_leave_region, the sfmmup may leave SCD, 8943 * in which case, we want to grow the private tsb size when 8944 * calling sfmmu_check_page_sizes at the end of the routine. 8945 */ 8946 old_scdp = sfmmup->sfmmu_scdp; 8947 /* 8948 * Then remove ourselves from the region. 8949 */ 8950 if (ism_rid != SFMMU_INVALID_ISMRID) { 8951 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8952 HAT_REGION_ISM); 8953 } 8954 8955 /* 8956 * And now guarantee that any other cpu 8957 * that tries to process an ISM miss 8958 * will go to tl=0. 8959 */ 8960 hatlockp = sfmmu_hat_enter(sfmmup); 8961 sfmmu_invalidate_ctx(sfmmup); 8962 sfmmu_hat_exit(hatlockp); 8963 8964 /* 8965 * Remove ourselves from the ism mapping list. 8966 */ 8967 mutex_enter(&ism_mlist_lock); 8968 iment_sub(ism_map[i].imap_ment, ism_hatid); 8969 mutex_exit(&ism_mlist_lock); 8970 free_ment = ism_map[i].imap_ment; 8971 8972 /* 8973 * We delete the ism map by copying 8974 * the next map over the current one. 8975 * We will take the next one in the maps 8976 * array or from the next ism_blk. 8977 */ 8978 while (ism_blkp != NULL) { 8979 ism_map = ism_blkp->iblk_maps; 8980 while (i < (ISM_MAP_SLOTS - 1)) { 8981 ism_map[i] = ism_map[i + 1]; 8982 i++; 8983 } 8984 /* i == (ISM_MAP_SLOTS - 1) */ 8985 ism_blkp = ism_blkp->iblk_next; 8986 if (ism_blkp != NULL) { 8987 ism_map[i] = ism_blkp->iblk_maps[0]; 8988 i = 0; 8989 } else { 8990 ism_map[i].imap_seg = 0; 8991 ism_map[i].imap_vb_shift = 0; 8992 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8993 ism_map[i].imap_hatflags = 0; 8994 ism_map[i].imap_sz_mask = 0; 8995 ism_map[i].imap_ismhat = NULL; 8996 ism_map[i].imap_ment = NULL; 8997 } 8998 } 8999 9000 /* 9001 * Now flush entire TSB for the process, since 9002 * demapping page by page can be too expensive. 9003 * We don't have to flush the TLB here anymore 9004 * since we switch to a new TLB ctx instead. 9005 * Also, there is no need to flush if the process 9006 * is exiting since the TSB will be freed later. 9007 */ 9008 if (!sfmmup->sfmmu_free) { 9009 hatlockp = sfmmu_hat_enter(sfmmup); 9010 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 9011 tsbinfo = tsbinfo->tsb_next) { 9012 if (tsbinfo->tsb_flags & TSB_SWAPPED) 9013 continue; 9014 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 9015 tsbinfo->tsb_flags |= 9016 TSB_FLUSH_NEEDED; 9017 continue; 9018 } 9019 9020 sfmmu_inv_tsb(tsbinfo->tsb_va, 9021 TSB_BYTES(tsbinfo->tsb_szc)); 9022 } 9023 sfmmu_hat_exit(hatlockp); 9024 } 9025 } 9026 9027 /* 9028 * Update our counters for this sfmmup's ism mappings. 
9029 */ 9030 for (i = 0; i <= ismszc; i++) { 9031 if (!(disable_ism_large_pages & (1 << i))) 9032 (void) ism_tsb_entries(sfmmup, i); 9033 } 9034 9035 if (&mmu_set_pgsz_order) { 9036 hatlockp = sfmmu_hat_enter(sfmmup); 9037 mmu_set_pgsz_order(sfmmup, 1); 9038 sfmmu_hat_exit(hatlockp); 9039 } 9040 sfmmu_ismhat_exit(sfmmup, 0); 9041 9042 /* 9043 * We must do our freeing here after dropping locks 9044 * to prevent a deadlock in the kmem allocator on the 9045 * mapping list lock. 9046 */ 9047 if (free_ment != NULL) 9048 kmem_cache_free(ism_ment_cache, free_ment); 9049 9050 /* 9051 * Check TSB and TLB page sizes if the process isn't exiting. 9052 */ 9053 if (!sfmmup->sfmmu_free) { 9054 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 9055 sfmmu_check_page_sizes(sfmmup, 1); 9056 } else { 9057 sfmmu_check_page_sizes(sfmmup, 0); 9058 } 9059 } 9060 } 9061 9062 /* ARGSUSED */ 9063 static int 9064 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 9065 { 9066 /* void *buf is sfmmu_t pointer */ 9067 bzero(buf, sizeof (sfmmu_t)); 9068 9069 return (0); 9070 } 9071 9072 /* ARGSUSED */ 9073 static void 9074 sfmmu_idcache_destructor(void *buf, void *cdrarg) 9075 { 9076 /* void *buf is sfmmu_t pointer */ 9077 } 9078 9079 /* 9080 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 9081 * field to be the pa of this hmeblk 9082 */ 9083 /* ARGSUSED */ 9084 static int 9085 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 9086 { 9087 struct hme_blk *hmeblkp; 9088 9089 bzero(buf, (size_t)cdrarg); 9090 hmeblkp = (struct hme_blk *)buf; 9091 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 9092 9093 #ifdef HBLK_TRACE 9094 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 9095 #endif /* HBLK_TRACE */ 9096 9097 return (0); 9098 } 9099 9100 /* ARGSUSED */ 9101 static void 9102 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 9103 { 9104 9105 #ifdef HBLK_TRACE 9106 9107 struct hme_blk *hmeblkp; 9108 9109 hmeblkp = (struct hme_blk *)buf; 9110 mutex_destroy(&hmeblkp->hblk_audit_lock); 9111 9112 #endif /* HBLK_TRACE */ 9113 } 9114 9115 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 9116 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 9117 /* 9118 * The kmem allocator will callback into our reclaim routine when the system 9119 * is running low in memory. We traverse the hash and free up all unused but 9120 * still cached hme_blks. We also traverse the free list and free them up 9121 * as well. 
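 *
 * A hedged sketch of how a reclaim callback like this is typically
 * registered with the kmem allocator; the cache name and size
 * arguments shown here are illustrative, not necessarily the exact
 * ones used when the hme_blk caches are created:
 *
 *	cache = kmem_cache_create("sfmmu8_cache", hme8blk_sz, 0,
 *	    sfmmu_hblkcache_constructor, sfmmu_hblkcache_destructor,
 *	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, NULL, 0);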
9122 */ 9123 /*ARGSUSED*/ 9124 static void 9125 sfmmu_hblkcache_reclaim(void *cdrarg) 9126 { 9127 int i; 9128 struct hmehash_bucket *hmebp; 9129 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 9130 static struct hmehash_bucket *uhmehash_reclaim_hand; 9131 static struct hmehash_bucket *khmehash_reclaim_hand; 9132 struct hme_blk *list = NULL, *last_hmeblkp; 9133 cpuset_t cpuset = cpu_ready_set; 9134 cpu_hme_pend_t *cpuhp; 9135 9136 /* Free up hmeblks on the cpu pending lists */ 9137 for (i = 0; i < NCPU; i++) { 9138 cpuhp = &cpu_hme_pend[i]; 9139 if (cpuhp->chp_listp != NULL) { 9140 mutex_enter(&cpuhp->chp_mutex); 9141 if (cpuhp->chp_listp == NULL) { 9142 mutex_exit(&cpuhp->chp_mutex); 9143 continue; 9144 } 9145 for (last_hmeblkp = cpuhp->chp_listp; 9146 last_hmeblkp->hblk_next != NULL; 9147 last_hmeblkp = last_hmeblkp->hblk_next) 9148 ; 9149 last_hmeblkp->hblk_next = list; 9150 list = cpuhp->chp_listp; 9151 cpuhp->chp_listp = NULL; 9152 cpuhp->chp_count = 0; 9153 mutex_exit(&cpuhp->chp_mutex); 9154 } 9155 9156 } 9157 9158 if (list != NULL) { 9159 kpreempt_disable(); 9160 CPUSET_DEL(cpuset, CPU->cpu_id); 9161 xt_sync(cpuset); 9162 xt_sync(cpuset); 9163 kpreempt_enable(); 9164 sfmmu_hblk_free(&list); 9165 list = NULL; 9166 } 9167 9168 hmebp = uhmehash_reclaim_hand; 9169 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 9170 uhmehash_reclaim_hand = hmebp = uhme_hash; 9171 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9172 9173 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9174 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9175 hmeblkp = hmebp->hmeblkp; 9176 pr_hblk = NULL; 9177 while (hmeblkp) { 9178 nx_hblk = hmeblkp->hblk_next; 9179 if (!hmeblkp->hblk_vcnt && 9180 !hmeblkp->hblk_hmecnt) { 9181 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9182 pr_hblk, &list, 0); 9183 } else { 9184 pr_hblk = hmeblkp; 9185 } 9186 hmeblkp = nx_hblk; 9187 } 9188 SFMMU_HASH_UNLOCK(hmebp); 9189 } 9190 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 9191 hmebp = uhme_hash; 9192 } 9193 9194 hmebp = khmehash_reclaim_hand; 9195 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 9196 khmehash_reclaim_hand = hmebp = khme_hash; 9197 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9198 9199 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9200 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9201 hmeblkp = hmebp->hmeblkp; 9202 pr_hblk = NULL; 9203 while (hmeblkp) { 9204 nx_hblk = hmeblkp->hblk_next; 9205 if (!hmeblkp->hblk_vcnt && 9206 !hmeblkp->hblk_hmecnt) { 9207 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9208 pr_hblk, &list, 0); 9209 } else { 9210 pr_hblk = hmeblkp; 9211 } 9212 hmeblkp = nx_hblk; 9213 } 9214 SFMMU_HASH_UNLOCK(hmebp); 9215 } 9216 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9217 hmebp = khme_hash; 9218 } 9219 sfmmu_hblks_list_purge(&list, 0); 9220 } 9221 9222 /* 9223 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9224 * same goes for sfmmu_get_addrvcolor(). 9225 * 9226 * This function will return the virtual color for the specified page. The 9227 * virtual color corresponds to this page current mapping or its last mapping. 9228 * It is used by memory allocators to choose addresses with the correct 9229 * alignment so vac consistency is automatically maintained. If the page 9230 * has no color it returns -1. 
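 *
 * A rough sketch of how an allocator might consume the color (the
 * variable names are illustrative):
 *
 *	color = sfmmu_get_ppvcolor(pp);
 *	if (color != -1)
 *		... choose a va with addr_to_vcolor(va) == color ...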
9231 */ 9232 /*ARGSUSED*/ 9233 int 9234 sfmmu_get_ppvcolor(struct page *pp) 9235 { 9236 #ifdef VAC 9237 int color; 9238 9239 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9240 return (-1); 9241 } 9242 color = PP_GET_VCOLOR(pp); 9243 ASSERT(color < mmu_btop(shm_alignment)); 9244 return (color); 9245 #else 9246 return (-1); 9247 #endif /* VAC */ 9248 } 9249 9250 /* 9251 * This function will return the desired alignment for vac consistency 9252 * (vac color) given a virtual address. If no vac is present it returns -1. 9253 */ 9254 /*ARGSUSED*/ 9255 int 9256 sfmmu_get_addrvcolor(caddr_t vaddr) 9257 { 9258 #ifdef VAC 9259 if (cache & CACHE_VAC) { 9260 return (addr_to_vcolor(vaddr)); 9261 } else { 9262 return (-1); 9263 } 9264 #else 9265 return (-1); 9266 #endif /* VAC */ 9267 } 9268 9269 #ifdef VAC 9270 /* 9271 * Check for conflicts. 9272 * A conflict exists if the new and existent mappings do not match in 9273 * their "shm_alignment fields. If conflicts exist, the existant mappings 9274 * are flushed unless one of them is locked. If one of them is locked, then 9275 * the mappings are flushed and converted to non-cacheable mappings. 9276 */ 9277 static void 9278 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 9279 { 9280 struct hat *tmphat; 9281 struct sf_hment *sfhmep, *tmphme = NULL; 9282 struct hme_blk *hmeblkp; 9283 int vcolor; 9284 tte_t tte; 9285 9286 ASSERT(sfmmu_mlist_held(pp)); 9287 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 9288 9289 vcolor = addr_to_vcolor(addr); 9290 if (PP_NEWPAGE(pp)) { 9291 PP_SET_VCOLOR(pp, vcolor); 9292 return; 9293 } 9294 9295 if (PP_GET_VCOLOR(pp) == vcolor) { 9296 return; 9297 } 9298 9299 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 9300 /* 9301 * Previous user of page had a different color 9302 * but since there are no current users 9303 * we just flush the cache and change the color. 9304 */ 9305 SFMMU_STAT(sf_pgcolor_conflict); 9306 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9307 PP_SET_VCOLOR(pp, vcolor); 9308 return; 9309 } 9310 9311 /* 9312 * If we get here we have a vac conflict with a current 9313 * mapping. VAC conflict policy is as follows. 9314 * - The default is to unload the other mappings unless: 9315 * - If we have a large mapping we uncache the page. 9316 * We need to uncache the rest of the large page too. 9317 * - If any of the mappings are locked we uncache the page. 9318 * - If the requested mapping is inconsistent 9319 * with another mapping and that mapping 9320 * is in the same address space we have to 9321 * make it non-cached. The default thing 9322 * to do is unload the inconsistent mapping 9323 * but if they are in the same address space 9324 * we run the risk of unmapping the pc or the 9325 * stack which we will use as we return to the user, 9326 * in which case we can then fault on the thing 9327 * we just unloaded and get into an infinite loop. 9328 */ 9329 if (PP_ISMAPPED_LARGE(pp)) { 9330 int sz; 9331 9332 /* 9333 * Existing mapping is for big pages. We don't unload 9334 * existing big mappings to satisfy new mappings. 9335 * Always convert all mappings to TNC. 9336 */ 9337 sz = fnd_mapping_sz(pp); 9338 pp = PP_GROUPLEADER(pp, sz); 9339 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 9340 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 9341 TTEPAGES(sz)); 9342 9343 return; 9344 } 9345 9346 /* 9347 * check if any mapping is in same as or if it is locked 9348 * since in that case we need to uncache. 
9349 */ 9350 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9351 tmphme = sfhmep->hme_next; 9352 if (IS_PAHME(sfhmep)) 9353 continue; 9354 hmeblkp = sfmmu_hmetohblk(sfhmep); 9355 if (hmeblkp->hblk_xhat_bit) 9356 continue; 9357 tmphat = hblktosfmmu(hmeblkp); 9358 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9359 ASSERT(TTE_IS_VALID(&tte)); 9360 if (hmeblkp->hblk_shared || tmphat == hat || 9361 hmeblkp->hblk_lckcnt) { 9362 /* 9363 * We have an uncache conflict 9364 */ 9365 SFMMU_STAT(sf_uncache_conflict); 9366 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9367 return; 9368 } 9369 } 9370 9371 /* 9372 * We have an unload conflict 9373 * We have already checked for LARGE mappings, therefore 9374 * the remaining mapping(s) must be TTE8K. 9375 */ 9376 SFMMU_STAT(sf_unload_conflict); 9377 9378 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9379 tmphme = sfhmep->hme_next; 9380 if (IS_PAHME(sfhmep)) 9381 continue; 9382 hmeblkp = sfmmu_hmetohblk(sfhmep); 9383 if (hmeblkp->hblk_xhat_bit) 9384 continue; 9385 ASSERT(!hmeblkp->hblk_shared); 9386 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9387 } 9388 9389 if (PP_ISMAPPED_KPM(pp)) 9390 sfmmu_kpm_vac_unload(pp, addr); 9391 9392 /* 9393 * Unloads only do TLB flushes so we need to flush the 9394 * cache here. 9395 */ 9396 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9397 PP_SET_VCOLOR(pp, vcolor); 9398 } 9399 9400 /* 9401 * Whenever a mapping is unloaded and the page is in TNC state, 9402 * we see if the page can be made cacheable again. 'pp' is 9403 * the page that we just unloaded a mapping from, the size 9404 * of mapping that was unloaded is 'ottesz'. 9405 * Remark: 9406 * The recache policy for mpss pages can leave a performance problem 9407 * under the following circumstances: 9408 * . A large page in uncached mode has just been unmapped. 9409 * . All constituent pages are TNC due to a conflicting small mapping. 9410 * . There are many other, non conflicting, small mappings around for 9411 * a lot of the constituent pages. 9412 * . We're called w/ the "old" groupleader page and the old ottesz, 9413 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9414 * we end up w/ TTE8K or npages == 1. 9415 * . We call tst_tnc w/ the old groupleader only, and if there is no 9416 * conflict, we re-cache only this page. 9417 * . All other small mappings are not checked and will be left in TNC mode. 9418 * The problem is not very serious because: 9419 * . mpss is actually only defined for heap and stack, so the probability 9420 * is not very high that a large page mapping exists in parallel to a small 9421 * one (this is possible, but seems to be bad programming style in the 9422 * appl). 9423 * . The problem gets a little bit more serious, when those TNC pages 9424 * have to be mapped into kernel space, e.g. for networking. 9425 * . When VAC alias conflicts occur in applications, this is regarded 9426 * as an application bug. So if kstat's show them, the appl should 9427 * be changed anyway. 9428 */ 9429 void 9430 conv_tnc(page_t *pp, int ottesz) 9431 { 9432 int cursz, dosz; 9433 pgcnt_t curnpgs, dopgs; 9434 pgcnt_t pg64k; 9435 page_t *pp2; 9436 9437 /* 9438 * Determine how big a range we check for TNC and find 9439 * leader page. cursz is the size of the biggest 9440 * mapping that still exist on 'pp'. 
9441 */ 9442 if (PP_ISMAPPED_LARGE(pp)) { 9443 cursz = fnd_mapping_sz(pp); 9444 } else { 9445 cursz = TTE8K; 9446 } 9447 9448 if (ottesz >= cursz) { 9449 dosz = ottesz; 9450 pp2 = pp; 9451 } else { 9452 dosz = cursz; 9453 pp2 = PP_GROUPLEADER(pp, dosz); 9454 } 9455 9456 pg64k = TTEPAGES(TTE64K); 9457 dopgs = TTEPAGES(dosz); 9458 9459 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9460 9461 while (dopgs != 0) { 9462 curnpgs = TTEPAGES(cursz); 9463 if (tst_tnc(pp2, curnpgs)) { 9464 SFMMU_STAT_ADD(sf_recache, curnpgs); 9465 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9466 curnpgs); 9467 } 9468 9469 ASSERT(dopgs >= curnpgs); 9470 dopgs -= curnpgs; 9471 9472 if (dopgs == 0) { 9473 break; 9474 } 9475 9476 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9477 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9478 cursz = fnd_mapping_sz(pp2); 9479 } else { 9480 cursz = TTE8K; 9481 } 9482 } 9483 } 9484 9485 /* 9486 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9487 * returns 0 otherwise. Note that oaddr argument is valid for only 9488 * 8k pages. 9489 */ 9490 int 9491 tst_tnc(page_t *pp, pgcnt_t npages) 9492 { 9493 struct sf_hment *sfhme; 9494 struct hme_blk *hmeblkp; 9495 tte_t tte; 9496 caddr_t vaddr; 9497 int clr_valid = 0; 9498 int color, color1, bcolor; 9499 int i, ncolors; 9500 9501 ASSERT(pp != NULL); 9502 ASSERT(!(cache & CACHE_WRITEBACK)); 9503 9504 if (npages > 1) { 9505 ncolors = CACHE_NUM_COLOR; 9506 } 9507 9508 for (i = 0; i < npages; i++) { 9509 ASSERT(sfmmu_mlist_held(pp)); 9510 ASSERT(PP_ISTNC(pp)); 9511 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9512 9513 if (PP_ISPNC(pp)) { 9514 return (0); 9515 } 9516 9517 clr_valid = 0; 9518 if (PP_ISMAPPED_KPM(pp)) { 9519 caddr_t kpmvaddr; 9520 9521 ASSERT(kpm_enable); 9522 kpmvaddr = hat_kpm_page2va(pp, 1); 9523 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9524 color1 = addr_to_vcolor(kpmvaddr); 9525 clr_valid = 1; 9526 } 9527 9528 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9529 if (IS_PAHME(sfhme)) 9530 continue; 9531 hmeblkp = sfmmu_hmetohblk(sfhme); 9532 if (hmeblkp->hblk_xhat_bit) 9533 continue; 9534 9535 sfmmu_copytte(&sfhme->hme_tte, &tte); 9536 ASSERT(TTE_IS_VALID(&tte)); 9537 9538 vaddr = tte_to_vaddr(hmeblkp, tte); 9539 color = addr_to_vcolor(vaddr); 9540 9541 if (npages > 1) { 9542 /* 9543 * If there is a big mapping, make sure 9544 * 8K mapping is consistent with the big 9545 * mapping. 
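 *
 * For instance (the color count is illustrative): with two virtual
 * colors, the even constituent pages must map at color 0 and the odd
 * ones at color 1; any 8K mapping that disagrees leaves the pages TNC.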
9546 */ 9547 bcolor = i % ncolors; 9548 if (color != bcolor) { 9549 return (0); 9550 } 9551 } 9552 if (!clr_valid) { 9553 clr_valid = 1; 9554 color1 = color; 9555 } 9556 9557 if (color1 != color) { 9558 return (0); 9559 } 9560 } 9561 9562 pp = PP_PAGENEXT(pp); 9563 } 9564 9565 return (1); 9566 } 9567 9568 void 9569 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 9570 pgcnt_t npages) 9571 { 9572 kmutex_t *pmtx; 9573 int i, ncolors, bcolor; 9574 kpm_hlk_t *kpmp; 9575 cpuset_t cpuset; 9576 9577 ASSERT(pp != NULL); 9578 ASSERT(!(cache & CACHE_WRITEBACK)); 9579 9580 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 9581 pmtx = sfmmu_page_enter(pp); 9582 9583 /* 9584 * Fast path caching single unmapped page 9585 */ 9586 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 9587 flags == HAT_CACHE) { 9588 PP_CLRTNC(pp); 9589 PP_CLRPNC(pp); 9590 sfmmu_page_exit(pmtx); 9591 sfmmu_kpm_kpmp_exit(kpmp); 9592 return; 9593 } 9594 9595 /* 9596 * We need to capture all cpus in order to change cacheability 9597 * because we can't allow one cpu to access the same physical 9598 * page using a cacheable and a non-cachebale mapping at the same 9599 * time. Since we may end up walking the ism mapping list 9600 * have to grab it's lock now since we can't after all the 9601 * cpus have been captured. 9602 */ 9603 sfmmu_hat_lock_all(); 9604 mutex_enter(&ism_mlist_lock); 9605 kpreempt_disable(); 9606 cpuset = cpu_ready_set; 9607 xc_attention(cpuset); 9608 9609 if (npages > 1) { 9610 /* 9611 * Make sure all colors are flushed since the 9612 * sfmmu_page_cache() only flushes one color- 9613 * it does not know big pages. 9614 */ 9615 ncolors = CACHE_NUM_COLOR; 9616 if (flags & HAT_TMPNC) { 9617 for (i = 0; i < ncolors; i++) { 9618 sfmmu_cache_flushcolor(i, pp->p_pagenum); 9619 } 9620 cache_flush_flag = CACHE_NO_FLUSH; 9621 } 9622 } 9623 9624 for (i = 0; i < npages; i++) { 9625 9626 ASSERT(sfmmu_mlist_held(pp)); 9627 9628 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 9629 9630 if (npages > 1) { 9631 bcolor = i % ncolors; 9632 } else { 9633 bcolor = NO_VCOLOR; 9634 } 9635 9636 sfmmu_page_cache(pp, flags, cache_flush_flag, 9637 bcolor); 9638 } 9639 9640 pp = PP_PAGENEXT(pp); 9641 } 9642 9643 xt_sync(cpuset); 9644 xc_dismissed(cpuset); 9645 mutex_exit(&ism_mlist_lock); 9646 sfmmu_hat_unlock_all(); 9647 sfmmu_page_exit(pmtx); 9648 sfmmu_kpm_kpmp_exit(kpmp); 9649 kpreempt_enable(); 9650 } 9651 9652 /* 9653 * This function changes the virtual cacheability of all mappings to a 9654 * particular page. When changing from uncache to cacheable the mappings will 9655 * only be changed if all of them have the same virtual color. 9656 * We need to flush the cache in all cpus. It is possible that 9657 * a process referenced a page as cacheable but has sinced exited 9658 * and cleared the mapping list. We still to flush it but have no 9659 * state so all cpus is the only alternative. 
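 *
 * Note that this routine runs with all cpus captured (see
 * sfmmu_page_cache_array() above), which is why the tte update below
 * is expected never to fail.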
9660 */ 9661 static void 9662 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9663 { 9664 struct sf_hment *sfhme; 9665 struct hme_blk *hmeblkp; 9666 sfmmu_t *sfmmup; 9667 tte_t tte, ttemod; 9668 caddr_t vaddr; 9669 int ret, color; 9670 pfn_t pfn; 9671 9672 color = bcolor; 9673 pfn = pp->p_pagenum; 9674 9675 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9676 9677 if (IS_PAHME(sfhme)) 9678 continue; 9679 hmeblkp = sfmmu_hmetohblk(sfhme); 9680 9681 if (hmeblkp->hblk_xhat_bit) 9682 continue; 9683 9684 sfmmu_copytte(&sfhme->hme_tte, &tte); 9685 ASSERT(TTE_IS_VALID(&tte)); 9686 vaddr = tte_to_vaddr(hmeblkp, tte); 9687 color = addr_to_vcolor(vaddr); 9688 9689 #ifdef DEBUG 9690 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9691 ASSERT(color == bcolor); 9692 } 9693 #endif 9694 9695 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9696 9697 ttemod = tte; 9698 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9699 TTE_CLR_VCACHEABLE(&ttemod); 9700 } else { /* flags & HAT_CACHE */ 9701 TTE_SET_VCACHEABLE(&ttemod); 9702 } 9703 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9704 if (ret < 0) { 9705 /* 9706 * Since all cpus are captured modifytte should not 9707 * fail. 9708 */ 9709 panic("sfmmu_page_cache: write to tte failed"); 9710 } 9711 9712 sfmmup = hblktosfmmu(hmeblkp); 9713 if (cache_flush_flag == CACHE_FLUSH) { 9714 /* 9715 * Flush TSBs, TLBs and caches 9716 */ 9717 if (hmeblkp->hblk_shared) { 9718 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9719 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9720 sf_region_t *rgnp; 9721 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9722 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9723 ASSERT(srdp != NULL); 9724 rgnp = srdp->srd_hmergnp[rid]; 9725 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9726 srdp, rgnp, rid); 9727 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9728 hmeblkp, 0); 9729 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9730 } else if (sfmmup->sfmmu_ismhat) { 9731 if (flags & HAT_CACHE) { 9732 SFMMU_STAT(sf_ism_recache); 9733 } else { 9734 SFMMU_STAT(sf_ism_uncache); 9735 } 9736 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9737 pfn, CACHE_FLUSH); 9738 } else { 9739 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9740 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9741 } 9742 9743 /* 9744 * all cache entries belonging to this pfn are 9745 * now flushed. 9746 */ 9747 cache_flush_flag = CACHE_NO_FLUSH; 9748 } else { 9749 /* 9750 * Flush only TSBs and TLBs. 
9751 */ 9752 if (hmeblkp->hblk_shared) { 9753 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9754 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9755 sf_region_t *rgnp; 9756 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9757 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9758 ASSERT(srdp != NULL); 9759 rgnp = srdp->srd_hmergnp[rid]; 9760 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9761 srdp, rgnp, rid); 9762 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9763 hmeblkp, 0); 9764 } else if (sfmmup->sfmmu_ismhat) { 9765 if (flags & HAT_CACHE) { 9766 SFMMU_STAT(sf_ism_recache); 9767 } else { 9768 SFMMU_STAT(sf_ism_uncache); 9769 } 9770 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9771 pfn, CACHE_NO_FLUSH); 9772 } else { 9773 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9774 } 9775 } 9776 } 9777 9778 if (PP_ISMAPPED_KPM(pp)) 9779 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9780 9781 switch (flags) { 9782 9783 default: 9784 panic("sfmmu_pagecache: unknown flags"); 9785 break; 9786 9787 case HAT_CACHE: 9788 PP_CLRTNC(pp); 9789 PP_CLRPNC(pp); 9790 PP_SET_VCOLOR(pp, color); 9791 break; 9792 9793 case HAT_TMPNC: 9794 PP_SETTNC(pp); 9795 PP_SET_VCOLOR(pp, NO_VCOLOR); 9796 break; 9797 9798 case HAT_UNCACHE: 9799 PP_SETPNC(pp); 9800 PP_CLRTNC(pp); 9801 PP_SET_VCOLOR(pp, NO_VCOLOR); 9802 break; 9803 } 9804 } 9805 #endif /* VAC */ 9806 9807 9808 /* 9809 * Wrapper routine used to return a context. 9810 * 9811 * It's the responsibility of the caller to guarantee that the 9812 * process serializes on calls here by taking the HAT lock for 9813 * the hat. 9814 * 9815 */ 9816 static void 9817 sfmmu_get_ctx(sfmmu_t *sfmmup) 9818 { 9819 mmu_ctx_t *mmu_ctxp; 9820 uint_t pstate_save; 9821 int ret; 9822 9823 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9824 ASSERT(sfmmup != ksfmmup); 9825 9826 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9827 sfmmu_setup_tsbinfo(sfmmup); 9828 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9829 } 9830 9831 kpreempt_disable(); 9832 9833 mmu_ctxp = CPU_MMU_CTXP(CPU); 9834 ASSERT(mmu_ctxp); 9835 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9836 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9837 9838 /* 9839 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 9840 */ 9841 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9842 sfmmu_ctx_wrap_around(mmu_ctxp); 9843 9844 /* 9845 * Let the MMU set up the page sizes to use for 9846 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9847 */ 9848 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9849 mmu_set_ctx_page_sizes(sfmmup); 9850 } 9851 9852 /* 9853 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9854 * interrupts disabled to prevent race condition with wrap-around 9855 * ctx invalidatation. In sun4v, ctx invalidation also involves 9856 * a HV call to set the number of TSBs to 0. If interrupts are not 9857 * disabled until after sfmmu_load_mmustate is complete TSBs may 9858 * become assigned to INVALID_CONTEXT. This is not allowed. 
9859 */ 9860 pstate_save = sfmmu_disable_intrs(); 9861 9862 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9863 sfmmup->sfmmu_scdp != NULL) { 9864 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9865 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9866 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9867 /* debug purpose only */ 9868 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9869 != INVALID_CONTEXT); 9870 } 9871 sfmmu_load_mmustate(sfmmup); 9872 9873 sfmmu_enable_intrs(pstate_save); 9874 9875 kpreempt_enable(); 9876 } 9877 9878 /* 9879 * When all cnums are used up in a MMU, cnum will wrap around to the 9880 * next generation and start from 2. 9881 */ 9882 static void 9883 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 9884 { 9885 9886 /* caller must have disabled the preemption */ 9887 ASSERT(curthread->t_preempt >= 1); 9888 ASSERT(mmu_ctxp != NULL); 9889 9890 /* acquire Per-MMU (PM) spin lock */ 9891 mutex_enter(&mmu_ctxp->mmu_lock); 9892 9893 /* re-check to see if wrap-around is needed */ 9894 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9895 goto done; 9896 9897 SFMMU_MMU_STAT(mmu_wrap_around); 9898 9899 /* update gnum */ 9900 ASSERT(mmu_ctxp->mmu_gnum != 0); 9901 mmu_ctxp->mmu_gnum++; 9902 if (mmu_ctxp->mmu_gnum == 0 || 9903 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9904 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9905 (void *)mmu_ctxp); 9906 } 9907 9908 if (mmu_ctxp->mmu_ncpus > 1) { 9909 cpuset_t cpuset; 9910 9911 membar_enter(); /* make sure updated gnum visible */ 9912 9913 SFMMU_XCALL_STATS(NULL); 9914 9915 /* xcall to others on the same MMU to invalidate ctx */ 9916 cpuset = mmu_ctxp->mmu_cpuset; 9917 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 9918 CPUSET_DEL(cpuset, CPU->cpu_id); 9919 CPUSET_AND(cpuset, cpu_ready_set); 9920 9921 /* 9922 * Pass in INVALID_CONTEXT as the first parameter to 9923 * sfmmu_raise_tsb_exception, which invalidates the context 9924 * of any process running on the CPUs in the MMU. 9925 */ 9926 xt_some(cpuset, sfmmu_raise_tsb_exception, 9927 INVALID_CONTEXT, INVALID_CONTEXT); 9928 xt_sync(cpuset); 9929 9930 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9931 } 9932 9933 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9934 sfmmu_setctx_sec(INVALID_CONTEXT); 9935 sfmmu_clear_utsbinfo(); 9936 } 9937 9938 /* 9939 * No xcall is needed here. For sun4u systems all CPUs in context 9940 * domain share a single physical MMU therefore it's enough to flush 9941 * TLB on local CPU. On sun4v systems we use 1 global context 9942 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9943 * handler. Note that vtag_flushall_uctxs() is called 9944 * for Ultra II machine, where the equivalent flushall functionality 9945 * is implemented in SW, and only user ctx TLB entries are flushed. 9946 */ 9947 if (&vtag_flushall_uctxs != NULL) { 9948 vtag_flushall_uctxs(); 9949 } else { 9950 vtag_flushall(); 9951 } 9952 9953 /* reset mmu cnum, skips cnum 0 and 1 */ 9954 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9955 9956 done: 9957 mutex_exit(&mmu_ctxp->mmu_lock); 9958 } 9959 9960 9961 /* 9962 * For multi-threaded process, set the process context to INVALID_CONTEXT 9963 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9964 * process, we can just load the MMU state directly without having to 9965 * set context invalid. Caller must hold the hat lock since we don't 9966 * acquire it here. 
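 *
 * hat_share() above relies on this after publishing a new
 * sfmmu_ismblkpa (the reload_mmu case) so that cpus running the
 * process pick up the new ism block in their tsbmiss area.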
9967 */ 9968 static void 9969 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 9970 { 9971 uint_t cnum; 9972 uint_t pstate_save; 9973 9974 ASSERT(sfmmup != ksfmmup); 9975 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9976 9977 kpreempt_disable(); 9978 9979 /* 9980 * We check whether the pass'ed-in sfmmup is the same as the 9981 * current running proc. This is to makes sure the current proc 9982 * stays single-threaded if it already is. 9983 */ 9984 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 9985 (curthread->t_procp->p_lwpcnt == 1)) { 9986 /* single-thread */ 9987 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 9988 if (cnum != INVALID_CONTEXT) { 9989 uint_t curcnum; 9990 /* 9991 * Disable interrupts to prevent race condition 9992 * with sfmmu_ctx_wrap_around ctx invalidation. 9993 * In sun4v, ctx invalidation involves setting 9994 * TSB to NULL, hence, interrupts should be disabled 9995 * untill after sfmmu_load_mmustate is completed. 9996 */ 9997 pstate_save = sfmmu_disable_intrs(); 9998 curcnum = sfmmu_getctx_sec(); 9999 if (curcnum == cnum) 10000 sfmmu_load_mmustate(sfmmup); 10001 sfmmu_enable_intrs(pstate_save); 10002 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 10003 } 10004 } else { 10005 /* 10006 * multi-thread 10007 * or when sfmmup is not the same as the curproc. 10008 */ 10009 sfmmu_invalidate_ctx(sfmmup); 10010 } 10011 10012 kpreempt_enable(); 10013 } 10014 10015 10016 /* 10017 * Replace the specified TSB with a new TSB. This function gets called when 10018 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 10019 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 10020 * (8K). 10021 * 10022 * Caller must hold the HAT lock, but should assume any tsb_info 10023 * pointers it has are no longer valid after calling this function. 10024 * 10025 * Return values: 10026 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 10027 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 10028 * something to this tsbinfo/TSB 10029 * TSB_SUCCESS Operation succeeded 10030 */ 10031 static tsb_replace_rc_t 10032 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 10033 hatlock_t *hatlockp, uint_t flags) 10034 { 10035 struct tsb_info *new_tsbinfo = NULL; 10036 struct tsb_info *curtsb, *prevtsb; 10037 uint_t tte_sz_mask; 10038 int i; 10039 10040 ASSERT(sfmmup != ksfmmup); 10041 ASSERT(sfmmup->sfmmu_ismhat == 0); 10042 ASSERT(sfmmu_hat_lock_held(sfmmup)); 10043 ASSERT(szc <= tsb_max_growsize); 10044 10045 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 10046 return (TSB_LOSTRACE); 10047 10048 /* 10049 * Find the tsb_info ahead of this one in the list, and 10050 * also make sure that the tsb_info passed in really 10051 * exists! 10052 */ 10053 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 10054 curtsb != old_tsbinfo && curtsb != NULL; 10055 prevtsb = curtsb, curtsb = curtsb->tsb_next) 10056 ; 10057 ASSERT(curtsb != NULL); 10058 10059 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10060 /* 10061 * The process is swapped out, so just set the new size 10062 * code. When it swaps back in, we'll allocate a new one 10063 * of the new chosen size. 10064 */ 10065 curtsb->tsb_szc = szc; 10066 return (TSB_SUCCESS); 10067 } 10068 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 10069 10070 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 10071 10072 /* 10073 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 10074 * If we fail to allocate a TSB, exit. 
10075 * 10076 * If tsb grows with new tsb size > 4M and old tsb size < 4M, 10077 * then try 4M slab after the initial alloc fails. 10078 * 10079 * If tsb swapin with tsb size > 4M, then try 4M after the 10080 * initial alloc fails. 10081 */ 10082 sfmmu_hat_exit(hatlockp); 10083 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, 10084 tte_sz_mask, flags, sfmmup) && 10085 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) || 10086 (!(flags & TSB_SWAPIN) && 10087 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) || 10088 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE, 10089 tte_sz_mask, flags, sfmmup))) { 10090 (void) sfmmu_hat_enter(sfmmup); 10091 if (!(flags & TSB_SWAPIN)) 10092 SFMMU_STAT(sf_tsb_resize_failures); 10093 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10094 return (TSB_ALLOCFAIL); 10095 } 10096 (void) sfmmu_hat_enter(sfmmup); 10097 10098 /* 10099 * Re-check to make sure somebody else didn't muck with us while we 10100 * didn't hold the HAT lock. If the process swapped out, fine, just 10101 * exit; this can happen if we try to shrink the TSB from the context 10102 * of another process (such as on an ISM unmap), though it is rare. 10103 */ 10104 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10105 SFMMU_STAT(sf_tsb_resize_failures); 10106 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10107 sfmmu_hat_exit(hatlockp); 10108 sfmmu_tsbinfo_free(new_tsbinfo); 10109 (void) sfmmu_hat_enter(sfmmup); 10110 return (TSB_LOSTRACE); 10111 } 10112 10113 #ifdef DEBUG 10114 /* Reverify that the tsb_info still exists.. for debugging only */ 10115 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 10116 curtsb != old_tsbinfo && curtsb != NULL; 10117 prevtsb = curtsb, curtsb = curtsb->tsb_next) 10118 ; 10119 ASSERT(curtsb != NULL); 10120 #endif /* DEBUG */ 10121 10122 /* 10123 * Quiesce any CPUs running this process on their next TLB miss 10124 * so they atomically see the new tsb_info. We temporarily set the 10125 * context to invalid context so new threads that come on processor 10126 * after we do the xcall to cpusran will also serialize behind the 10127 * HAT lock on TLB miss and will see the new TSB. Since this short 10128 * race with a new thread coming on processor is relatively rare, 10129 * this synchronization mechanism should be cheaper than always 10130 * pausing all CPUs for the duration of the setup, which is what 10131 * the old implementation did. This is particuarly true if we are 10132 * copying a huge chunk of memory around during that window. 10133 * 10134 * The memory barriers are to make sure things stay consistent 10135 * with resume() since it does not hold the HAT lock while 10136 * walking the list of tsb_info structures. 10137 */ 10138 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 10139 /* The TSB is either growing or shrinking. */ 10140 sfmmu_invalidate_ctx(sfmmup); 10141 } else { 10142 /* 10143 * It is illegal to swap in TSBs from a process other 10144 * than a process being swapped in. This in turn 10145 * implies we do not have a valid MMU context here 10146 * since a process needs one to resolve translation 10147 * misses. 
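 *
 * Returning to the allocation step at the top of this function: the
 * retry policy encoded in the large condition around
 * sfmmu_tsbinfo_alloc() is easier to read when unrolled into a
 * hypothetical helper (illustrative sketch only; it assumes, as the
 * call above does, that sfmmu_tsbinfo_alloc() returns 0 on success
 * and nonzero on failure):
 *
 *	static int
 *	tsbinfo_alloc_with_fallback(struct tsb_info **tpp, uint_t szc,
 *	    uint_t old_szc, uint_t mask, uint_t flags, sfmmu_t *sfmmup)
 *	{
 *		if (sfmmu_tsbinfo_alloc(tpp, szc, mask, flags, sfmmup) == 0)
 *			return (0);	// got the preferred size
 *
 *		// A 4M retry only helps when growing past 4M from below
 *		// 4M, or when swapping in a TSB larger than 4M.
 *		if (!(flags & (TSB_GROW | TSB_SWAPIN)) ||
 *		    szc <= TSB_4M_SZCODE ||
 *		    (!(flags & TSB_SWAPIN) && old_szc >= TSB_4M_SZCODE))
 *			return (-1);
 *
 *		return (sfmmu_tsbinfo_alloc(tpp, TSB_4M_SZCODE, mask,
 *		    flags, sfmmup));
 *	}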
10148 */ 10149 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 10150 } 10151 10152 #ifdef DEBUG 10153 ASSERT(max_mmu_ctxdoms > 0); 10154 10155 /* 10156 * Process should have INVALID_CONTEXT on all MMUs 10157 */ 10158 for (i = 0; i < max_mmu_ctxdoms; i++) { 10159 10160 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 10161 } 10162 #endif 10163 10164 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 10165 membar_stst(); /* strict ordering required */ 10166 if (prevtsb) 10167 prevtsb->tsb_next = new_tsbinfo; 10168 else 10169 sfmmup->sfmmu_tsb = new_tsbinfo; 10170 membar_enter(); /* make sure new TSB globally visible */ 10171 10172 /* 10173 * We need to migrate TSB entries from the old TSB to the new TSB 10174 * if tsb_remap_ttes is set and the TSB is growing. 10175 */ 10176 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 10177 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 10178 10179 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10180 10181 /* 10182 * Drop the HAT lock to free our old tsb_info. 10183 */ 10184 sfmmu_hat_exit(hatlockp); 10185 10186 if ((flags & TSB_GROW) == TSB_GROW) { 10187 SFMMU_STAT(sf_tsb_grow); 10188 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 10189 SFMMU_STAT(sf_tsb_shrink); 10190 } 10191 10192 sfmmu_tsbinfo_free(old_tsbinfo); 10193 10194 (void) sfmmu_hat_enter(sfmmup); 10195 return (TSB_SUCCESS); 10196 } 10197 10198 /* 10199 * This function will re-program hat pgsz array, and invalidate the 10200 * process' context, forcing the process to switch to another 10201 * context on the next TLB miss, and therefore start using the 10202 * TLB that is reprogrammed for the new page sizes. 10203 */ 10204 void 10205 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 10206 { 10207 int i; 10208 hatlock_t *hatlockp = NULL; 10209 10210 hatlockp = sfmmu_hat_enter(sfmmup); 10211 /* USIII+-IV+ optimization, requires hat lock */ 10212 if (tmp_pgsz) { 10213 for (i = 0; i < mmu_page_sizes; i++) 10214 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10215 } 10216 SFMMU_STAT(sf_tlb_reprog_pgsz); 10217 10218 sfmmu_invalidate_ctx(sfmmup); 10219 10220 sfmmu_hat_exit(hatlockp); 10221 } 10222 10223 /* 10224 * The scd_rttecnt field in the SCD must be updated to take account of the 10225 * regions which it contains. 10226 */ 10227 static void 10228 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10229 { 10230 uint_t rid; 10231 uint_t i, j; 10232 ulong_t w; 10233 sf_region_t *rgnp; 10234 10235 ASSERT(srdp != NULL); 10236 10237 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10238 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10239 continue; 10240 } 10241 10242 j = 0; 10243 while (w) { 10244 if (!(w & 0x1)) { 10245 j++; 10246 w >>= 1; 10247 continue; 10248 } 10249 rid = (i << BT_ULSHIFT) | j; 10250 j++; 10251 w >>= 1; 10252 10253 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10254 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10255 rgnp = srdp->srd_hmergnp[rid]; 10256 ASSERT(rgnp->rgn_refcnt > 0); 10257 ASSERT(rgnp->rgn_id == rid); 10258 10259 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10260 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10261 10262 /* 10263 * Maintain the tsb0 inflation cnt for the regions 10264 * in the SCD. 
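 *
 * (As an aside, the word/bit walk of scd_region_map above, which
 * recovers each region id as (word-index << BT_ULSHIFT) | bit-index,
 * is a generic pattern.  A minimal standalone version, illustrative
 * only and with a hypothetical callback:
 *
 *	static void
 *	rgnmap_walk_model(ulong_t *bitmap, size_t nwords,
 *	    void (*cb)(uint_t rid))
 *	{
 *		size_t i;
 *		uint_t j;
 *		ulong_t w;
 *
 *		for (i = 0; i < nwords; i++) {
 *			for (w = bitmap[i], j = 0; w != 0; w >>= 1, j++) {
 *				if (w & 0x1)
 *					cb((uint_t)((i << BT_ULSHIFT) | j));
 *			}
 *		}
 *	}
 *
 * The same shape appears again in sfmmu_is_rgnva() further down.)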
10265 */ 10266 if (rgnp->rgn_pgszc >= TTE4M) { 10267 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10268 rgnp->rgn_size >> 10269 (TTE_PAGE_SHIFT(TTE8K) + 2); 10270 } 10271 } 10272 } 10273 } 10274 10275 /* 10276 * This function assumes that there are either four or six supported page 10277 * sizes and at most two programmable TLBs, so we need to decide which 10278 * page sizes are most important and then tell the MMU layer so it 10279 * can adjust the TLB page sizes accordingly (if supported). 10280 * 10281 * If these assumptions change, this function will need to be 10282 * updated to support whatever the new limits are. 10283 * 10284 * The growing flag is nonzero if we are growing the address space, 10285 * and zero if it is shrinking. This allows us to decide whether 10286 * to grow or shrink our TSB, depending upon available memory 10287 * conditions. 10288 */ 10289 static void 10290 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10291 { 10292 uint64_t ttecnt[MMU_PAGE_SIZES]; 10293 uint64_t tte8k_cnt, tte4m_cnt; 10294 uint8_t i; 10295 int sectsb_thresh; 10296 10297 /* 10298 * Kernel threads, processes with small address spaces not using 10299 * large pages, and dummy ISM HATs need not apply. 10300 */ 10301 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 10302 return; 10303 10304 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10305 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10306 return; 10307 10308 for (i = 0; i < mmu_page_sizes; i++) { 10309 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10310 sfmmup->sfmmu_ismttecnt[i]; 10311 } 10312 10313 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10314 if (&mmu_check_page_sizes) 10315 mmu_check_page_sizes(sfmmup, ttecnt); 10316 10317 /* 10318 * Calculate the number of 8k ttes to represent the span of these 10319 * pages. 10320 */ 10321 tte8k_cnt = ttecnt[TTE8K] + 10322 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10323 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10324 if (mmu_page_sizes == max_mmu_page_sizes) { 10325 tte4m_cnt = ttecnt[TTE4M] + 10326 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10327 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10328 } else { 10329 tte4m_cnt = ttecnt[TTE4M]; 10330 } 10331 10332 /* 10333 * Inflate tte8k_cnt to allow for region large page allocation failure. 10334 */ 10335 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10336 10337 /* 10338 * Inflate TSB sizes by a factor of 2 if this process 10339 * uses 4M text pages to minimize extra conflict misses 10340 * in the first TSB since without counting text pages 10341 * 8K TSB may become too small. 10342 * 10343 * Also double the size of the second TSB to minimize 10344 * extra conflict misses due to competition between 4M text pages 10345 * and data pages. 10346 * 10347 * We need to adjust the second TSB allocation threshold by the 10348 * inflation factor, since there is no point in creating a second 10349 * TSB when we know all the mappings can fit in the I/D TLBs. 10350 */ 10351 sectsb_thresh = tsb_sectsb_threshold; 10352 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10353 tte8k_cnt <<= 1; 10354 tte4m_cnt <<= 1; 10355 sectsb_thresh <<= 1; 10356 } 10357 10358 /* 10359 * Check to see if our TSB is the right size; we may need to 10360 * grow or shrink it. If the process is small, our work is 10361 * finished at this point. 
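 *
 * To make the shifts above concrete, a hypothetical example
 * (illustrative numbers only): a process mapping
 *
 *	ttecnt[TTE8K]   = 1000		(1000 x   8K)
 *	ttecnt[TTE64K]  =   10		(each 64K spans  8 x 8K,  << 3)
 *	ttecnt[TTE512K] =    2		(each 512K spans 64 x 8K, << 6)
 *	ttecnt[TTE4M]   =    3
 *	ttecnt[TTE32M]  =    1		(each 32M spans  8 x 4M,  << 3)
 *
 * yields
 *
 *	tte8k_cnt = 1000 + (10 << 3) + (2 << 6) = 1208
 *	tte4m_cnt = 3 + (1 << 3)                = 11
 *
 * before the tsb0_4minflcnt inflation and the optional doubling for
 * HAT_4MTEXT_FLAG are applied.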
10362 */ 10363 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10364 return; 10365 } 10366 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10367 } 10368 10369 static void 10370 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10371 uint64_t tte4m_cnt, int sectsb_thresh) 10372 { 10373 int tsb_bits; 10374 uint_t tsb_szc; 10375 struct tsb_info *tsbinfop; 10376 hatlock_t *hatlockp = NULL; 10377 10378 hatlockp = sfmmu_hat_enter(sfmmup); 10379 ASSERT(hatlockp != NULL); 10380 tsbinfop = sfmmup->sfmmu_tsb; 10381 ASSERT(tsbinfop != NULL); 10382 10383 /* 10384 * If we're growing, select the size based on RSS. If we're 10385 * shrinking, leave some room so we don't have to turn around and 10386 * grow again immediately. 10387 */ 10388 if (growing) 10389 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10390 else 10391 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10392 10393 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10394 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10395 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10396 hatlockp, TSB_SHRINK); 10397 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10398 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10399 hatlockp, TSB_GROW); 10400 } 10401 tsbinfop = sfmmup->sfmmu_tsb; 10402 10403 /* 10404 * With the TLB and first TSB out of the way, we need to see if 10405 * we need a second TSB for 4M pages. If we managed to reprogram 10406 * the TLB page sizes above, the process will start using this new 10407 * TSB right away; otherwise, it will start using it on the next 10408 * context switch. Either way, it's no big deal so there's no 10409 * synchronization with the trap handlers here unless we grow the 10410 * TSB (in which case it's required to prevent using the old one 10411 * after it's freed). Note: second tsb is required for 32M/256M 10412 * page sizes. 10413 */ 10414 if (tte4m_cnt > sectsb_thresh) { 10415 /* 10416 * If we're growing, select the size based on RSS. If we're 10417 * shrinking, leave some room so we don't have to turn 10418 * around and grow again immediately. 10419 */ 10420 if (growing) 10421 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10422 else 10423 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10424 if (tsbinfop->tsb_next == NULL) { 10425 struct tsb_info *newtsb; 10426 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10427 0 : TSB_ALLOC; 10428 10429 sfmmu_hat_exit(hatlockp); 10430 10431 /* 10432 * Try to allocate a TSB for 4[32|256]M pages. If we 10433 * can't get the size we want, retry w/a minimum sized 10434 * TSB. If that still didn't work, give up; we can 10435 * still run without one. 10436 */ 10437 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10438 TSB4M|TSB32M|TSB256M:TSB4M; 10439 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10440 allocflags, sfmmup)) && 10441 (tsb_szc <= TSB_4M_SZCODE || 10442 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10443 tsb_bits, allocflags, sfmmup)) && 10444 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10445 tsb_bits, allocflags, sfmmup)) { 10446 return; 10447 } 10448 10449 hatlockp = sfmmu_hat_enter(sfmmup); 10450 10451 sfmmu_invalidate_ctx(sfmmup); 10452 10453 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10454 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10455 SFMMU_STAT(sf_tsb_sectsb_create); 10456 sfmmu_hat_exit(hatlockp); 10457 return; 10458 } else { 10459 /* 10460 * It's annoying, but possible for us 10461 * to get here.. 
we dropped the HAT lock 10462 * because of locking order in the kmem 10463 * allocator, and while we were off getting 10464 * our memory, some other thread decided to 10465 * do us a favor and won the race to get a 10466 * second TSB for this process. Sigh. 10467 */ 10468 sfmmu_hat_exit(hatlockp); 10469 sfmmu_tsbinfo_free(newtsb); 10470 return; 10471 } 10472 } 10473 10474 /* 10475 * We have a second TSB, see if it's big enough. 10476 */ 10477 tsbinfop = tsbinfop->tsb_next; 10478 10479 /* 10480 * Check to see if our second TSB is the right size; 10481 * we may need to grow or shrink it. 10482 * To prevent thrashing (e.g. growing the TSB on a 10483 * subsequent map operation), only try to shrink if 10484 * the TSB reach exceeds twice the virtual address 10485 * space size. 10486 */ 10487 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10488 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10489 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10490 tsb_szc, hatlockp, TSB_SHRINK); 10491 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10492 TSB_OK_GROW()) { 10493 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10494 tsb_szc, hatlockp, TSB_GROW); 10495 } 10496 } 10497 10498 sfmmu_hat_exit(hatlockp); 10499 } 10500 10501 /* 10502 * Free up a sfmmu 10503 * Since the sfmmu is currently embedded in the hat struct we simply zero 10504 * out our fields and free up the ism map blk list if any. 10505 */ 10506 static void 10507 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10508 { 10509 ism_blk_t *blkp, *nx_blkp; 10510 #ifdef DEBUG 10511 ism_map_t *map; 10512 int i; 10513 #endif 10514 10515 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10516 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10517 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10518 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10519 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10520 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10521 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10522 10523 sfmmup->sfmmu_free = 0; 10524 sfmmup->sfmmu_ismhat = 0; 10525 10526 blkp = sfmmup->sfmmu_iblk; 10527 sfmmup->sfmmu_iblk = NULL; 10528 10529 while (blkp) { 10530 #ifdef DEBUG 10531 map = blkp->iblk_maps; 10532 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10533 ASSERT(map[i].imap_seg == 0); 10534 ASSERT(map[i].imap_ismhat == NULL); 10535 ASSERT(map[i].imap_ment == NULL); 10536 } 10537 #endif 10538 nx_blkp = blkp->iblk_next; 10539 blkp->iblk_next = NULL; 10540 blkp->iblk_nextpa = (uint64_t)-1; 10541 kmem_cache_free(ism_blk_cache, blkp); 10542 blkp = nx_blkp; 10543 } 10544 } 10545 10546 /* 10547 * Locking primitves accessed by HATLOCK macros 10548 */ 10549 10550 #define SFMMU_SPL_MTX (0x0) 10551 #define SFMMU_ML_MTX (0x1) 10552 10553 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10554 SPL_HASH(pg) : MLIST_HASH(pg)) 10555 10556 kmutex_t * 10557 sfmmu_page_enter(struct page *pp) 10558 { 10559 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10560 } 10561 10562 void 10563 sfmmu_page_exit(kmutex_t *spl) 10564 { 10565 mutex_exit(spl); 10566 } 10567 10568 int 10569 sfmmu_page_spl_held(struct page *pp) 10570 { 10571 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10572 } 10573 10574 kmutex_t * 10575 sfmmu_mlist_enter(struct page *pp) 10576 { 10577 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10578 } 10579 10580 void 10581 sfmmu_mlist_exit(kmutex_t *mml) 10582 { 10583 mutex_exit(mml); 10584 } 10585 10586 int 10587 sfmmu_mlist_held(struct page *pp) 10588 { 10589 10590 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10591 } 10592 10593 /* 10594 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). 
For 10595 * sfmmu_mlist_enter() case mml_table lock array is used and for 10596 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10597 * 10598 * The lock is taken on a root page so that it protects an operation on all 10599 * constituent pages of a large page pp belongs to. 10600 * 10601 * The routine takes a lock from the appropriate array. The lock is determined 10602 * by hashing the root page. After taking the lock this routine checks if the 10603 * root page has the same size code that was used to determine the root (i.e 10604 * that root hasn't changed). If root page has the expected p_szc field we 10605 * have the right lock and it's returned to the caller. If root's p_szc 10606 * decreased we release the lock and retry from the beginning. This case can 10607 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10608 * value and taking the lock. The number of retries due to p_szc decrease is 10609 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10610 * determined by hashing pp itself. 10611 * 10612 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10613 * possible that p_szc can increase. To increase p_szc a thread has to lock 10614 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10615 * callers that don't hold a page locked recheck if hmeblk through which pp 10616 * was found still maps this pp. If it doesn't map it anymore returned lock 10617 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10618 * p_szc increase after taking the lock it returns this lock without further 10619 * retries because in this case the caller doesn't care about which lock was 10620 * taken. The caller will drop it right away. 10621 * 10622 * After the routine returns it's guaranteed that hat_page_demote() can't 10623 * change p_szc field of any of constituent pages of a large page pp belongs 10624 * to as long as pp was either locked at least SHARED prior to this call or 10625 * the caller finds that hment that pointed to this pp still references this 10626 * pp (this also assumes that the caller holds hme hash bucket lock so that 10627 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10628 * hat_pageunload()). 10629 */ 10630 static kmutex_t * 10631 sfmmu_mlspl_enter(struct page *pp, int type) 10632 { 10633 kmutex_t *mtx; 10634 uint_t prev_rszc = UINT_MAX; 10635 page_t *rootpp; 10636 uint_t szc; 10637 uint_t rszc; 10638 uint_t pszc = pp->p_szc; 10639 10640 ASSERT(pp != NULL); 10641 10642 again: 10643 if (pszc == 0) { 10644 mtx = SFMMU_MLSPL_MTX(type, pp); 10645 mutex_enter(mtx); 10646 return (mtx); 10647 } 10648 10649 /* The lock lives in the root page */ 10650 rootpp = PP_GROUPLEADER(pp, pszc); 10651 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10652 mutex_enter(mtx); 10653 10654 /* 10655 * Return mml in the following 3 cases: 10656 * 10657 * 1) If pp itself is root since if its p_szc decreased before we took 10658 * the lock pp is still the root of smaller szc page. And if its p_szc 10659 * increased it doesn't matter what lock we return (see comment in 10660 * front of this routine). 10661 * 10662 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10663 * large page we have the right lock since any previous potential 10664 * hat_page_demote() is done demoting from greater than current root's 10665 * p_szc because hat_page_demote() changes root's p_szc last. 
No 10666 * further hat_page_demote() can start or be in progress since it 10667 * would need the same lock we currently hold. 10668 * 10669 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10670 * matter what lock we return (see comment in front of this routine). 10671 */ 10672 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10673 rszc >= prev_rszc) { 10674 return (mtx); 10675 } 10676 10677 /* 10678 * hat_page_demote() could have decreased root's p_szc. 10679 * In this case pp's p_szc must also be smaller than pszc. 10680 * Retry. 10681 */ 10682 if (rszc < pszc) { 10683 szc = pp->p_szc; 10684 if (szc < pszc) { 10685 mutex_exit(mtx); 10686 pszc = szc; 10687 goto again; 10688 } 10689 /* 10690 * pp's p_szc increased after it was decreased. 10691 * page cannot be mapped. Return current lock. The caller 10692 * will drop it right away. 10693 */ 10694 return (mtx); 10695 } 10696 10697 /* 10698 * root's p_szc is greater than pp's p_szc. 10699 * hat_page_demote() is not done with all pages 10700 * yet. Wait for it to complete. 10701 */ 10702 mutex_exit(mtx); 10703 rootpp = PP_GROUPLEADER(rootpp, rszc); 10704 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10705 mutex_enter(mtx); 10706 mutex_exit(mtx); 10707 prev_rszc = rszc; 10708 goto again; 10709 } 10710 10711 static int 10712 sfmmu_mlspl_held(struct page *pp, int type) 10713 { 10714 kmutex_t *mtx; 10715 10716 ASSERT(pp != NULL); 10717 /* The lock lives in the root page */ 10718 pp = PP_PAGEROOT(pp); 10719 ASSERT(pp != NULL); 10720 10721 mtx = SFMMU_MLSPL_MTX(type, pp); 10722 return (MUTEX_HELD(mtx)); 10723 } 10724 10725 static uint_t 10726 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10727 { 10728 struct hme_blk *hblkp; 10729 10730 10731 if (freehblkp != NULL) { 10732 mutex_enter(&freehblkp_lock); 10733 if (freehblkp != NULL) { 10734 /* 10735 * If the current thread is owning hblk_reserve OR 10736 * critical request from sfmmu_hblk_steal() 10737 * let it succeed even if freehblkcnt is really low. 10738 */ 10739 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10740 SFMMU_STAT(sf_get_free_throttle); 10741 mutex_exit(&freehblkp_lock); 10742 return (0); 10743 } 10744 freehblkcnt--; 10745 *hmeblkpp = freehblkp; 10746 hblkp = *hmeblkpp; 10747 freehblkp = hblkp->hblk_next; 10748 mutex_exit(&freehblkp_lock); 10749 hblkp->hblk_next = NULL; 10750 SFMMU_STAT(sf_get_free_success); 10751 10752 ASSERT(hblkp->hblk_hmecnt == 0); 10753 ASSERT(hblkp->hblk_vcnt == 0); 10754 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp)); 10755 10756 return (1); 10757 } 10758 mutex_exit(&freehblkp_lock); 10759 } 10760 10761 /* Check cpu hblk pending queues */ 10762 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) { 10763 hblkp = *hmeblkpp; 10764 hblkp->hblk_next = NULL; 10765 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp); 10766 10767 ASSERT(hblkp->hblk_hmecnt == 0); 10768 ASSERT(hblkp->hblk_vcnt == 0); 10769 10770 return (1); 10771 } 10772 10773 SFMMU_STAT(sf_get_free_fail); 10774 return (0); 10775 } 10776 10777 static uint_t 10778 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10779 { 10780 struct hme_blk *hblkp; 10781 10782 ASSERT(hmeblkp->hblk_hmecnt == 0); 10783 ASSERT(hmeblkp->hblk_vcnt == 0); 10784 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10785 10786 /* 10787 * If the current thread is mapping into kernel space, 10788 * let it succede even if freehblkcnt is max 10789 * so that it will avoid freeing it to kmem. 
10790 * This will prevent stack overflow due to 10791 * possible recursion since kmem_cache_free() 10792 * might require creation of a slab which 10793 * in turn needs an hmeblk to map that slab; 10794 * let's break this vicious chain at the first 10795 * opportunity. 10796 */ 10797 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10798 mutex_enter(&freehblkp_lock); 10799 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10800 SFMMU_STAT(sf_put_free_success); 10801 freehblkcnt++; 10802 hmeblkp->hblk_next = freehblkp; 10803 freehblkp = hmeblkp; 10804 mutex_exit(&freehblkp_lock); 10805 return (1); 10806 } 10807 mutex_exit(&freehblkp_lock); 10808 } 10809 10810 /* 10811 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 10812 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10813 * we are not in the process of mapping into kernel space. 10814 */ 10815 ASSERT(!critical); 10816 while (freehblkcnt > HBLK_RESERVE_CNT) { 10817 mutex_enter(&freehblkp_lock); 10818 if (freehblkcnt > HBLK_RESERVE_CNT) { 10819 freehblkcnt--; 10820 hblkp = freehblkp; 10821 freehblkp = hblkp->hblk_next; 10822 mutex_exit(&freehblkp_lock); 10823 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10824 kmem_cache_free(sfmmu8_cache, hblkp); 10825 continue; 10826 } 10827 mutex_exit(&freehblkp_lock); 10828 } 10829 SFMMU_STAT(sf_put_free_fail); 10830 return (0); 10831 } 10832 10833 static void 10834 sfmmu_hblk_swap(struct hme_blk *new) 10835 { 10836 struct hme_blk *old, *hblkp, *prev; 10837 uint64_t newpa; 10838 caddr_t base, vaddr, endaddr; 10839 struct hmehash_bucket *hmebp; 10840 struct sf_hment *osfhme, *nsfhme; 10841 page_t *pp; 10842 kmutex_t *pml; 10843 tte_t tte; 10844 struct hme_blk *list = NULL; 10845 10846 #ifdef DEBUG 10847 hmeblk_tag hblktag; 10848 struct hme_blk *found; 10849 #endif 10850 old = HBLK_RESERVE; 10851 ASSERT(!old->hblk_shared); 10852 10853 /* 10854 * save pa before bcopy clobbers it 10855 */ 10856 newpa = new->hblk_nextpa; 10857 10858 base = (caddr_t)get_hblk_base(old); 10859 endaddr = base + get_hblk_span(old); 10860 10861 /* 10862 * acquire hash bucket lock. 10863 */ 10864 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10865 SFMMU_INVALID_SHMERID); 10866 10867 /* 10868 * copy contents from old to new 10869 */ 10870 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10871 10872 /* 10873 * add new to hash chain 10874 */ 10875 sfmmu_hblk_hash_add(hmebp, new, newpa); 10876 10877 /* 10878 * search hash chain for hblk_reserve; this needs to be performed 10879 * after adding new, otherwise prev won't correspond to the hblk which 10880 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to 10881 * remove old later. 10882 */ 10883 for (prev = NULL, 10884 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old; 10885 prev = hblkp, hblkp = hblkp->hblk_next) 10886 ; 10887 10888 if (hblkp != old) 10889 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10890 10891 /* 10892 * p_mapping list is still pointing to hments in hblk_reserve; 10893 * fix up p_mapping list so that they point to hments in new. 10894 * 10895 * Since all these mappings are created by hblk_reserve_thread 10896 * on the way and it's using at least one of the buffers from each of 10897 * the newly minted slabs, there is no danger of any of these 10898 * mappings getting unloaded by another thread. 10899 * 10900 * tsbmiss could only modify ref/mod bits of hments in old/new. 
10901 * Since all of these hments hold mappings established by segkmem 10902 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10903 * have no meaning for the mappings in hblk_reserve. hments in 10904 * old and new are identical except for ref/mod bits. 10905 */ 10906 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10907 10908 HBLKTOHME(osfhme, old, vaddr); 10909 sfmmu_copytte(&osfhme->hme_tte, &tte); 10910 10911 if (TTE_IS_VALID(&tte)) { 10912 if ((pp = osfhme->hme_page) == NULL) 10913 panic("sfmmu_hblk_swap: page not mapped"); 10914 10915 pml = sfmmu_mlist_enter(pp); 10916 10917 if (pp != osfhme->hme_page) 10918 panic("sfmmu_hblk_swap: mapping changed"); 10919 10920 HBLKTOHME(nsfhme, new, vaddr); 10921 10922 HME_ADD(nsfhme, pp); 10923 HME_SUB(osfhme, pp); 10924 10925 sfmmu_mlist_exit(pml); 10926 } 10927 } 10928 10929 /* 10930 * remove old from hash chain 10931 */ 10932 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1); 10933 10934 #ifdef DEBUG 10935 10936 hblktag.htag_id = ksfmmup; 10937 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10938 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10939 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10940 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10941 10942 if (found != new) 10943 panic("sfmmu_hblk_swap: new hblk not found"); 10944 #endif 10945 10946 SFMMU_HASH_UNLOCK(hmebp); 10947 10948 /* 10949 * Reset hblk_reserve 10950 */ 10951 bzero((void *)old, HME8BLK_SZ); 10952 old->hblk_nextpa = va_to_pa((caddr_t)old); 10953 } 10954 10955 /* 10956 * Grab the mlist mutex for both pages passed in. 10957 * 10958 * low and high will be returned as pointers to the mutexes for these pages. 10959 * low refers to the mutex residing in the lower bin of the mlist hash, while 10960 * high refers to the mutex residing in the higher bin of the mlist hash. This 10961 * is due to the locking order restrictions on the same thread grabbing 10962 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10963 * 10964 * If both pages hash to the same mutex, only grab that single mutex, and 10965 * high will be returned as NULL 10966 * If the pages hash to different bins in the hash, grab the lower addressed 10967 * lock first and then the higher addressed lock in order to follow the locking 10968 * rules involved with the same thread grabbing multiple mlist mutexes. 10969 * low and high will both have non-NULL values. 10970 */ 10971 static void 10972 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10973 kmutex_t **low, kmutex_t **high) 10974 { 10975 kmutex_t *mml_targ, *mml_repl; 10976 10977 /* 10978 * no need to do the dance around szc as in sfmmu_mlist_enter() 10979 * because this routine is only called by hat_page_relocate() and all 10980 * targ and repl pages are already locked EXCL so szc can't change. 
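 *
 * The low/high convention described above is the usual address-
 * ordered way of taking two locks of the same class without risking
 * deadlock against a thread grabbing them in the opposite order.  In
 * isolation (illustrative sketch only, hypothetical helper name):
 *
 *	static void
 *	dual_lock_model(kmutex_t *a, kmutex_t *b,
 *	    kmutex_t **lowp, kmutex_t **highp)
 *	{
 *		if (a == b) {
 *			*lowp = a;
 *			*highp = NULL;		// only one lock to take
 *		} else if (a < b) {
 *			*lowp = a;
 *			*highp = b;
 *		} else {
 *			*lowp = b;
 *			*highp = a;
 *		}
 *		mutex_enter(*lowp);
 *		if (*highp != NULL)
 *			mutex_enter(*highp);
 *	}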
10981 */ 10982 10983 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10984 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10985 10986 if (mml_targ == mml_repl) { 10987 *low = mml_targ; 10988 *high = NULL; 10989 } else { 10990 if (mml_targ < mml_repl) { 10991 *low = mml_targ; 10992 *high = mml_repl; 10993 } else { 10994 *low = mml_repl; 10995 *high = mml_targ; 10996 } 10997 } 10998 10999 mutex_enter(*low); 11000 if (*high) 11001 mutex_enter(*high); 11002 } 11003 11004 static void 11005 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 11006 { 11007 if (high) 11008 mutex_exit(high); 11009 mutex_exit(low); 11010 } 11011 11012 hatlock_t * 11013 sfmmu_hat_enter(sfmmu_t *sfmmup) 11014 { 11015 hatlock_t *hatlockp; 11016 11017 if (sfmmup != ksfmmup) { 11018 hatlockp = TSB_HASH(sfmmup); 11019 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 11020 return (hatlockp); 11021 } 11022 return (NULL); 11023 } 11024 11025 static hatlock_t * 11026 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 11027 { 11028 hatlock_t *hatlockp; 11029 11030 if (sfmmup != ksfmmup) { 11031 hatlockp = TSB_HASH(sfmmup); 11032 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 11033 return (NULL); 11034 return (hatlockp); 11035 } 11036 return (NULL); 11037 } 11038 11039 void 11040 sfmmu_hat_exit(hatlock_t *hatlockp) 11041 { 11042 if (hatlockp != NULL) 11043 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 11044 } 11045 11046 static void 11047 sfmmu_hat_lock_all(void) 11048 { 11049 int i; 11050 for (i = 0; i < SFMMU_NUM_LOCK; i++) 11051 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 11052 } 11053 11054 static void 11055 sfmmu_hat_unlock_all(void) 11056 { 11057 int i; 11058 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 11059 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 11060 } 11061 11062 int 11063 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 11064 { 11065 ASSERT(sfmmup != ksfmmup); 11066 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 11067 } 11068 11069 /* 11070 * Locking primitives to provide consistency between ISM unmap 11071 * and other operations. Since ISM unmap can take a long time, we 11072 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 11073 * contention on the hatlock buckets while ISM segments are being 11074 * unmapped. The tradeoff is that the flags don't prevent priority 11075 * inversion from occurring, so we must request kernel priority in 11076 * case we have to sleep to keep from getting buried while holding 11077 * the HAT_ISMBUSY flag set, which in turn could block other kernel 11078 * threads from running (for example, in sfmmu_uvatopfn()). 
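 *
 * (The hatlock itself, like the SPL and mlist mutexes earlier in this
 * file, is an example of lock striping: rather than embedding a lock
 * in every hat, the object is hashed to one of a fixed array of
 * mutexes.  A minimal model, illustrative only and with hypothetical
 * names; TSB_HASH() plays the role of the hash below:
 *
 *	#define	NSTRIPES_MODEL	128	// must be a power of two
 *	static kmutex_t stripe_lock_model[NSTRIPES_MODEL];
 *
 *	static kmutex_t *
 *	stripe_enter_model(void *obj)
 *	{
 *		// Shift out low bits that are constant due to alignment,
 *		// then mask down to an index.
 *		kmutex_t *mp = &stripe_lock_model[
 *		    ((uintptr_t)obj >> 6) & (NSTRIPES_MODEL - 1)];
 *
 *		mutex_enter(mp);
 *		return (mp);
 *	}
 *
 * The cost is possible false contention when unrelated objects hash
 * to the same stripe; the benefit is a small, fixed lock footprint.)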
11079 */ 11080 static void 11081 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 11082 { 11083 hatlock_t *hatlockp; 11084 11085 THREAD_KPRI_REQUEST(); 11086 if (!hatlock_held) 11087 hatlockp = sfmmu_hat_enter(sfmmup); 11088 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 11089 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11090 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 11091 if (!hatlock_held) 11092 sfmmu_hat_exit(hatlockp); 11093 } 11094 11095 static void 11096 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 11097 { 11098 hatlock_t *hatlockp; 11099 11100 if (!hatlock_held) 11101 hatlockp = sfmmu_hat_enter(sfmmup); 11102 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 11103 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 11104 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11105 if (!hatlock_held) 11106 sfmmu_hat_exit(hatlockp); 11107 THREAD_KPRI_RELEASE(); 11108 } 11109 11110 /* 11111 * 11112 * Algorithm: 11113 * 11114 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 11115 * hblks. 11116 * 11117 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 11118 * 11119 * (a) try to return an hblk from reserve pool of free hblks; 11120 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 11121 * and return hblk_reserve. 11122 * 11123 * (3) call kmem_cache_alloc() to allocate hblk; 11124 * 11125 * (a) if hblk_reserve_lock is held by the current thread, 11126 * atomically replace hblk_reserve by the hblk that is 11127 * returned by kmem_cache_alloc; release hblk_reserve_lock 11128 * and call kmem_cache_alloc() again. 11129 * (b) if reserve pool is not full, add the hblk that is 11130 * returned by kmem_cache_alloc to reserve pool and 11131 * call kmem_cache_alloc again. 11132 * 11133 */ 11134 static struct hme_blk * 11135 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 11136 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 11137 uint_t flags, uint_t rid) 11138 { 11139 struct hme_blk *hmeblkp = NULL; 11140 struct hme_blk *newhblkp; 11141 struct hme_blk *shw_hblkp = NULL; 11142 struct kmem_cache *sfmmu_cache = NULL; 11143 uint64_t hblkpa; 11144 ulong_t index; 11145 uint_t owner; /* set to 1 if using hblk_reserve */ 11146 uint_t forcefree; 11147 int sleep; 11148 sf_srd_t *srdp; 11149 sf_region_t *rgnp; 11150 11151 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11152 ASSERT(hblktag.htag_rid == rid); 11153 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 11154 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11155 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 11156 11157 /* 11158 * If segkmem is not created yet, allocate from static hmeblks 11159 * created at the end of startup_modules(). See the block comment 11160 * in startup_modules() describing how we estimate the number of 11161 * static hmeblks that will be needed during re-map. 11162 */ 11163 if (!hblk_alloc_dynamic) { 11164 11165 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11166 11167 if (size == TTE8K) { 11168 index = nucleus_hblk8.index; 11169 if (index >= nucleus_hblk8.len) { 11170 /* 11171 * If we panic here, see startup_modules() to 11172 * make sure that we are calculating the 11173 * number of hblk8's that we need correctly. 11174 */ 11175 prom_panic("no nucleus hblk8 to allocate"); 11176 } 11177 hmeblkp = 11178 (struct hme_blk *)&nucleus_hblk8.list[index]; 11179 nucleus_hblk8.index++; 11180 SFMMU_STAT(sf_hblk8_nalloc); 11181 } else { 11182 index = nucleus_hblk1.index; 11183 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 11184 /* 11185 * If we panic here, see startup_modules(). 
11186 * Most likely you need to update the 11187 * calculation of the number of hblk1 elements 11188 * that the kernel needs to boot. 11189 */ 11190 prom_panic("no nucleus hblk1 to allocate"); 11191 } 11192 hmeblkp = 11193 (struct hme_blk *)&nucleus_hblk1.list[index]; 11194 nucleus_hblk1.index++; 11195 SFMMU_STAT(sf_hblk1_nalloc); 11196 } 11197 11198 goto hblk_init; 11199 } 11200 11201 SFMMU_HASH_UNLOCK(hmebp); 11202 11203 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 11204 if (mmu_page_sizes == max_mmu_page_sizes) { 11205 if (size < TTE256M) 11206 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11207 size, flags); 11208 } else { 11209 if (size < TTE4M) 11210 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11211 size, flags); 11212 } 11213 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11214 /* 11215 * Shared hmes use per region bitmaps in rgn_hmeflag 11216 * rather than shadow hmeblks to keep track of the 11217 * mapping sizes which have been allocated for the region. 11218 * Here we cleanup old invalid hmeblks with this rid, 11219 * which may be left around by pageunload(). 11220 */ 11221 int ttesz; 11222 caddr_t va; 11223 caddr_t eva = vaddr + TTEBYTES(size); 11224 11225 ASSERT(sfmmup != KHATID); 11226 11227 srdp = sfmmup->sfmmu_srdp; 11228 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11229 rgnp = srdp->srd_hmergnp[rid]; 11230 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11231 ASSERT(rgnp->rgn_refcnt != 0); 11232 ASSERT(size <= rgnp->rgn_pgszc); 11233 11234 ttesz = HBLK_MIN_TTESZ; 11235 do { 11236 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11237 continue; 11238 } 11239 11240 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11241 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11242 } else if (ttesz < size) { 11243 for (va = vaddr; va < eva; 11244 va += TTEBYTES(ttesz)) { 11245 sfmmu_cleanup_rhblk(srdp, va, rid, 11246 ttesz); 11247 } 11248 } 11249 } while (++ttesz <= rgnp->rgn_pgszc); 11250 } 11251 11252 fill_hblk: 11253 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11254 11255 if (owner && size == TTE8K) { 11256 11257 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11258 /* 11259 * We are really in a tight spot. We already own 11260 * hblk_reserve and we need another hblk. In anticipation 11261 * of this kind of scenario, we specifically set aside 11262 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11263 * by owner of hblk_reserve. 11264 */ 11265 SFMMU_STAT(sf_hblk_recurse_cnt); 11266 11267 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11268 panic("sfmmu_hblk_alloc: reserve list is empty"); 11269 11270 goto hblk_verify; 11271 } 11272 11273 ASSERT(!owner); 11274 11275 if ((flags & HAT_NO_KALLOC) == 0) { 11276 11277 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11278 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11279 11280 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11281 hmeblkp = sfmmu_hblk_steal(size); 11282 } else { 11283 /* 11284 * if we are the owner of hblk_reserve, 11285 * swap hblk_reserve with hmeblkp and 11286 * start a fresh life. Hope things go 11287 * better this time. 
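 *
 * The hblk_reserve protocol exists because allocating an hmeblk from
 * kmem can itself require mapping a fresh slab, which needs another
 * hmeblk.  Stripped to its core, the recursion-breaking idea looks
 * like this (illustrative sketch only, hypothetical names; the real
 * code additionally swaps the reserve back out via sfmmu_hblk_swap()
 * and keeps a small free list, see sfmmu_get_free_hblk()):
 *
 *	static obj_t	reserve_obj_model;	// statically allocated
 *	static kmutex_t	reserve_lock_model;
 *	static kthread_t *reserve_owner_model;
 *
 *	static obj_t *
 *	obj_alloc_model(void)
 *	{
 *		obj_t *op;
 *
 *		if ((op = kmem_alloc(sizeof (*op), KM_NOSLEEP)) != NULL)
 *			return (op);
 *
 *		// Allocation may have recursed into us; hand out the
 *		// one preallocated object.  The lock is intentionally
 *		// held until the owner replaces the reserve, so a
 *		// second borrower blocks instead of double-using it.
 *		mutex_enter(&reserve_lock_model);
 *		reserve_owner_model = curthread;
 *		return (&reserve_obj_model);
 *	}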
11288 */ 11289 if (hblk_reserve_thread == curthread) { 11290 ASSERT(sfmmu_cache == sfmmu8_cache); 11291 sfmmu_hblk_swap(hmeblkp); 11292 hblk_reserve_thread = NULL; 11293 mutex_exit(&hblk_reserve_lock); 11294 goto fill_hblk; 11295 } 11296 /* 11297 * let's donate this hblk to our reserve list if 11298 * we are not mapping kernel range 11299 */ 11300 if (size == TTE8K && sfmmup != KHATID) { 11301 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11302 goto fill_hblk; 11303 } 11304 } 11305 } else { 11306 /* 11307 * We are here to map the slab in sfmmu8_cache; let's 11308 * check if we could tap our reserve list; if successful, 11309 * this will avoid the pain of going thru sfmmu_hblk_swap 11310 */ 11311 SFMMU_STAT(sf_hblk_slab_cnt); 11312 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11313 /* 11314 * let's start hblk_reserve dance 11315 */ 11316 SFMMU_STAT(sf_hblk_reserve_cnt); 11317 owner = 1; 11318 mutex_enter(&hblk_reserve_lock); 11319 hmeblkp = HBLK_RESERVE; 11320 hblk_reserve_thread = curthread; 11321 } 11322 } 11323 11324 hblk_verify: 11325 ASSERT(hmeblkp != NULL); 11326 set_hblk_sz(hmeblkp, size); 11327 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11328 SFMMU_HASH_LOCK(hmebp); 11329 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11330 if (newhblkp != NULL) { 11331 SFMMU_HASH_UNLOCK(hmebp); 11332 if (hmeblkp != HBLK_RESERVE) { 11333 /* 11334 * This is really tricky! 11335 * 11336 * vmem_alloc(vmem_seg_arena) 11337 * vmem_alloc(vmem_internal_arena) 11338 * segkmem_alloc(heap_arena) 11339 * vmem_alloc(heap_arena) 11340 * page_create() 11341 * hat_memload() 11342 * kmem_cache_free() 11343 * kmem_cache_alloc() 11344 * kmem_slab_create() 11345 * vmem_alloc(kmem_internal_arena) 11346 * segkmem_alloc(heap_arena) 11347 * vmem_alloc(heap_arena) 11348 * page_create() 11349 * hat_memload() 11350 * kmem_cache_free() 11351 * ... 11352 * 11353 * Thus, hat_memload() could call kmem_cache_free 11354 * for enough number of times that we could easily 11355 * hit the bottom of the stack or run out of reserve 11356 * list of vmem_seg structs. So, we must donate 11357 * this hblk to reserve list if it's allocated 11358 * from sfmmu8_cache *and* mapping kernel range. 11359 * We don't need to worry about freeing hmeblk1's 11360 * to kmem since they don't map any kmem slabs. 11361 * 11362 * Note: When segkmem supports largepages, we must 11363 * free hmeblk1's to reserve list as well. 11364 */ 11365 forcefree = (sfmmup == KHATID) ? 1 : 0; 11366 if (size == TTE8K && 11367 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11368 goto re_verify; 11369 } 11370 ASSERT(sfmmup != KHATID); 11371 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11372 } else { 11373 /* 11374 * Hey! we don't need hblk_reserve any more. 11375 */ 11376 ASSERT(owner); 11377 hblk_reserve_thread = NULL; 11378 mutex_exit(&hblk_reserve_lock); 11379 owner = 0; 11380 } 11381 re_verify: 11382 /* 11383 * let's check if the goodies are still present 11384 */ 11385 SFMMU_HASH_LOCK(hmebp); 11386 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11387 if (newhblkp != NULL) { 11388 /* 11389 * return newhblkp if it's not hblk_reserve; 11390 * if newhblkp is hblk_reserve, return it 11391 * _only if_ we are the owner of hblk_reserve. 
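 *
 * The hblk_verify/re_verify steps above are an instance of a common
 * pattern: allocate with the bucket lock dropped, then re-search the
 * hash under the lock and discard the new object if a racing thread
 * inserted one first.  In isolation (illustrative sketch only, all
 * names hypothetical):
 *
 *	// Called with bp->b_lock held after an unsuccessful search;
 *	// returns with bp->b_lock held.
 *	static entry_t *
 *	create_or_reuse_model(bucket_t *bp, uintptr_t key)
 *	{
 *		entry_t *ep, *nep;
 *
 *		mutex_exit(&bp->b_lock);	// can't sleep holding it
 *		nep = entry_alloc_model(key);	// may sleep
 *		mutex_enter(&bp->b_lock);
 *
 *		if ((ep = hash_search_model(bp, key)) != NULL) {
 *			// A racing thread inserted one while the lock
 *			// was dropped; discard ours and use theirs.
 *			entry_free_model(nep);
 *			return (ep);
 *		}
 *		hash_insert_model(bp, nep);
 *		return (nep);
 *	}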
11392 */ 11393 if (newhblkp != HBLK_RESERVE || owner) { 11394 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11395 newhblkp->hblk_shared); 11396 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11397 !newhblkp->hblk_shared); 11398 return (newhblkp); 11399 } else { 11400 /* 11401 * we just hit hblk_reserve in the hash and 11402 * we are not the owner of that; 11403 * 11404 * block until hblk_reserve_thread completes 11405 * swapping hblk_reserve and try the dance 11406 * once again. 11407 */ 11408 SFMMU_HASH_UNLOCK(hmebp); 11409 mutex_enter(&hblk_reserve_lock); 11410 mutex_exit(&hblk_reserve_lock); 11411 SFMMU_STAT(sf_hblk_reserve_hit); 11412 goto fill_hblk; 11413 } 11414 } else { 11415 /* 11416 * it's no more! try the dance once again. 11417 */ 11418 SFMMU_HASH_UNLOCK(hmebp); 11419 goto fill_hblk; 11420 } 11421 } 11422 11423 hblk_init: 11424 if (SFMMU_IS_SHMERID_VALID(rid)) { 11425 uint16_t tteflag = 0x1 << 11426 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size); 11427 11428 if (!(rgnp->rgn_hmeflags & tteflag)) { 11429 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11430 } 11431 hmeblkp->hblk_shared = 1; 11432 } else { 11433 hmeblkp->hblk_shared = 0; 11434 } 11435 set_hblk_sz(hmeblkp, size); 11436 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11437 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11438 hmeblkp->hblk_tag = hblktag; 11439 hmeblkp->hblk_shadow = shw_hblkp; 11440 hblkpa = hmeblkp->hblk_nextpa; 11441 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 11442 11443 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11444 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11445 ASSERT(hmeblkp->hblk_hmecnt == 0); 11446 ASSERT(hmeblkp->hblk_vcnt == 0); 11447 ASSERT(hmeblkp->hblk_lckcnt == 0); 11448 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11449 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11450 return (hmeblkp); 11451 } 11452 11453 /* 11454 * This function cleans up the hme_blk and returns it to the free list. 11455 */ 11456 /* ARGSUSED */ 11457 static void 11458 sfmmu_hblk_free(struct hme_blk **listp) 11459 { 11460 struct hme_blk *hmeblkp, *next_hmeblkp; 11461 int size; 11462 uint_t critical; 11463 uint64_t hblkpa; 11464 11465 ASSERT(*listp != NULL); 11466 11467 hmeblkp = *listp; 11468 while (hmeblkp != NULL) { 11469 next_hmeblkp = hmeblkp->hblk_next; 11470 ASSERT(!hmeblkp->hblk_hmecnt); 11471 ASSERT(!hmeblkp->hblk_vcnt); 11472 ASSERT(!hmeblkp->hblk_lckcnt); 11473 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11474 ASSERT(hmeblkp->hblk_shared == 0); 11475 ASSERT(hmeblkp->hblk_shw_bit == 0); 11476 ASSERT(hmeblkp->hblk_shadow == NULL); 11477 11478 hblkpa = va_to_pa((caddr_t)hmeblkp); 11479 ASSERT(hblkpa != (uint64_t)-1); 11480 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0; 11481 11482 size = get_hblk_ttesz(hmeblkp); 11483 hmeblkp->hblk_next = NULL; 11484 hmeblkp->hblk_nextpa = hblkpa; 11485 11486 if (hmeblkp->hblk_nuc_bit == 0) { 11487 11488 if (size != TTE8K || 11489 !sfmmu_put_free_hblk(hmeblkp, critical)) 11490 kmem_cache_free(get_hblk_cache(hmeblkp), 11491 hmeblkp); 11492 } 11493 hmeblkp = next_hmeblkp; 11494 } 11495 } 11496 11497 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11498 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11499 11500 static uint_t sfmmu_hblk_steal_twice; 11501 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11502 11503 /* 11504 * Steal a hmeblk from user or kernel hme hash lists. 11505 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11506 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11507 * tap into critical reserve of freehblkp. 
11508 * Note: We remain looping in this routine until we find one. 11509 */ 11510 static struct hme_blk * 11511 sfmmu_hblk_steal(int size) 11512 { 11513 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11514 struct hmehash_bucket *hmebp; 11515 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11516 uint64_t hblkpa; 11517 int i; 11518 uint_t loop_cnt = 0, critical; 11519 11520 for (;;) { 11521 /* Check cpu hblk pending queues */ 11522 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) { 11523 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 11524 ASSERT(hmeblkp->hblk_hmecnt == 0); 11525 ASSERT(hmeblkp->hblk_vcnt == 0); 11526 return (hmeblkp); 11527 } 11528 11529 if (size == TTE8K) { 11530 critical = 11531 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11532 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11533 return (hmeblkp); 11534 } 11535 11536 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 11537 uhmehash_steal_hand; 11538 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11539 11540 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11541 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11542 SFMMU_HASH_LOCK(hmebp); 11543 hmeblkp = hmebp->hmeblkp; 11544 hblkpa = hmebp->hmeh_nextpa; 11545 pr_hblk = NULL; 11546 while (hmeblkp) { 11547 /* 11548 * check if it is a hmeblk that is not locked 11549 * and not shared. skip shadow hmeblks with 11550 * shadow_mask set i.e valid count non zero. 11551 */ 11552 if ((get_hblk_ttesz(hmeblkp) == size) && 11553 (hmeblkp->hblk_shw_bit == 0 || 11554 hmeblkp->hblk_vcnt == 0) && 11555 (hmeblkp->hblk_lckcnt == 0)) { 11556 /* 11557 * there is a high probability that we 11558 * will find a free one. search some 11559 * buckets for a free hmeblk initially 11560 * before unloading a valid hmeblk. 11561 */ 11562 if ((hmeblkp->hblk_vcnt == 0 && 11563 hmeblkp->hblk_hmecnt == 0) || (i >= 11564 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11565 if (sfmmu_steal_this_hblk(hmebp, 11566 hmeblkp, hblkpa, pr_hblk)) { 11567 /* 11568 * Hblk is unloaded 11569 * successfully 11570 */ 11571 break; 11572 } 11573 } 11574 } 11575 pr_hblk = hmeblkp; 11576 hblkpa = hmeblkp->hblk_nextpa; 11577 hmeblkp = hmeblkp->hblk_next; 11578 } 11579 11580 SFMMU_HASH_UNLOCK(hmebp); 11581 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11582 hmebp = uhme_hash; 11583 } 11584 uhmehash_steal_hand = hmebp; 11585 11586 if (hmeblkp != NULL) 11587 break; 11588 11589 /* 11590 * in the worst case, look for a free one in the kernel 11591 * hash table. 11592 */ 11593 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11594 SFMMU_HASH_LOCK(hmebp); 11595 hmeblkp = hmebp->hmeblkp; 11596 hblkpa = hmebp->hmeh_nextpa; 11597 pr_hblk = NULL; 11598 while (hmeblkp) { 11599 /* 11600 * check if it is free hmeblk 11601 */ 11602 if ((get_hblk_ttesz(hmeblkp) == size) && 11603 (hmeblkp->hblk_lckcnt == 0) && 11604 (hmeblkp->hblk_vcnt == 0) && 11605 (hmeblkp->hblk_hmecnt == 0)) { 11606 if (sfmmu_steal_this_hblk(hmebp, 11607 hmeblkp, hblkpa, pr_hblk)) { 11608 break; 11609 } else { 11610 /* 11611 * Cannot fail since we have 11612 * hash lock. 
11613 */ 11614 panic("fail to steal?"); 11615 } 11616 } 11617 11618 pr_hblk = hmeblkp; 11619 hblkpa = hmeblkp->hblk_nextpa; 11620 hmeblkp = hmeblkp->hblk_next; 11621 } 11622 11623 SFMMU_HASH_UNLOCK(hmebp); 11624 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11625 hmebp = khme_hash; 11626 } 11627 11628 if (hmeblkp != NULL) 11629 break; 11630 sfmmu_hblk_steal_twice++; 11631 } 11632 return (hmeblkp); 11633 } 11634 11635 /* 11636 * This routine does real work to prepare a hblk to be "stolen" by 11637 * unloading the mappings, updating shadow counts .... 11638 * It returns 1 if the block is ready to be reused (stolen), or 0 11639 * means the block cannot be stolen yet- pageunload is still working 11640 * on this hblk. 11641 */ 11642 static int 11643 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11644 uint64_t hblkpa, struct hme_blk *pr_hblk) 11645 { 11646 int shw_size, vshift; 11647 struct hme_blk *shw_hblkp; 11648 caddr_t vaddr; 11649 uint_t shw_mask, newshw_mask; 11650 struct hme_blk *list = NULL; 11651 11652 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11653 11654 /* 11655 * check if the hmeblk is free, unload if necessary 11656 */ 11657 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11658 sfmmu_t *sfmmup; 11659 demap_range_t dmr; 11660 11661 sfmmup = hblktosfmmu(hmeblkp); 11662 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11663 return (0); 11664 } 11665 DEMAP_RANGE_INIT(sfmmup, &dmr); 11666 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11667 (caddr_t)get_hblk_base(hmeblkp), 11668 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11669 DEMAP_RANGE_FLUSH(&dmr); 11670 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11671 /* 11672 * Pageunload is working on the same hblk. 11673 */ 11674 return (0); 11675 } 11676 11677 sfmmu_hblk_steal_unload_count++; 11678 } 11679 11680 ASSERT(hmeblkp->hblk_lckcnt == 0); 11681 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11682 11683 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1); 11684 hmeblkp->hblk_nextpa = hblkpa; 11685 11686 shw_hblkp = hmeblkp->hblk_shadow; 11687 if (shw_hblkp) { 11688 ASSERT(!hmeblkp->hblk_shared); 11689 shw_size = get_hblk_ttesz(shw_hblkp); 11690 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11691 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11692 ASSERT(vshift < 8); 11693 /* 11694 * Atomically clear shadow mask bit 11695 */ 11696 do { 11697 shw_mask = shw_hblkp->hblk_shw_mask; 11698 ASSERT(shw_mask & (1 << vshift)); 11699 newshw_mask = shw_mask & ~(1 << vshift); 11700 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 11701 shw_mask, newshw_mask); 11702 } while (newshw_mask != shw_mask); 11703 hmeblkp->hblk_shadow = NULL; 11704 } 11705 11706 /* 11707 * remove shadow bit if we are stealing an unused shadow hmeblk. 11708 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11709 * we are indeed allocating a shadow hmeblk. 
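 *
 * The cas32() retry loop just above is the standard lock-free
 * read-modify-write for clearing a single bit in a word that other
 * CPUs may be updating concurrently.  In isolation (illustrative
 * sketch only, hypothetical name):
 *
 *	static void
 *	atomic_clear_bit_model(uint32_t *maskp, uint_t bit)
 *	{
 *		uint32_t oval, nval;
 *
 *		do {
 *			oval = *maskp;
 *			nval = oval & ~(1U << bit);
 *			// cas32() returns the value found in *maskp; if
 *			// it no longer matches oval, another CPU changed
 *			// the word and the update must be recomputed.
 *		} while (cas32(maskp, oval, nval) != oval);
 *	}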
11710 */ 11711 hmeblkp->hblk_shw_bit = 0; 11712 11713 if (hmeblkp->hblk_shared) { 11714 sf_srd_t *srdp; 11715 sf_region_t *rgnp; 11716 uint_t rid; 11717 11718 srdp = hblktosrd(hmeblkp); 11719 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11720 rid = hmeblkp->hblk_tag.htag_rid; 11721 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11722 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11723 rgnp = srdp->srd_hmergnp[rid]; 11724 ASSERT(rgnp != NULL); 11725 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11726 hmeblkp->hblk_shared = 0; 11727 } 11728 11729 sfmmu_hblk_steal_count++; 11730 SFMMU_STAT(sf_steal_count); 11731 11732 return (1); 11733 } 11734 11735 struct hme_blk * 11736 sfmmu_hmetohblk(struct sf_hment *sfhme) 11737 { 11738 struct hme_blk *hmeblkp; 11739 struct sf_hment *sfhme0; 11740 struct hme_blk *hblk_dummy = 0; 11741 11742 /* 11743 * No dummy sf_hments, please. 11744 */ 11745 ASSERT(sfhme->hme_tte.ll != 0); 11746 11747 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11748 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11749 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11750 11751 return (hmeblkp); 11752 } 11753 11754 /* 11755 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11756 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11757 * KM_SLEEP allocation. 11758 * 11759 * Return 0 on success, -1 otherwise. 11760 */ 11761 static void 11762 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11763 { 11764 struct tsb_info *tsbinfop, *next; 11765 tsb_replace_rc_t rc; 11766 boolean_t gotfirst = B_FALSE; 11767 11768 ASSERT(sfmmup != ksfmmup); 11769 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11770 11771 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11772 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11773 } 11774 11775 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11776 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11777 } else { 11778 return; 11779 } 11780 11781 ASSERT(sfmmup->sfmmu_tsb != NULL); 11782 11783 /* 11784 * Loop over all tsbinfo's replacing them with ones that actually have 11785 * a TSB. If any of the replacements ever fail, bail out of the loop. 11786 */ 11787 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11788 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11789 next = tsbinfop->tsb_next; 11790 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11791 hatlockp, TSB_SWAPIN); 11792 if (rc != TSB_SUCCESS) { 11793 break; 11794 } 11795 gotfirst = B_TRUE; 11796 } 11797 11798 switch (rc) { 11799 case TSB_SUCCESS: 11800 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11801 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11802 return; 11803 case TSB_LOSTRACE: 11804 break; 11805 case TSB_ALLOCFAIL: 11806 break; 11807 default: 11808 panic("sfmmu_replace_tsb returned unrecognized failure code " 11809 "%d", rc); 11810 } 11811 11812 /* 11813 * In this case, we failed to get one of our TSBs. If we failed to 11814 * get the first TSB, get one of minimum size (8KB). Walk the list 11815 * and throw away the tsbinfos, starting where the allocation failed; 11816 * we can get by with just one TSB as long as we don't leave the 11817 * SWAPPED tsbinfo structures lying around. 
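 *
 * (Aside: the address arithmetic in sfmmu_hmetohblk() above is the
 * classic "container of" idiom, recovering the enclosing hme_blk from
 * a pointer to one of its embedded sf_hments.  Written with
 * offsetof() the two steps are, equivalently:
 *
 *	struct sf_hment *first = sfhme - sfhme->hme_tte.tte_hmenum;
 *	struct hme_blk *hblk = (struct hme_blk *)((char *)first -
 *	    offsetof(struct hme_blk, hblk_hme[0]));
 *
 * i.e. step back to hme 0 of the block, then subtract the offset of
 * the hblk_hme[] array within struct hme_blk.)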
 */
	tsbinfop = sfmmup->sfmmu_tsb;
	next = tsbinfop->tsb_next;
	tsbinfop->tsb_next = NULL;

	sfmmu_hat_exit(hatlockp);
	for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
		next = tsbinfop->tsb_next;
		sfmmu_tsbinfo_free(tsbinfop);
	}
	hatlockp = sfmmu_hat_enter(sfmmup);

	/*
	 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
	 * pages.
	 */
	if (!gotfirst) {
		tsbinfop = sfmmup->sfmmu_tsb;
		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
		    hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
		ASSERT(rc == TSB_SUCCESS);
	}

	SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
}

static int
sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
{
	ulong_t bix = 0;
	uint_t rid;
	sf_region_t *rgnp;

	ASSERT(srdp != NULL);
	ASSERT(srdp->srd_refcnt != 0);

	w <<= BT_ULSHIFT;
	while (bmw) {
		if (!(bmw & 0x1)) {
			bix++;
			bmw >>= 1;
			continue;
		}
		rid = w | bix;
		rgnp = srdp->srd_hmergnp[rid];
		ASSERT(rgnp->rgn_refcnt > 0);
		ASSERT(rgnp->rgn_id == rid);
		if (addr < rgnp->rgn_saddr ||
		    addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
			bix++;
			bmw >>= 1;
		} else {
			return (1);
		}
	}
	return (0);
}

/*
 * Handle exceptions for the low level tsb_handler.
 *
 * There are many scenarios that could land us here:
 *
 * If the context is invalid we land here. The context can be invalid
 * for three reasons: 1) we couldn't allocate a new context and now need
 * to perform a wrap-around operation in order to allocate a new context;
 * 2) the context was invalidated to change the pagesize programming;
 * 3) the ISM or TSB configuration is changing for this process and we
 * are forced in here to do a synchronization operation. If the context
 * is valid we can be here from the window trap handler, in which case
 * we just call trap() to handle the fault.
 *
 * Note that the process will run in INVALID_CONTEXT before
 * faulting into here and subsequently loading the MMU registers
 * (including the TSB base register) associated with this process.
 * For this reason, the trap handlers must all test for
 * INVALID_CONTEXT before attempting to access any registers other
 * than the context registers.
 */
void
sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
{
	sfmmu_t *sfmmup, *shsfmmup;
	uint_t ctxtype;
	klwp_id_t lwp;
	char lwp_save_state;
	hatlock_t *hatlockp, *shatlockp;
	struct tsb_info *tsbinfop;
	struct tsbmiss *tsbmp;
	sf_scd_t *scdp;

	SFMMU_STAT(sf_tsb_exceptions);
	SFMMU_MMU_STAT(mmu_tsb_exceptions);
	sfmmup = astosfmmu(curthread->t_procp->p_as);
	/*
	 * Note that on sun4u the tag access register contains the ctxnum,
	 * while sun4v passes the ctxtype in the tag access register.
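	 *
	 * In both cases the decoding below is the same; roughly (a sketch
	 * of the usage, not of the exact register layout):
	 *
	 *	ctxtype = tagaccess & TAGACC_CTX_MASK;		   (low bits)
	 *	addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);   (faulting VA)
	 *
	 * i.e. the low-order bits carry the context information and the
	 * page-aligned remainder carries the faulting virtual address.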
11916 */ 11917 ctxtype = tagaccess & TAGACC_CTX_MASK; 11918 11919 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT); 11920 ASSERT(sfmmup->sfmmu_ismhat == 0); 11921 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 11922 ctxtype == INVALID_CONTEXT); 11923 11924 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) { 11925 /* 11926 * We may land here because shme bitmap and pagesize 11927 * flags are updated lazily in tsbmiss area on other cpus. 11928 * If we detect here that tsbmiss area is out of sync with 11929 * sfmmu update it and retry the trapped instruction. 11930 * Otherwise call trap(). 11931 */ 11932 int ret = 0; 11933 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11934 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11935 11936 /* 11937 * Must set lwp state to LWP_SYS before 11938 * trying to acquire any adaptive lock 11939 */ 11940 lwp = ttolwp(curthread); 11941 ASSERT(lwp); 11942 lwp_save_state = lwp->lwp_state; 11943 lwp->lwp_state = LWP_SYS; 11944 11945 hatlockp = sfmmu_hat_enter(sfmmup); 11946 kpreempt_disable(); 11947 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11948 ASSERT(sfmmup == tsbmp->usfmmup); 11949 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11950 ~tteflag_mask) || 11951 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11952 ~tteflag_mask)) { 11953 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11954 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11955 ret = 1; 11956 } 11957 if (sfmmup->sfmmu_srdp != NULL) { 11958 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11959 ulong_t *tm = tsbmp->shmermap; 11960 ulong_t i; 11961 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11962 ulong_t d = tm[i] ^ sm[i]; 11963 if (d) { 11964 if (d & sm[i]) { 11965 if (!ret && sfmmu_is_rgnva( 11966 sfmmup->sfmmu_srdp, 11967 addr, i, d & sm[i])) { 11968 ret = 1; 11969 } 11970 } 11971 tm[i] = sm[i]; 11972 } 11973 } 11974 } 11975 kpreempt_enable(); 11976 sfmmu_hat_exit(hatlockp); 11977 lwp->lwp_state = lwp_save_state; 11978 if (ret) { 11979 return; 11980 } 11981 } else if (ctxtype == INVALID_CONTEXT) { 11982 /* 11983 * First, make sure we come out of here with a valid ctx, 11984 * since if we don't get one we'll simply loop on the 11985 * faulting instruction. 11986 * 11987 * If the ISM mappings are changing, the TSB is relocated, 11988 * the process is swapped, the process is joining SCD or 11989 * leaving SCD or shared regions we serialize behind the 11990 * controlling thread with hat lock, sfmmu_flags and 11991 * sfmmu_tsb_cv condition variable. 11992 */ 11993 11994 /* 11995 * Must set lwp state to LWP_SYS before 11996 * trying to acquire any adaptive lock 11997 */ 11998 lwp = ttolwp(curthread); 11999 ASSERT(lwp); 12000 lwp_save_state = lwp->lwp_state; 12001 lwp->lwp_state = LWP_SYS; 12002 12003 hatlockp = sfmmu_hat_enter(sfmmup); 12004 retry: 12005 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 12006 shsfmmup = scdp->scd_sfmmup; 12007 ASSERT(shsfmmup != NULL); 12008 12009 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 12010 tsbinfop = tsbinfop->tsb_next) { 12011 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 12012 /* drop the private hat lock */ 12013 sfmmu_hat_exit(hatlockp); 12014 /* acquire the shared hat lock */ 12015 shatlockp = sfmmu_hat_enter(shsfmmup); 12016 /* 12017 * recheck to see if anything changed 12018 * after we drop the private hat lock. 
					 */
					if (sfmmup->sfmmu_scdp == scdp &&
					    shsfmmup == scdp->scd_sfmmup) {
						sfmmu_tsb_chk_reloc(shsfmmup,
						    shatlockp);
					}
					sfmmu_hat_exit(shatlockp);
					hatlockp = sfmmu_hat_enter(sfmmup);
					goto retry;
				}
			}
		}

		for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
		    tsbinfop = tsbinfop->tsb_next) {
			if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
				cv_wait(&sfmmup->sfmmu_tsb_cv,
				    HATLOCK_MUTEXP(hatlockp));
				goto retry;
			}
		}

		/*
		 * Wait for ISM maps to be updated.
		 */
		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
			cv_wait(&sfmmup->sfmmu_tsb_cv,
			    HATLOCK_MUTEXP(hatlockp));
			goto retry;
		}

		/* Is this process joining an SCD? */
		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
			/*
			 * Flush private TSB and setup shared TSB.
			 * sfmmu_finish_join_scd() does not drop the
			 * hat lock.
			 */
			sfmmu_finish_join_scd(sfmmup);
			SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
		}

		/*
		 * If we're swapping in, get TSB(s). Note that we must do
		 * this before we get a ctx or load the MMU state. Once
		 * we swap in we have to recheck to make sure the TSB(s) and
		 * ISM mappings didn't change while we slept.
		 */
		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
			sfmmu_tsb_swapin(sfmmup, hatlockp);
			goto retry;
		}

		sfmmu_get_ctx(sfmmup);

		sfmmu_hat_exit(hatlockp);
		/*
		 * Must restore lwp_state if not calling
		 * trap() for further processing. Restore
		 * it anyway.
		 */
		lwp->lwp_state = lwp_save_state;
		return;
	}
	trap(rp, (caddr_t)tagaccess, traptype, 0);
}

static void
sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
{
	struct tsb_info *tp;

	ASSERT(sfmmu_hat_lock_held(sfmmup));

	for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
		if (tp->tsb_flags & TSB_RELOC_FLAG) {
			cv_wait(&sfmmup->sfmmu_tsb_cv,
			    HATLOCK_MUTEXP(hatlockp));
			break;
		}
	}
}

/*
 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
 * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page
 * lock rather than spinning, to avoid send-mondo timeouts with
 * interrupts enabled. When the lock is acquired it is immediately
 * released and we return to sfmmu_vatopfn just after the GET_TTE call.
 */
void
sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
{
	struct page **pp;

	(void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
	as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
}

/*
 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
 * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
 * cross traps which cannot be handled while spinning in the
 * trap handlers. Simply enter and exit the kpr_suspendlock spin
 * mutex, which is held by the holder of the suspend bit, and then
 * retry the trapped instruction after unwinding.
12126 */ 12127 /*ARGSUSED*/ 12128 void 12129 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 12130 { 12131 ASSERT(curthread != kreloc_thread); 12132 mutex_enter(&kpr_suspendlock); 12133 mutex_exit(&kpr_suspendlock); 12134 } 12135 12136 /* 12137 * This routine could be optimized to reduce the number of xcalls by flushing 12138 * the entire TLBs if region reference count is above some threshold but the 12139 * tradeoff will depend on the size of the TLB. So for now flush the specific 12140 * page a context at a time. 12141 * 12142 * If uselocks is 0 then it's called after all cpus were captured and all the 12143 * hat locks were taken. In this case don't take the region lock by relying on 12144 * the order of list region update operations in hat_join_region(), 12145 * hat_leave_region() and hat_dup_region(). The ordering in those routines 12146 * guarantees that list is always forward walkable and reaches active sfmmus 12147 * regardless of where xc_attention() captures a cpu. 12148 */ 12149 cpuset_t 12150 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp, 12151 struct hme_blk *hmeblkp, int uselocks) 12152 { 12153 sfmmu_t *sfmmup; 12154 cpuset_t cpuset; 12155 cpuset_t rcpuset; 12156 hatlock_t *hatlockp; 12157 uint_t rid = rgnp->rgn_id; 12158 sf_rgn_link_t *rlink; 12159 sf_scd_t *scdp; 12160 12161 ASSERT(hmeblkp->hblk_shared); 12162 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 12163 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 12164 12165 CPUSET_ZERO(rcpuset); 12166 if (uselocks) { 12167 mutex_enter(&rgnp->rgn_mutex); 12168 } 12169 sfmmup = rgnp->rgn_sfmmu_head; 12170 while (sfmmup != NULL) { 12171 if (uselocks) { 12172 hatlockp = sfmmu_hat_enter(sfmmup); 12173 } 12174 12175 /* 12176 * When an SCD is created the SCD hat is linked on the sfmmu 12177 * region lists for each hme region which is part of the 12178 * SCD. If we find an SCD hat, when walking these lists, 12179 * then we flush the shared TSBs, if we find a private hat, 12180 * which is part of an SCD, but where the region 12181 * is not part of the SCD then we flush the private TSBs. 12182 * 12183 * If the Rock page size register is present, then SCDs 12184 * may contain both shared and private pages, so we cannot 12185 * use this optimization to avoid flushing private TSBs. 
12186 */ 12187 if (pgsz_search_on == 0 && 12188 !sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12189 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 12190 scdp = sfmmup->sfmmu_scdp; 12191 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 12192 if (uselocks) { 12193 sfmmu_hat_exit(hatlockp); 12194 } 12195 goto next; 12196 } 12197 } 12198 12199 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12200 12201 kpreempt_disable(); 12202 cpuset = sfmmup->sfmmu_cpusran; 12203 CPUSET_AND(cpuset, cpu_ready_set); 12204 CPUSET_DEL(cpuset, CPU->cpu_id); 12205 SFMMU_XCALL_STATS(sfmmup); 12206 xt_some(cpuset, vtag_flushpage_tl1, 12207 (uint64_t)addr, (uint64_t)sfmmup); 12208 vtag_flushpage(addr, (uint64_t)sfmmup); 12209 if (uselocks) { 12210 sfmmu_hat_exit(hatlockp); 12211 } 12212 kpreempt_enable(); 12213 CPUSET_OR(rcpuset, cpuset); 12214 12215 next: 12216 /* LINTED: constant in conditional context */ 12217 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 12218 ASSERT(rlink != NULL); 12219 sfmmup = rlink->next; 12220 } 12221 if (uselocks) { 12222 mutex_exit(&rgnp->rgn_mutex); 12223 } 12224 return (rcpuset); 12225 } 12226 12227 /* 12228 * This routine takes an sfmmu pointer and the va for an adddress in an 12229 * ISM region as input and returns the corresponding region id in ism_rid. 12230 * The return value of 1 indicates that a region has been found and ism_rid 12231 * is valid, otherwise 0 is returned. 12232 */ 12233 static int 12234 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid) 12235 { 12236 ism_blk_t *ism_blkp; 12237 int i; 12238 ism_map_t *ism_map; 12239 #ifdef DEBUG 12240 struct hat *ism_hatid; 12241 #endif 12242 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12243 12244 ism_blkp = sfmmup->sfmmu_iblk; 12245 while (ism_blkp != NULL) { 12246 ism_map = ism_blkp->iblk_maps; 12247 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 12248 if ((va >= ism_start(ism_map[i])) && 12249 (va < ism_end(ism_map[i]))) { 12250 12251 *ism_rid = ism_map[i].imap_rid; 12252 #ifdef DEBUG 12253 ism_hatid = ism_map[i].imap_ismhat; 12254 ASSERT(ism_hatid == ism_sfmmup); 12255 ASSERT(ism_hatid->sfmmu_ismhat); 12256 #endif 12257 return (1); 12258 } 12259 } 12260 ism_blkp = ism_blkp->iblk_next; 12261 } 12262 return (0); 12263 } 12264 12265 /* 12266 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 12267 * This routine may be called with all cpu's captured. Therefore, the 12268 * caller is responsible for holding all locks and disabling kernel 12269 * preemption. 12270 */ 12271 /* ARGSUSED */ 12272 static void 12273 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 12274 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 12275 { 12276 cpuset_t cpuset; 12277 caddr_t va; 12278 ism_ment_t *ment; 12279 sfmmu_t *sfmmup; 12280 #ifdef VAC 12281 int vcolor; 12282 #endif 12283 12284 sf_scd_t *scdp; 12285 uint_t ism_rid; 12286 12287 ASSERT(!hmeblkp->hblk_shared); 12288 /* 12289 * Walk the ism_hat's mapping list and flush the page 12290 * from every hat sharing this ism_hat. This routine 12291 * may be called while all cpu's have been captured. 12292 * Therefore we can't attempt to grab any locks. For now 12293 * this means we will protect the ism mapping list under 12294 * a single lock which will be grabbed by the caller. 12295 * If hat_share/unshare scalibility becomes a performance 12296 * problem then we may need to re-think ism mapping list locking. 
12297 */ 12298 ASSERT(ism_sfmmup->sfmmu_ismhat); 12299 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 12300 addr = addr - ISMID_STARTADDR; 12301 12302 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 12303 12304 sfmmup = ment->iment_hat; 12305 12306 va = ment->iment_base_va; 12307 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 12308 12309 /* 12310 * When an SCD is created the SCD hat is linked on the ism 12311 * mapping lists for each ISM segment which is part of the 12312 * SCD. If we find an SCD hat, when walking these lists, 12313 * then we flush the shared TSBs, if we find a private hat, 12314 * which is part of an SCD, but where the region 12315 * corresponding to this va is not part of the SCD then we 12316 * flush the private TSBs. 12317 * 12318 * If the Rock page size register is present, then SCDs 12319 * may contain both shared and private pages, so we cannot 12320 * use this optimization to avoid flushing private TSBs. 12321 */ 12322 if (pgsz_search_on == 0 && 12323 !sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12324 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12325 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12326 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12327 &ism_rid)) { 12328 cmn_err(CE_PANIC, 12329 "can't find matching ISM rid!"); 12330 } 12331 12332 scdp = sfmmup->sfmmu_scdp; 12333 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12334 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12335 ism_rid)) { 12336 continue; 12337 } 12338 } 12339 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12340 12341 cpuset = sfmmup->sfmmu_cpusran; 12342 CPUSET_AND(cpuset, cpu_ready_set); 12343 CPUSET_DEL(cpuset, CPU->cpu_id); 12344 SFMMU_XCALL_STATS(sfmmup); 12345 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12346 (uint64_t)sfmmup); 12347 vtag_flushpage(va, (uint64_t)sfmmup); 12348 12349 #ifdef VAC 12350 /* 12351 * Flush D$ 12352 * When flushing D$ we must flush all 12353 * cpu's. See sfmmu_cache_flush(). 12354 */ 12355 if (cache_flush_flag == CACHE_FLUSH) { 12356 cpuset = cpu_ready_set; 12357 CPUSET_DEL(cpuset, CPU->cpu_id); 12358 12359 SFMMU_XCALL_STATS(sfmmup); 12360 vcolor = addr_to_vcolor(va); 12361 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12362 vac_flushpage(pfnum, vcolor); 12363 } 12364 #endif /* VAC */ 12365 } 12366 } 12367 12368 /* 12369 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12370 * a particular virtual address and ctx. If noflush is set we do not 12371 * flush the TLB/TSB. This function may or may not be called with the 12372 * HAT lock held. 12373 */ 12374 static void 12375 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12376 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12377 int hat_lock_held) 12378 { 12379 #ifdef VAC 12380 int vcolor; 12381 #endif 12382 cpuset_t cpuset; 12383 hatlock_t *hatlockp; 12384 12385 ASSERT(!hmeblkp->hblk_shared); 12386 12387 #if defined(lint) && !defined(VAC) 12388 pfnum = pfnum; 12389 cpu_flag = cpu_flag; 12390 cache_flush_flag = cache_flush_flag; 12391 #endif 12392 12393 /* 12394 * There is no longer a need to protect against ctx being 12395 * stolen here since we don't store the ctx in the TSB anymore. 12396 */ 12397 #ifdef VAC 12398 vcolor = addr_to_vcolor(addr); 12399 #endif 12400 12401 /* 12402 * We must hold the hat lock during the flush of TLB, 12403 * to avoid a race with sfmmu_invalidate_ctx(), where 12404 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12405 * causing TLB demap routine to skip flush on that MMU. 
12406 * If the context on a MMU has already been set to 12407 * INVALID_CONTEXT, we just get an extra flush on 12408 * that MMU. 12409 */ 12410 if (!hat_lock_held && !tlb_noflush) 12411 hatlockp = sfmmu_hat_enter(sfmmup); 12412 12413 kpreempt_disable(); 12414 if (!tlb_noflush) { 12415 /* 12416 * Flush the TSB and TLB. 12417 */ 12418 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12419 12420 cpuset = sfmmup->sfmmu_cpusran; 12421 CPUSET_AND(cpuset, cpu_ready_set); 12422 CPUSET_DEL(cpuset, CPU->cpu_id); 12423 12424 SFMMU_XCALL_STATS(sfmmup); 12425 12426 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12427 (uint64_t)sfmmup); 12428 12429 vtag_flushpage(addr, (uint64_t)sfmmup); 12430 } 12431 12432 if (!hat_lock_held && !tlb_noflush) 12433 sfmmu_hat_exit(hatlockp); 12434 12435 #ifdef VAC 12436 /* 12437 * Flush the D$ 12438 * 12439 * Even if the ctx is stolen, we need to flush the 12440 * cache. Our ctx stealer only flushes the TLBs. 12441 */ 12442 if (cache_flush_flag == CACHE_FLUSH) { 12443 if (cpu_flag & FLUSH_ALL_CPUS) { 12444 cpuset = cpu_ready_set; 12445 } else { 12446 cpuset = sfmmup->sfmmu_cpusran; 12447 CPUSET_AND(cpuset, cpu_ready_set); 12448 } 12449 CPUSET_DEL(cpuset, CPU->cpu_id); 12450 SFMMU_XCALL_STATS(sfmmup); 12451 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12452 vac_flushpage(pfnum, vcolor); 12453 } 12454 #endif /* VAC */ 12455 kpreempt_enable(); 12456 } 12457 12458 /* 12459 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12460 * address and ctx. If noflush is set we do not currently do anything. 12461 * This function may or may not be called with the HAT lock held. 12462 */ 12463 static void 12464 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12465 int tlb_noflush, int hat_lock_held) 12466 { 12467 cpuset_t cpuset; 12468 hatlock_t *hatlockp; 12469 12470 ASSERT(!hmeblkp->hblk_shared); 12471 12472 /* 12473 * If the process is exiting we have nothing to do. 12474 */ 12475 if (tlb_noflush) 12476 return; 12477 12478 /* 12479 * Flush TSB. 12480 */ 12481 if (!hat_lock_held) 12482 hatlockp = sfmmu_hat_enter(sfmmup); 12483 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12484 12485 kpreempt_disable(); 12486 12487 cpuset = sfmmup->sfmmu_cpusran; 12488 CPUSET_AND(cpuset, cpu_ready_set); 12489 CPUSET_DEL(cpuset, CPU->cpu_id); 12490 12491 SFMMU_XCALL_STATS(sfmmup); 12492 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12493 12494 vtag_flushpage(addr, (uint64_t)sfmmup); 12495 12496 if (!hat_lock_held) 12497 sfmmu_hat_exit(hatlockp); 12498 12499 kpreempt_enable(); 12500 12501 } 12502 12503 /* 12504 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12505 * call handler that can flush a range of pages to save on xcalls. 12506 */ 12507 static int sfmmu_xcall_save; 12508 12509 /* 12510 * this routine is never used for demaping addresses backed by SRD hmeblks. 12511 */ 12512 static void 12513 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12514 { 12515 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12516 hatlock_t *hatlockp; 12517 cpuset_t cpuset; 12518 uint64_t sfmmu_pgcnt; 12519 pgcnt_t pgcnt = 0; 12520 int pgunload = 0; 12521 int dirtypg = 0; 12522 caddr_t addr = dmrp->dmr_addr; 12523 caddr_t eaddr; 12524 uint64_t bitvec = dmrp->dmr_bitvec; 12525 12526 ASSERT(bitvec & 1); 12527 12528 /* 12529 * Flush TSB and calculate number of pages to flush. 
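	 *
	 * dmr_bitvec has one bit per MMU_PAGESIZE page starting at
	 * dmr_addr; a set bit marks a page that was unloaded and must be
	 * flushed. For example (illustrative values only, 8K base pages),
	 * with dmr_addr = A and bitvec = 0x3b (binary 111011, bit 0 being
	 * the low-order bit and corresponding to A):
	 *
	 *	A, A+8K			dirty -> one TSB range flush
	 *	A+16K			clean -> skipped
	 *	A+24K .. A+40K		dirty -> one TSB range flush
	 *
	 * i.e. each run of consecutive set bits becomes a single
	 * sfmmu_unload_tsb_range() call below.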
12530 */ 12531 while (bitvec != 0) { 12532 dirtypg = 0; 12533 /* 12534 * Find the first page to flush and then count how many 12535 * pages there are after it that also need to be flushed. 12536 * This way the number of TSB flushes is minimized. 12537 */ 12538 while ((bitvec & 1) == 0) { 12539 pgcnt++; 12540 addr += MMU_PAGESIZE; 12541 bitvec >>= 1; 12542 } 12543 while (bitvec & 1) { 12544 dirtypg++; 12545 bitvec >>= 1; 12546 } 12547 eaddr = addr + ptob(dirtypg); 12548 hatlockp = sfmmu_hat_enter(sfmmup); 12549 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12550 sfmmu_hat_exit(hatlockp); 12551 pgunload += dirtypg; 12552 addr = eaddr; 12553 pgcnt += dirtypg; 12554 } 12555 12556 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12557 if (sfmmup->sfmmu_free == 0) { 12558 addr = dmrp->dmr_addr; 12559 bitvec = dmrp->dmr_bitvec; 12560 12561 /* 12562 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12563 * as it will be used to pack argument for xt_some 12564 */ 12565 ASSERT((pgcnt > 0) && 12566 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12567 12568 /* 12569 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 12570 * the low 6 bits of sfmmup. This is doable since pgcnt 12571 * always >= 1. 12572 */ 12573 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12574 sfmmu_pgcnt = (uint64_t)sfmmup | 12575 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12576 12577 /* 12578 * We must hold the hat lock during the flush of TLB, 12579 * to avoid a race with sfmmu_invalidate_ctx(), where 12580 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12581 * causing TLB demap routine to skip flush on that MMU. 12582 * If the context on a MMU has already been set to 12583 * INVALID_CONTEXT, we just get an extra flush on 12584 * that MMU. 12585 */ 12586 hatlockp = sfmmu_hat_enter(sfmmup); 12587 kpreempt_disable(); 12588 12589 cpuset = sfmmup->sfmmu_cpusran; 12590 CPUSET_AND(cpuset, cpu_ready_set); 12591 CPUSET_DEL(cpuset, CPU->cpu_id); 12592 12593 SFMMU_XCALL_STATS(sfmmup); 12594 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12595 sfmmu_pgcnt); 12596 12597 for (; bitvec != 0; bitvec >>= 1) { 12598 if (bitvec & 1) 12599 vtag_flushpage(addr, (uint64_t)sfmmup); 12600 addr += MMU_PAGESIZE; 12601 } 12602 kpreempt_enable(); 12603 sfmmu_hat_exit(hatlockp); 12604 12605 sfmmu_xcall_save += (pgunload-1); 12606 } 12607 dmrp->dmr_bitvec = 0; 12608 } 12609 12610 /* 12611 * In cases where we need to synchronize with TLB/TSB miss trap 12612 * handlers, _and_ need to flush the TLB, it's a lot easier to 12613 * throw away the context from the process than to do a 12614 * special song and dance to keep things consistent for the 12615 * handlers. 12616 * 12617 * Since the process suddenly ends up without a context and our caller 12618 * holds the hat lock, threads that fault after this function is called 12619 * will pile up on the lock. We can then do whatever we need to 12620 * atomically from the context of the caller. The first blocked thread 12621 * to resume executing will get the process a new context, and the 12622 * process will resume executing. 12623 * 12624 * One added advantage of this approach is that on MMUs that 12625 * support a "flush all" operation, we will delay the flush until 12626 * cnum wrap-around, and then flush the TLB one time. This 12627 * is rather rare, so it's a lot less expensive than making 8000 12628 * x-calls to flush the TLB 8000 times. 12629 * 12630 * A per-process (PP) lock is used to synchronize ctx allocations in 12631 * resume() and ctx invalidations here. 
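 *
 * The expected calling pattern is therefore roughly (a sketch only,
 * not a verbatim caller from this file):
 *
 *	hatlockp = sfmmu_hat_enter(sfmmup);
 *	sfmmu_invalidate_ctx(sfmmup);
 *	... perform whatever change required the invalidation ...
 *	sfmmu_hat_exit(hatlockp);
 *
 * The first thread to fault after the hat lock is dropped allocates
 * a fresh context on behalf of the process.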
12632 */ 12633 void 12634 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 12635 { 12636 cpuset_t cpuset; 12637 int cnum, currcnum; 12638 mmu_ctx_t *mmu_ctxp; 12639 int i; 12640 uint_t pstate_save; 12641 12642 SFMMU_STAT(sf_ctx_inv); 12643 12644 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12645 ASSERT(sfmmup != ksfmmup); 12646 12647 kpreempt_disable(); 12648 12649 mmu_ctxp = CPU_MMU_CTXP(CPU); 12650 ASSERT(mmu_ctxp); 12651 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 12652 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 12653 12654 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 12655 12656 pstate_save = sfmmu_disable_intrs(); 12657 12658 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 12659 /* set HAT cnum invalid across all context domains. */ 12660 for (i = 0; i < max_mmu_ctxdoms; i++) { 12661 12662 cnum = sfmmup->sfmmu_ctxs[i].cnum; 12663 if (cnum == INVALID_CONTEXT) { 12664 continue; 12665 } 12666 12667 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 12668 } 12669 membar_enter(); /* make sure globally visible to all CPUs */ 12670 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 12671 12672 sfmmu_enable_intrs(pstate_save); 12673 12674 cpuset = sfmmup->sfmmu_cpusran; 12675 CPUSET_DEL(cpuset, CPU->cpu_id); 12676 CPUSET_AND(cpuset, cpu_ready_set); 12677 if (!CPUSET_ISNULL(cpuset)) { 12678 SFMMU_XCALL_STATS(sfmmup); 12679 xt_some(cpuset, sfmmu_raise_tsb_exception, 12680 (uint64_t)sfmmup, INVALID_CONTEXT); 12681 xt_sync(cpuset); 12682 SFMMU_STAT(sf_tsb_raise_exception); 12683 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 12684 } 12685 12686 /* 12687 * If the hat to-be-invalidated is the same as the current 12688 * process on local CPU we need to invalidate 12689 * this CPU context as well. 12690 */ 12691 if ((sfmmu_getctx_sec() == currcnum) && 12692 (currcnum != INVALID_CONTEXT)) { 12693 /* sets shared context to INVALID too */ 12694 sfmmu_setctx_sec(INVALID_CONTEXT); 12695 sfmmu_clear_utsbinfo(); 12696 } 12697 12698 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID); 12699 12700 kpreempt_enable(); 12701 12702 /* 12703 * we hold the hat lock, so nobody should allocate a context 12704 * for us yet 12705 */ 12706 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 12707 } 12708 12709 #ifdef VAC 12710 /* 12711 * We need to flush the cache in all cpus. It is possible that 12712 * a process referenced a page as cacheable but has sinced exited 12713 * and cleared the mapping list. We still to flush it but have no 12714 * state so all cpus is the only alternative. 12715 */ 12716 void 12717 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 12718 { 12719 cpuset_t cpuset; 12720 12721 kpreempt_disable(); 12722 cpuset = cpu_ready_set; 12723 CPUSET_DEL(cpuset, CPU->cpu_id); 12724 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12725 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12726 xt_sync(cpuset); 12727 vac_flushpage(pfnum, vcolor); 12728 kpreempt_enable(); 12729 } 12730 12731 void 12732 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 12733 { 12734 cpuset_t cpuset; 12735 12736 ASSERT(vcolor >= 0); 12737 12738 kpreempt_disable(); 12739 cpuset = cpu_ready_set; 12740 CPUSET_DEL(cpuset, CPU->cpu_id); 12741 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12742 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 12743 xt_sync(cpuset); 12744 vac_flushcolor(vcolor, pfnum); 12745 kpreempt_enable(); 12746 } 12747 #endif /* VAC */ 12748 12749 /* 12750 * We need to prevent processes from accessing the TSB using a cached physical 12751 * address. 
It's alright if they try to access the TSB via virtual address 12752 * since they will just fault on that virtual address once the mapping has 12753 * been suspended. 12754 */ 12755 #pragma weak sendmondo_in_recover 12756 12757 /* ARGSUSED */ 12758 static int 12759 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 12760 { 12761 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12762 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12763 hatlock_t *hatlockp; 12764 sf_scd_t *scdp; 12765 12766 if (flags != HAT_PRESUSPEND) 12767 return (0); 12768 12769 /* 12770 * If tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must 12771 * be a shared hat, then set SCD's tsbinfo's flag. 12772 * If tsb is not shared, sfmmup is a private hat, then set 12773 * its private tsbinfo's flag. 12774 */ 12775 hatlockp = sfmmu_hat_enter(sfmmup); 12776 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 12777 12778 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) { 12779 sfmmu_tsb_inv_ctx(sfmmup); 12780 sfmmu_hat_exit(hatlockp); 12781 } else { 12782 /* release lock on the shared hat */ 12783 sfmmu_hat_exit(hatlockp); 12784 /* sfmmup is a shared hat */ 12785 ASSERT(sfmmup->sfmmu_scdhat); 12786 scdp = sfmmup->sfmmu_scdp; 12787 ASSERT(scdp != NULL); 12788 /* get private hat from the scd list */ 12789 mutex_enter(&scdp->scd_mutex); 12790 sfmmup = scdp->scd_sf_list; 12791 while (sfmmup != NULL) { 12792 hatlockp = sfmmu_hat_enter(sfmmup); 12793 /* 12794 * We do not call sfmmu_tsb_inv_ctx here because 12795 * sendmondo_in_recover check is only needed for 12796 * sun4u. 12797 */ 12798 sfmmu_invalidate_ctx(sfmmup); 12799 sfmmu_hat_exit(hatlockp); 12800 sfmmup = sfmmup->sfmmu_scd_link.next; 12801 12802 } 12803 mutex_exit(&scdp->scd_mutex); 12804 } 12805 return (0); 12806 } 12807 12808 static void 12809 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12810 { 12811 extern uint32_t sendmondo_in_recover; 12812 12813 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12814 12815 /* 12816 * For Cheetah+ Erratum 25: 12817 * Wait for any active recovery to finish. We can't risk 12818 * relocating the TSB of the thread running mondo_recover_proc() 12819 * since, if we did that, we would deadlock. The scenario we are 12820 * trying to avoid is as follows: 12821 * 12822 * THIS CPU RECOVER CPU 12823 * -------- ----------- 12824 * Begins recovery, walking through TSB 12825 * hat_pagesuspend() TSB TTE 12826 * TLB miss on TSB TTE, spins at TL1 12827 * xt_sync() 12828 * send_mondo_timeout() 12829 * mondo_recover_proc() 12830 * ((deadlocked)) 12831 * 12832 * The second half of the workaround is that mondo_recover_proc() 12833 * checks to see if the tsb_info has the RELOC flag set, and if it 12834 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12835 * and hence avoiding the TLB miss that could result in a deadlock. 
12836 */ 12837 if (&sendmondo_in_recover) { 12838 membar_enter(); /* make sure RELOC flag visible */ 12839 while (sendmondo_in_recover) { 12840 drv_usecwait(1); 12841 membar_consumer(); 12842 } 12843 } 12844 12845 sfmmu_invalidate_ctx(sfmmup); 12846 } 12847 12848 /* ARGSUSED */ 12849 static int 12850 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12851 void *tsbinfo, pfn_t newpfn) 12852 { 12853 hatlock_t *hatlockp; 12854 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12855 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12856 12857 if (flags != HAT_POSTUNSUSPEND) 12858 return (0); 12859 12860 hatlockp = sfmmu_hat_enter(sfmmup); 12861 12862 SFMMU_STAT(sf_tsb_reloc); 12863 12864 /* 12865 * The process may have swapped out while we were relocating one 12866 * of its TSBs. If so, don't bother doing the setup since the 12867 * process can't be using the memory anymore. 12868 */ 12869 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12870 ASSERT(va == tsbinfop->tsb_va); 12871 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12872 12873 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12874 sfmmu_inv_tsb(tsbinfop->tsb_va, 12875 TSB_BYTES(tsbinfop->tsb_szc)); 12876 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12877 } 12878 } 12879 12880 membar_exit(); 12881 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12882 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12883 12884 sfmmu_hat_exit(hatlockp); 12885 12886 return (0); 12887 } 12888 12889 /* 12890 * Allocate and initialize a tsb_info structure. Note that we may or may not 12891 * allocate a TSB here, depending on the flags passed in. 12892 */ 12893 static int 12894 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12895 uint_t flags, sfmmu_t *sfmmup) 12896 { 12897 int err; 12898 12899 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12900 sfmmu_tsbinfo_cache, KM_SLEEP); 12901 12902 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12903 tsb_szc, flags, sfmmup)) != 0) { 12904 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12905 SFMMU_STAT(sf_tsb_allocfail); 12906 *tsbinfopp = NULL; 12907 return (err); 12908 } 12909 SFMMU_STAT(sf_tsb_alloc); 12910 12911 /* 12912 * Bump the TSB size counters for this TSB size. 12913 */ 12914 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12915 return (0); 12916 } 12917 12918 static void 12919 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12920 { 12921 caddr_t tsbva = tsbinfo->tsb_va; 12922 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12923 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12924 vmem_t *vmp = tsbinfo->tsb_vmp; 12925 12926 /* 12927 * If we allocated this TSB from relocatable kernel memory, then we 12928 * need to uninstall the callback handler. 
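	 * As with the callback registration in sfmmu_init_tsbinfo(), only
	 * the first page of the TSB slab is locked around the
	 * hat_delete_callback() call below, since the relocation pseudo
	 * mappings hang off the slab's root page.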
12929 */ 12930 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12931 uintptr_t slab_mask; 12932 caddr_t slab_vaddr; 12933 page_t **ppl; 12934 int ret; 12935 12936 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12937 if (tsb_size > MMU_PAGESIZE4M) 12938 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12939 else 12940 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12941 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12942 12943 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12944 ASSERT(ret == 0); 12945 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12946 0, NULL); 12947 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12948 } 12949 12950 if (kmem_cachep != NULL) { 12951 kmem_cache_free(kmem_cachep, tsbva); 12952 } else { 12953 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12954 } 12955 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12956 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12957 } 12958 12959 static void 12960 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12961 { 12962 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12963 sfmmu_tsb_free(tsbinfo); 12964 } 12965 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12966 12967 } 12968 12969 /* 12970 * Setup all the references to physical memory for this tsbinfo. 12971 * The underlying page(s) must be locked. 12972 */ 12973 static void 12974 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12975 { 12976 ASSERT(pfn != PFN_INVALID); 12977 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12978 12979 #ifndef sun4v 12980 if (tsbinfo->tsb_szc == 0) { 12981 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12982 PROT_WRITE|PROT_READ, TTE8K); 12983 } else { 12984 /* 12985 * Round down PA and use a large mapping; the handlers will 12986 * compute the TSB pointer at the correct offset into the 12987 * big virtual page. NOTE: this assumes all TSBs larger 12988 * than 8K must come from physically contiguous slabs of 12989 * size tsb_slab_size. 12990 */ 12991 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12992 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12993 } 12994 tsbinfo->tsb_pa = ptob(pfn); 12995 12996 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12997 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12998 12999 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 13000 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 13001 #else /* sun4v */ 13002 tsbinfo->tsb_pa = ptob(pfn); 13003 #endif /* sun4v */ 13004 } 13005 13006 13007 /* 13008 * Returns zero on success, ENOMEM if over the high water mark, 13009 * or EAGAIN if the caller needs to retry with a smaller TSB 13010 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 13011 * 13012 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 13013 * is specified and the TSB requested is PAGESIZE, though it 13014 * may sleep waiting for memory if sufficient memory is not 13015 * available. 
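 *
 * A caller honoring this contract would retry along these lines
 * (an illustrative sketch only; variable names are made up):
 *
 *	while ((err = sfmmu_init_tsbinfo(tsbinfo, ttemask, szc, flags,
 *	    sfmmup)) == EAGAIN && szc > TSB_MIN_SZCODE)
 *		szc--;			(try the next smaller TSB size)
 *	if (err == EAGAIN)
 *		err = sfmmu_init_tsbinfo(tsbinfo, ttemask, TSB_MIN_SZCODE,
 *		    flags | TSB_FORCEALLOC, sfmmup);
 *
 * TSB_FORCEALLOC at the minimum size cannot return EAGAIN, per the
 * contract above, though it may sleep.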
13016 */ 13017 static int 13018 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 13019 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 13020 { 13021 caddr_t vaddr = NULL; 13022 caddr_t slab_vaddr; 13023 uintptr_t slab_mask; 13024 int tsbbytes = TSB_BYTES(tsbcode); 13025 int lowmem = 0; 13026 struct kmem_cache *kmem_cachep = NULL; 13027 vmem_t *vmp = NULL; 13028 lgrp_id_t lgrpid = LGRP_NONE; 13029 pfn_t pfn; 13030 uint_t cbflags = HAC_SLEEP; 13031 page_t **pplist; 13032 int ret; 13033 13034 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 13035 if (tsbbytes > MMU_PAGESIZE4M) 13036 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 13037 else 13038 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 13039 13040 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 13041 flags |= TSB_ALLOC; 13042 13043 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 13044 13045 tsbinfo->tsb_sfmmu = sfmmup; 13046 13047 /* 13048 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 13049 * return. 13050 */ 13051 if ((flags & TSB_ALLOC) == 0) { 13052 tsbinfo->tsb_szc = tsbcode; 13053 tsbinfo->tsb_ttesz_mask = tteszmask; 13054 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 13055 tsbinfo->tsb_pa = -1; 13056 tsbinfo->tsb_tte.ll = 0; 13057 tsbinfo->tsb_next = NULL; 13058 tsbinfo->tsb_flags = TSB_SWAPPED; 13059 tsbinfo->tsb_cache = NULL; 13060 tsbinfo->tsb_vmp = NULL; 13061 return (0); 13062 } 13063 13064 #ifdef DEBUG 13065 /* 13066 * For debugging: 13067 * Randomly force allocation failures every tsb_alloc_mtbf 13068 * tries if TSB_FORCEALLOC is not specified. This will 13069 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 13070 * it is even, to allow testing of both failure paths... 13071 */ 13072 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 13073 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 13074 tsb_alloc_count = 0; 13075 tsb_alloc_fail_mtbf++; 13076 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 13077 } 13078 #endif /* DEBUG */ 13079 13080 /* 13081 * Enforce high water mark if we are not doing a forced allocation 13082 * and are not shrinking a process' TSB. 13083 */ 13084 if ((flags & TSB_SHRINK) == 0 && 13085 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 13086 if ((flags & TSB_FORCEALLOC) == 0) 13087 return (ENOMEM); 13088 lowmem = 1; 13089 } 13090 13091 /* 13092 * Allocate from the correct location based upon the size of the TSB 13093 * compared to the base page size, and what memory conditions dictate. 13094 * Note we always do nonblocking allocations from the TSB arena since 13095 * we don't want memory fragmentation to cause processes to block 13096 * indefinitely waiting for memory; until the kernel algorithms that 13097 * coalesce large pages are improved this is our best option. 
13098 * 13099 * Algorithm: 13100 * If allocating a "large" TSB (>8K), allocate from the 13101 * appropriate kmem_tsb_default_arena vmem arena 13102 * else if low on memory or the TSB_FORCEALLOC flag is set or 13103 * tsb_forceheap is set 13104 * Allocate from kernel heap via sfmmu_tsb8k_cache with 13105 * KM_SLEEP (never fails) 13106 * else 13107 * Allocate from appropriate sfmmu_tsb_cache with 13108 * KM_NOSLEEP 13109 * endif 13110 */ 13111 if (tsb_lgrp_affinity) 13112 lgrpid = lgrp_home_id(curthread); 13113 if (lgrpid == LGRP_NONE) 13114 lgrpid = 0; /* use lgrp of boot CPU */ 13115 13116 if (tsbbytes > MMU_PAGESIZE) { 13117 if (tsbbytes > MMU_PAGESIZE4M) { 13118 vmp = kmem_bigtsb_default_arena[lgrpid]; 13119 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 13120 0, 0, NULL, NULL, VM_NOSLEEP); 13121 } else { 13122 vmp = kmem_tsb_default_arena[lgrpid]; 13123 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 13124 0, 0, NULL, NULL, VM_NOSLEEP); 13125 } 13126 #ifdef DEBUG 13127 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 13128 #else /* !DEBUG */ 13129 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 13130 #endif /* DEBUG */ 13131 kmem_cachep = sfmmu_tsb8k_cache; 13132 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 13133 ASSERT(vaddr != NULL); 13134 } else { 13135 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 13136 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 13137 } 13138 13139 tsbinfo->tsb_cache = kmem_cachep; 13140 tsbinfo->tsb_vmp = vmp; 13141 13142 if (vaddr == NULL) { 13143 return (EAGAIN); 13144 } 13145 13146 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 13147 kmem_cachep = tsbinfo->tsb_cache; 13148 13149 /* 13150 * If we are allocating from outside the cage, then we need to 13151 * register a relocation callback handler. Note that for now 13152 * since pseudo mappings always hang off of the slab's root page, 13153 * we need only lock the first 8K of the TSB slab. This is a bit 13154 * hacky but it is good for performance. 13155 */ 13156 if (kmem_cachep != sfmmu_tsb8k_cache) { 13157 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 13158 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 13159 ASSERT(ret == 0); 13160 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 13161 cbflags, (void *)tsbinfo, &pfn, NULL); 13162 13163 /* 13164 * Need to free up resources if we could not successfully 13165 * add the callback function and return an error condition. 13166 */ 13167 if (ret != 0) { 13168 if (kmem_cachep) { 13169 kmem_cache_free(kmem_cachep, vaddr); 13170 } else { 13171 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 13172 } 13173 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 13174 S_WRITE); 13175 return (EAGAIN); 13176 } 13177 } else { 13178 /* 13179 * Since allocation of 8K TSBs from heap is rare and occurs 13180 * during memory pressure we allocate them from permanent 13181 * memory rather than using callbacks to get the PFN. 
13182 */ 13183 pfn = hat_getpfnum(kas.a_hat, vaddr); 13184 } 13185 13186 tsbinfo->tsb_va = vaddr; 13187 tsbinfo->tsb_szc = tsbcode; 13188 tsbinfo->tsb_ttesz_mask = tteszmask; 13189 tsbinfo->tsb_next = NULL; 13190 tsbinfo->tsb_flags = 0; 13191 13192 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 13193 13194 sfmmu_inv_tsb(vaddr, tsbbytes); 13195 13196 if (kmem_cachep != sfmmu_tsb8k_cache) { 13197 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 13198 } 13199 13200 return (0); 13201 } 13202 13203 /* 13204 * Initialize per cpu tsb and per cpu tsbmiss_area 13205 */ 13206 void 13207 sfmmu_init_tsbs(void) 13208 { 13209 int i; 13210 struct tsbmiss *tsbmissp; 13211 struct kpmtsbm *kpmtsbmp; 13212 #ifndef sun4v 13213 extern int dcache_line_mask; 13214 #endif /* sun4v */ 13215 extern uint_t vac_colors; 13216 13217 /* 13218 * Init. tsb miss area. 13219 */ 13220 tsbmissp = tsbmiss_area; 13221 13222 for (i = 0; i < NCPU; tsbmissp++, i++) { 13223 /* 13224 * initialize the tsbmiss area. 13225 * Do this for all possible CPUs as some may be added 13226 * while the system is running. There is no cost to this. 13227 */ 13228 tsbmissp->ksfmmup = ksfmmup; 13229 #ifndef sun4v 13230 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13231 #endif /* sun4v */ 13232 tsbmissp->khashstart = 13233 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13234 tsbmissp->uhashstart = 13235 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13236 tsbmissp->khashsz = khmehash_num; 13237 tsbmissp->uhashsz = uhmehash_num; 13238 } 13239 13240 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13241 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13242 13243 if (kpm_enable == 0) 13244 return; 13245 13246 /* -- Begin KPM specific init -- */ 13247 13248 if (kpm_smallpages) { 13249 /* 13250 * If we're using base pagesize pages for seg_kpm 13251 * mappings, we use the kernel TSB since we can't afford 13252 * to allocate a second huge TSB for these mappings. 13253 */ 13254 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13255 kpm_tsbsz = ktsb_szcode; 13256 kpmsm_tsbbase = kpm_tsbbase; 13257 kpmsm_tsbsz = kpm_tsbsz; 13258 } else { 13259 /* 13260 * In VAC conflict case, just put the entries in the 13261 * kernel 8K indexed TSB for now so we can find them. 13262 * This could really be changed in the future if we feel 13263 * the need... 13264 */ 13265 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13266 kpmsm_tsbsz = ktsb_szcode; 13267 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13268 kpm_tsbsz = ktsb4m_szcode; 13269 } 13270 13271 kpmtsbmp = kpmtsbm_area; 13272 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13273 /* 13274 * Initialize the kpmtsbm area. 13275 * Do this for all possible CPUs as some may be added 13276 * while the system is running. There is no cost to this. 13277 */ 13278 kpmtsbmp->vbase = kpm_vbase; 13279 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13280 kpmtsbmp->sz_shift = kpm_size_shift; 13281 kpmtsbmp->kpmp_shift = kpmp_shift; 13282 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13283 if (kpm_smallpages == 0) { 13284 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13285 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13286 } else { 13287 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13288 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13289 } 13290 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13291 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13292 #ifdef DEBUG 13293 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 13294 #endif /* DEBUG */ 13295 if (ktsb_phys) 13296 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13297 } 13298 13299 /* -- End KPM specific init -- */ 13300 } 13301 13302 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13303 struct tsb_info ktsb_info[2]; 13304 13305 /* 13306 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13307 */ 13308 void 13309 sfmmu_init_ktsbinfo() 13310 { 13311 ASSERT(ksfmmup != NULL); 13312 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13313 /* 13314 * Allocate tsbinfos for kernel and copy in data 13315 * to make debug easier and sun4v setup easier. 13316 */ 13317 ktsb_info[0].tsb_sfmmu = ksfmmup; 13318 ktsb_info[0].tsb_szc = ktsb_szcode; 13319 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13320 ktsb_info[0].tsb_va = ktsb_base; 13321 ktsb_info[0].tsb_pa = ktsb_pbase; 13322 ktsb_info[0].tsb_flags = 0; 13323 ktsb_info[0].tsb_tte.ll = 0; 13324 ktsb_info[0].tsb_cache = NULL; 13325 13326 ktsb_info[1].tsb_sfmmu = ksfmmup; 13327 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13328 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13329 ktsb_info[1].tsb_va = ktsb4m_base; 13330 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13331 ktsb_info[1].tsb_flags = 0; 13332 ktsb_info[1].tsb_tte.ll = 0; 13333 ktsb_info[1].tsb_cache = NULL; 13334 13335 /* Link them into ksfmmup. */ 13336 ktsb_info[0].tsb_next = &ktsb_info[1]; 13337 ktsb_info[1].tsb_next = NULL; 13338 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13339 13340 sfmmu_setup_tsbinfo(ksfmmup); 13341 } 13342 13343 /* 13344 * Cache the last value returned from va_to_pa(). If the VA specified 13345 * in the current call to cached_va_to_pa() maps to the same Page (as the 13346 * previous call to cached_va_to_pa()), then compute the PA using 13347 * cached info, else call va_to_pa(). 13348 * 13349 * Note: this function is neither MT-safe nor consistent in the presence 13350 * of multiple, interleaved threads. This function was created to enable 13351 * an optimization used during boot (at a point when there's only one thread 13352 * executing on the "boot CPU", and before startup_vm() has been called). 13353 */ 13354 static uint64_t 13355 cached_va_to_pa(void *vaddr) 13356 { 13357 static uint64_t prev_vaddr_base = 0; 13358 static uint64_t prev_pfn = 0; 13359 13360 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13361 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13362 } else { 13363 uint64_t pa = va_to_pa(vaddr); 13364 13365 if (pa != ((uint64_t)-1)) { 13366 /* 13367 * Computed physical address is valid. Cache its 13368 * related info for the next cached_va_to_pa() call. 13369 */ 13370 prev_pfn = pa & MMU_PAGEMASK; 13371 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13372 } 13373 13374 return (pa); 13375 } 13376 } 13377 13378 /* 13379 * Carve up our nucleus hblk region. We may allocate more hblks than 13380 * asked due to rounding errors but we are guaranteed to have at least 13381 * enough space to allocate the requested number of hblk8's and hblk1's. 
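 *
 * The split point is computed as in the code below:
 *
 *	hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
 *
 * hblk8's are carved from the front of the region up to hblk8_bound,
 * and the tail is carved into hblk1's. For example (illustrative
 * numbers only), with size = 64K, nhblk1 = 4 and hme1blk_sz = 1K,
 * roughly the first 60K becomes hblk8's and the last ~4K yields at
 * least the four requested hblk1's.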
13382 */ 13383 void 13384 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 13385 { 13386 struct hme_blk *hmeblkp; 13387 size_t hme8blk_sz, hme1blk_sz; 13388 size_t i; 13389 size_t hblk8_bound; 13390 ulong_t j = 0, k = 0; 13391 13392 ASSERT(addr != NULL && size != 0); 13393 13394 /* Need to use proper structure alignment */ 13395 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 13396 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 13397 13398 nucleus_hblk8.list = (void *)addr; 13399 nucleus_hblk8.index = 0; 13400 13401 /* 13402 * Use as much memory as possible for hblk8's since we 13403 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 13404 * We need to hold back enough space for the hblk1's which 13405 * we'll allocate next. 13406 */ 13407 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 13408 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 13409 hmeblkp = (struct hme_blk *)addr; 13410 addr += hme8blk_sz; 13411 hmeblkp->hblk_nuc_bit = 1; 13412 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13413 } 13414 nucleus_hblk8.len = j; 13415 ASSERT(j >= nhblk8); 13416 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 13417 13418 nucleus_hblk1.list = (void *)addr; 13419 nucleus_hblk1.index = 0; 13420 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 13421 hmeblkp = (struct hme_blk *)addr; 13422 addr += hme1blk_sz; 13423 hmeblkp->hblk_nuc_bit = 1; 13424 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13425 } 13426 ASSERT(k >= nhblk1); 13427 nucleus_hblk1.len = k; 13428 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 13429 } 13430 13431 /* 13432 * This function is currently not supported on this platform. For what 13433 * it's supposed to do, see hat.c and hat_srmmu.c 13434 */ 13435 /* ARGSUSED */ 13436 faultcode_t 13437 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 13438 uint_t flags) 13439 { 13440 ASSERT(hat->sfmmu_xhat_provider == NULL); 13441 return (FC_NOSUPPORT); 13442 } 13443 13444 /* 13445 * Searchs the mapping list of the page for a mapping of the same size. If not 13446 * found the corresponding bit is cleared in the p_index field. When large 13447 * pages are more prevalent in the system, we can maintain the mapping list 13448 * in order and we don't have to traverse the list each time. Just check the 13449 * next and prev entries, and if both are of different size, we clear the bit. 13450 */ 13451 static void 13452 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 13453 { 13454 struct sf_hment *sfhmep; 13455 struct hme_blk *hmeblkp; 13456 int index; 13457 pgcnt_t npgs; 13458 13459 ASSERT(ttesz > TTE8K); 13460 13461 ASSERT(sfmmu_mlist_held(pp)); 13462 13463 ASSERT(PP_ISMAPPED_LARGE(pp)); 13464 13465 /* 13466 * Traverse mapping list looking for another mapping of same size. 13467 * since we only want to clear index field if all mappings of 13468 * that size are gone. 13469 */ 13470 13471 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 13472 if (IS_PAHME(sfhmep)) 13473 continue; 13474 hmeblkp = sfmmu_hmetohblk(sfhmep); 13475 if (hmeblkp->hblk_xhat_bit) 13476 continue; 13477 if (hme_size(sfhmep) == ttesz) { 13478 /* 13479 * another mapping of the same size. don't clear index. 13480 */ 13481 return; 13482 } 13483 } 13484 13485 /* 13486 * Clear the p_index bit for large page. 
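	 * The bit is cleared in every constituent page_t of the large
	 * page. For example (assuming an 8K base page size), for a 4M
	 * mapping TTEPAGES(TTE4M) is 512, so the PAGESZ_TO_INDEX(TTE4M)
	 * bit is cleared in each of the 512 pages the mapping spans.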
13487 */ 13488 index = PAGESZ_TO_INDEX(ttesz); 13489 npgs = TTEPAGES(ttesz); 13490 while (npgs-- > 0) { 13491 ASSERT(pp->p_index & index); 13492 pp->p_index &= ~index; 13493 pp = PP_PAGENEXT(pp); 13494 } 13495 } 13496 13497 /* 13498 * return supported features 13499 */ 13500 /* ARGSUSED */ 13501 int 13502 hat_supported(enum hat_features feature, void *arg) 13503 { 13504 switch (feature) { 13505 case HAT_SHARED_PT: 13506 case HAT_DYNAMIC_ISM_UNMAP: 13507 case HAT_VMODSORT: 13508 return (1); 13509 case HAT_SHARED_REGIONS: 13510 if (shctx_on) 13511 return (1); 13512 else 13513 return (0); 13514 default: 13515 return (0); 13516 } 13517 } 13518 13519 void 13520 hat_enter(struct hat *hat) 13521 { 13522 hatlock_t *hatlockp; 13523 13524 if (hat != ksfmmup) { 13525 hatlockp = TSB_HASH(hat); 13526 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13527 } 13528 } 13529 13530 void 13531 hat_exit(struct hat *hat) 13532 { 13533 hatlock_t *hatlockp; 13534 13535 if (hat != ksfmmup) { 13536 hatlockp = TSB_HASH(hat); 13537 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13538 } 13539 } 13540 13541 /*ARGSUSED*/ 13542 void 13543 hat_reserve(struct as *as, caddr_t addr, size_t len) 13544 { 13545 } 13546 13547 static void 13548 hat_kstat_init(void) 13549 { 13550 kstat_t *ksp; 13551 13552 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13553 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13554 KSTAT_FLAG_VIRTUAL); 13555 if (ksp) { 13556 ksp->ks_data = (void *) &sfmmu_global_stat; 13557 kstat_install(ksp); 13558 } 13559 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13560 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13561 KSTAT_FLAG_VIRTUAL); 13562 if (ksp) { 13563 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13564 kstat_install(ksp); 13565 } 13566 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13567 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13568 KSTAT_FLAG_WRITABLE); 13569 if (ksp) { 13570 ksp->ks_update = sfmmu_kstat_percpu_update; 13571 kstat_install(ksp); 13572 } 13573 } 13574 13575 /* ARGSUSED */ 13576 static int 13577 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13578 { 13579 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13580 struct tsbmiss *tsbm = tsbmiss_area; 13581 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13582 int i; 13583 13584 ASSERT(cpu_kstat); 13585 if (rw == KSTAT_READ) { 13586 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13587 cpu_kstat->sf_itlb_misses = 0; 13588 cpu_kstat->sf_dtlb_misses = 0; 13589 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13590 tsbm->uprot_traps; 13591 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13592 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13593 cpu_kstat->sf_tsb_hits = 0; 13594 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13595 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13596 } 13597 } else { 13598 /* KSTAT_WRITE is used to clear stats */ 13599 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13600 tsbm->utsb_misses = 0; 13601 tsbm->ktsb_misses = 0; 13602 tsbm->uprot_traps = 0; 13603 tsbm->kprot_traps = 0; 13604 kpmtsbm->kpm_dtlb_misses = 0; 13605 kpmtsbm->kpm_tsb_misses = 0; 13606 } 13607 } 13608 return (0); 13609 } 13610 13611 #ifdef DEBUG 13612 13613 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13614 13615 /* 13616 * A tte checker. *orig_old is the value we read before cas. 13617 * *cur is the value returned by cas. 13618 * *new is the desired value when we do the cas. 13619 * 13620 * *hmeblkp is currently unused. 
13621 */ 13622 13623 /* ARGSUSED */ 13624 void 13625 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13626 { 13627 pfn_t i, j, k; 13628 int cpuid = CPU->cpu_id; 13629 13630 gorig[cpuid] = orig_old; 13631 gcur[cpuid] = cur; 13632 gnew[cpuid] = new; 13633 13634 #ifdef lint 13635 hmeblkp = hmeblkp; 13636 #endif 13637 13638 if (TTE_IS_VALID(orig_old)) { 13639 if (TTE_IS_VALID(cur)) { 13640 i = TTE_TO_TTEPFN(orig_old); 13641 j = TTE_TO_TTEPFN(cur); 13642 k = TTE_TO_TTEPFN(new); 13643 if (i != j) { 13644 /* remap error? */ 13645 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13646 } 13647 13648 if (i != k) { 13649 /* remap error? */ 13650 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13651 } 13652 } else { 13653 if (TTE_IS_VALID(new)) { 13654 panic("chk_tte: invalid cur? "); 13655 } 13656 13657 i = TTE_TO_TTEPFN(orig_old); 13658 k = TTE_TO_TTEPFN(new); 13659 if (i != k) { 13660 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13661 } 13662 } 13663 } else { 13664 if (TTE_IS_VALID(cur)) { 13665 j = TTE_TO_TTEPFN(cur); 13666 if (TTE_IS_VALID(new)) { 13667 k = TTE_TO_TTEPFN(new); 13668 if (j != k) { 13669 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13670 j, k); 13671 } 13672 } else { 13673 panic("chk_tte: why here?"); 13674 } 13675 } else { 13676 if (!TTE_IS_VALID(new)) { 13677 panic("chk_tte: why here2 ?"); 13678 } 13679 } 13680 } 13681 } 13682 13683 #endif /* DEBUG */ 13684 13685 extern void prefetch_tsbe_read(struct tsbe *); 13686 extern void prefetch_tsbe_write(struct tsbe *); 13687 13688 13689 /* 13690 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13691 * us optimal performance on Cheetah+. You can only have 8 outstanding 13692 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13693 * prefetch to make the most utilization of the prefetch capability. 13694 */ 13695 #define TSBE_PREFETCH_STRIDE (7) 13696 13697 void 13698 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13699 { 13700 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13701 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13702 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13703 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13704 struct tsbe *old; 13705 struct tsbe *new; 13706 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13707 uint64_t va; 13708 int new_offset; 13709 int i; 13710 int vpshift; 13711 int last_prefetch; 13712 13713 if (old_bytes == new_bytes) { 13714 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13715 } else { 13716 13717 /* 13718 * A TSBE is 16 bytes which means there are four TSBE's per 13719 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13720 */ 13721 old = (struct tsbe *)old_tsbinfo->tsb_va; 13722 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13723 for (i = 0; i < old_entries; i++, old++) { 13724 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13725 prefetch_tsbe_read(old); 13726 if (!old->tte_tag.tag_invalid) { 13727 /* 13728 * We have a valid TTE to remap. Check the 13729 * size. We won't remap 64K or 512K TTEs 13730 * because they span more than one TSB entry 13731 * and are indexed using an 8K virt. page. 13732 * Ditto for 32M and 256M TTEs. 
13733 */ 13734 if (TTE_CSZ(&old->tte_data) == TTE64K || 13735 TTE_CSZ(&old->tte_data) == TTE512K) 13736 continue; 13737 if (mmu_page_sizes == max_mmu_page_sizes) { 13738 if (TTE_CSZ(&old->tte_data) == TTE32M || 13739 TTE_CSZ(&old->tte_data) == TTE256M) 13740 continue; 13741 } 13742 13743 /* clear the lower 22 bits of the va */ 13744 va = *(uint64_t *)old << 22; 13745 /* turn va into a virtual pfn */ 13746 va >>= 22 - TSB_START_SIZE; 13747 /* 13748 * or in bits from the offset in the tsb 13749 * to get the real virtual pfn. These 13750 * correspond to bits [21:13] in the va 13751 */ 13752 vpshift = 13753 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13754 0x1ff; 13755 va |= (i << vpshift); 13756 va >>= vpshift; 13757 new_offset = va & (new_entries - 1); 13758 new = new_base + new_offset; 13759 prefetch_tsbe_write(new); 13760 *new = *old; 13761 } 13762 } 13763 } 13764 } 13765 13766 /* 13767 * unused in sfmmu 13768 */ 13769 void 13770 hat_dump(void) 13771 { 13772 } 13773 13774 /* 13775 * Called when a thread is exiting and we have switched to the kernel address 13776 * space. Perform the same VM initialization resume() uses when switching 13777 * processes. 13778 * 13779 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13780 * we call it anyway in case the semantics change in the future. 13781 */ 13782 /*ARGSUSED*/ 13783 void 13784 hat_thread_exit(kthread_t *thd) 13785 { 13786 uint_t pgsz_cnum; 13787 uint_t pstate_save; 13788 13789 ASSERT(thd->t_procp->p_as == &kas); 13790 13791 pgsz_cnum = KCONTEXT; 13792 #ifdef sun4u 13793 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13794 #endif 13795 13796 /* 13797 * Note that sfmmu_load_mmustate() is currently a no-op for 13798 * kernel threads. We need to disable interrupts here, 13799 * simply because otherwise sfmmu_load_mmustate() would panic 13800 * if the caller does not disable interrupts. 13801 */ 13802 pstate_save = sfmmu_disable_intrs(); 13803 13804 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13805 sfmmu_setctx_sec(pgsz_cnum); 13806 sfmmu_load_mmustate(ksfmmup); 13807 sfmmu_enable_intrs(pstate_save); 13808 } 13809 13810 13811 /* 13812 * SRD support 13813 */ 13814 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13815 (((uintptr_t)(vp)) >> 11)) & \ 13816 srd_hashmask) 13817 13818 /* 13819 * Attach the process to the srd struct associated with the exec vnode 13820 * from which the process is started. 
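 *
 * Each srd is hashed by exec vnode and is reference counted. hat_join_srd()
 * takes a hold on the vnode and either bumps the reference count of an
 * existing srd or allocates a new one; sfmmu_leave_srd() drops the reference
 * and frees the srd once the last process sharing that exec vnode has exited.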
13821 */ 13822 void 13823 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13824 { 13825 uint_t hash = SRD_HASH_FUNCTION(evp); 13826 sf_srd_t *srdp; 13827 sf_srd_t *newsrdp; 13828 13829 ASSERT(sfmmup != ksfmmup); 13830 ASSERT(sfmmup->sfmmu_srdp == NULL); 13831 13832 if (!shctx_on) { 13833 return; 13834 } 13835 13836 VN_HOLD(evp); 13837 13838 if (srd_buckets[hash].srdb_srdp != NULL) { 13839 mutex_enter(&srd_buckets[hash].srdb_lock); 13840 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13841 srdp = srdp->srd_hash) { 13842 if (srdp->srd_evp == evp) { 13843 ASSERT(srdp->srd_refcnt >= 0); 13844 sfmmup->sfmmu_srdp = srdp; 13845 atomic_add_32( 13846 (volatile uint_t *)&srdp->srd_refcnt, 1); 13847 mutex_exit(&srd_buckets[hash].srdb_lock); 13848 return; 13849 } 13850 } 13851 mutex_exit(&srd_buckets[hash].srdb_lock); 13852 } 13853 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13854 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13855 13856 newsrdp->srd_evp = evp; 13857 newsrdp->srd_refcnt = 1; 13858 newsrdp->srd_hmergnfree = NULL; 13859 newsrdp->srd_ismrgnfree = NULL; 13860 13861 mutex_enter(&srd_buckets[hash].srdb_lock); 13862 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13863 srdp = srdp->srd_hash) { 13864 if (srdp->srd_evp == evp) { 13865 ASSERT(srdp->srd_refcnt >= 0); 13866 sfmmup->sfmmu_srdp = srdp; 13867 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 13868 mutex_exit(&srd_buckets[hash].srdb_lock); 13869 kmem_cache_free(srd_cache, newsrdp); 13870 return; 13871 } 13872 } 13873 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13874 srd_buckets[hash].srdb_srdp = newsrdp; 13875 sfmmup->sfmmu_srdp = newsrdp; 13876 13877 mutex_exit(&srd_buckets[hash].srdb_lock); 13878 13879 } 13880 13881 static void 13882 sfmmu_leave_srd(sfmmu_t *sfmmup) 13883 { 13884 vnode_t *evp; 13885 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13886 uint_t hash; 13887 sf_srd_t **prev_srdpp; 13888 sf_region_t *rgnp; 13889 sf_region_t *nrgnp; 13890 #ifdef DEBUG 13891 int rgns = 0; 13892 #endif 13893 int i; 13894 13895 ASSERT(sfmmup != ksfmmup); 13896 ASSERT(srdp != NULL); 13897 ASSERT(srdp->srd_refcnt > 0); 13898 ASSERT(sfmmup->sfmmu_scdp == NULL); 13899 ASSERT(sfmmup->sfmmu_free == 1); 13900 13901 sfmmup->sfmmu_srdp = NULL; 13902 evp = srdp->srd_evp; 13903 ASSERT(evp != NULL); 13904 if (atomic_add_32_nv( 13905 (volatile uint_t *)&srdp->srd_refcnt, -1)) { 13906 VN_RELE(evp); 13907 return; 13908 } 13909 13910 hash = SRD_HASH_FUNCTION(evp); 13911 mutex_enter(&srd_buckets[hash].srdb_lock); 13912 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13913 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13914 if (srdp->srd_evp == evp) { 13915 break; 13916 } 13917 } 13918 if (srdp == NULL || srdp->srd_refcnt) { 13919 mutex_exit(&srd_buckets[hash].srdb_lock); 13920 VN_RELE(evp); 13921 return; 13922 } 13923 *prev_srdpp = srdp->srd_hash; 13924 mutex_exit(&srd_buckets[hash].srdb_lock); 13925 13926 ASSERT(srdp->srd_refcnt == 0); 13927 VN_RELE(evp); 13928 13929 #ifdef DEBUG 13930 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13931 ASSERT(srdp->srd_rgnhash[i] == NULL); 13932 } 13933 #endif /* DEBUG */ 13934 13935 /* free each hme regions in the srd */ 13936 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13937 nrgnp = rgnp->rgn_next; 13938 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13939 ASSERT(rgnp->rgn_refcnt == 0); 13940 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13941 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13942 ASSERT(rgnp->rgn_hmeflags == 0); 13943 
ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13944 #ifdef DEBUG 13945 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13946 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13947 } 13948 rgns++; 13949 #endif /* DEBUG */ 13950 kmem_cache_free(region_cache, rgnp); 13951 } 13952 ASSERT(rgns == srdp->srd_next_hmerid); 13953 13954 #ifdef DEBUG 13955 rgns = 0; 13956 #endif 13957 /* free each ism rgns in the srd */ 13958 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13959 nrgnp = rgnp->rgn_next; 13960 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13961 ASSERT(rgnp->rgn_refcnt == 0); 13962 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13963 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13964 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13965 #ifdef DEBUG 13966 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13967 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13968 } 13969 rgns++; 13970 #endif /* DEBUG */ 13971 kmem_cache_free(region_cache, rgnp); 13972 } 13973 ASSERT(rgns == srdp->srd_next_ismrid); 13974 ASSERT(srdp->srd_ismbusyrgns == 0); 13975 ASSERT(srdp->srd_hmebusyrgns == 0); 13976 13977 srdp->srd_next_ismrid = 0; 13978 srdp->srd_next_hmerid = 0; 13979 13980 bzero((void *)srdp->srd_ismrgnp, 13981 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13982 bzero((void *)srdp->srd_hmergnp, 13983 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13984 13985 ASSERT(srdp->srd_scdp == NULL); 13986 kmem_cache_free(srd_cache, srdp); 13987 } 13988 13989 /* ARGSUSED */ 13990 static int 13991 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13992 { 13993 sf_srd_t *srdp = (sf_srd_t *)buf; 13994 bzero(buf, sizeof (*srdp)); 13995 13996 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13997 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13998 return (0); 13999 } 14000 14001 /* ARGSUSED */ 14002 static void 14003 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 14004 { 14005 sf_srd_t *srdp = (sf_srd_t *)buf; 14006 14007 mutex_destroy(&srdp->srd_mutex); 14008 mutex_destroy(&srdp->srd_scd_mutex); 14009 } 14010 14011 /* 14012 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 14013 * at the same time for the same process and address range. This is ensured by 14014 * the fact that address space is locked as writer when a process joins the 14015 * regions. Therefore there's no need to hold an srd lock during the entire 14016 * execution of hat_join_region()/hat_leave_region(). 14017 */ 14018 14019 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 14020 (((uintptr_t)(obj)) >> 11)) & \ 14021 srd_rgn_hashmask) 14022 /* 14023 * This routine implements the shared context functionality required when 14024 * attaching a segment to an address space. It must be called from 14025 * hat_share() for D(ISM) segments and from segvn_create() for segments 14026 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 14027 * which is saved in the private segment data for hme segments and 14028 * the ism_map structure for ism segments. 
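 *
 * As an illustrative sketch only (simplified, not verbatim from any caller;
 * the local variable names and the unload callback here are hypothetical),
 * a text-region join from a segment driver looks roughly like:
 *
 *	cookie = hat_join_region(seg->s_as->a_hat, seg->s_base, seg->s_size,
 *	    (void *)vp, off, (uchar_t)prot, (uchar_t)seg->s_szc,
 *	    unload_cb, HAT_REGION_TEXT);
 *	if (cookie == HAT_INVALID_REGION_COOKIE)
 *		... fall back to private, per-process mappings ...
 *	else
 *		... save cookie and pass it to hat_leave_region() later ...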
14029 */ 14030 hat_region_cookie_t 14031 hat_join_region(struct hat *sfmmup, 14032 caddr_t r_saddr, 14033 size_t r_size, 14034 void *r_obj, 14035 u_offset_t r_objoff, 14036 uchar_t r_perm, 14037 uchar_t r_pgszc, 14038 hat_rgn_cb_func_t r_cb_function, 14039 uint_t flags) 14040 { 14041 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14042 uint_t rhash; 14043 uint_t rid; 14044 hatlock_t *hatlockp; 14045 sf_region_t *rgnp; 14046 sf_region_t *new_rgnp = NULL; 14047 int i; 14048 uint16_t *nextidp; 14049 sf_region_t **freelistp; 14050 int maxids; 14051 sf_region_t **rarrp; 14052 uint16_t *busyrgnsp; 14053 ulong_t rttecnt; 14054 uchar_t tteflag; 14055 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14056 int text = (r_type == HAT_REGION_TEXT); 14057 14058 if (srdp == NULL || r_size == 0) { 14059 return (HAT_INVALID_REGION_COOKIE); 14060 } 14061 14062 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 14063 ASSERT(sfmmup != ksfmmup); 14064 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14065 ASSERT(srdp->srd_refcnt > 0); 14066 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14067 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14068 ASSERT(r_pgszc < mmu_page_sizes); 14069 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 14070 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 14071 panic("hat_join_region: region addr or size is not aligned\n"); 14072 } 14073 14074 14075 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 14076 SFMMU_REGION_HME; 14077 /* 14078 * Currently only support shared hmes for the read only main text 14079 * region. 14080 */ 14081 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 14082 (r_perm & PROT_WRITE))) { 14083 return (HAT_INVALID_REGION_COOKIE); 14084 } 14085 14086 rhash = RGN_HASH_FUNCTION(r_obj); 14087 14088 if (r_type == SFMMU_REGION_ISM) { 14089 nextidp = &srdp->srd_next_ismrid; 14090 freelistp = &srdp->srd_ismrgnfree; 14091 maxids = SFMMU_MAX_ISM_REGIONS; 14092 rarrp = srdp->srd_ismrgnp; 14093 busyrgnsp = &srdp->srd_ismbusyrgns; 14094 } else { 14095 nextidp = &srdp->srd_next_hmerid; 14096 freelistp = &srdp->srd_hmergnfree; 14097 maxids = SFMMU_MAX_HME_REGIONS; 14098 rarrp = srdp->srd_hmergnp; 14099 busyrgnsp = &srdp->srd_hmebusyrgns; 14100 } 14101 14102 mutex_enter(&srdp->srd_mutex); 14103 14104 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14105 rgnp = rgnp->rgn_hash) { 14106 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 14107 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 14108 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 14109 break; 14110 } 14111 } 14112 14113 rfound: 14114 if (rgnp != NULL) { 14115 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14116 ASSERT(rgnp->rgn_cb_function == r_cb_function); 14117 ASSERT(rgnp->rgn_refcnt >= 0); 14118 rid = rgnp->rgn_id; 14119 ASSERT(rid < maxids); 14120 ASSERT(rarrp[rid] == rgnp); 14121 ASSERT(rid < *nextidp); 14122 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14123 mutex_exit(&srdp->srd_mutex); 14124 if (new_rgnp != NULL) { 14125 kmem_cache_free(region_cache, new_rgnp); 14126 } 14127 if (r_type == SFMMU_REGION_HME) { 14128 int myjoin = 14129 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 14130 14131 sfmmu_link_to_hmeregion(sfmmup, rgnp); 14132 /* 14133 * bitmap should be updated after linking sfmmu on 14134 * region list so that pageunload() doesn't skip 14135 * TSB/TLB flush. As soon as bitmap is updated another 14136 * thread in this process can already start accessing 14137 * this region. 
14138 */ 14139 /* 14140 * Normally ttecnt accounting is done as part of 14141 * pagefault handling. But a process may not take any 14142 * pagefaults on shared hmeblks created by some other 14143 * process. To compensate for this assume that the 14144 * entire region will end up faulted in using 14145 * the region's pagesize. 14146 * 14147 */ 14148 if (r_pgszc > TTE8K) { 14149 tteflag = 1 << r_pgszc; 14150 if (disable_large_pages & tteflag) { 14151 tteflag = 0; 14152 } 14153 } else { 14154 tteflag = 0; 14155 } 14156 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 14157 hatlockp = sfmmu_hat_enter(sfmmup); 14158 sfmmup->sfmmu_rtteflags |= tteflag; 14159 if (&mmu_set_pgsz_order) { 14160 mmu_set_pgsz_order(sfmmup, 1); 14161 } 14162 sfmmu_hat_exit(hatlockp); 14163 } 14164 hatlockp = sfmmu_hat_enter(sfmmup); 14165 14166 /* 14167 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 14168 * region to allow for large page allocation failure. 14169 */ 14170 if (r_pgszc >= TTE4M) { 14171 sfmmup->sfmmu_tsb0_4minflcnt += 14172 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14173 } 14174 14175 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14176 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14177 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14178 rttecnt); 14179 14180 if (text && r_pgszc >= TTE4M && 14181 (tteflag || ((disable_large_pages >> TTE4M) & 14182 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 14183 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 14184 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 14185 } 14186 14187 sfmmu_hat_exit(hatlockp); 14188 /* 14189 * On Panther we need to make sure TLB is programmed 14190 * to accept 32M/256M pages. Call 14191 * sfmmu_check_page_sizes() now to make sure TLB is 14192 * setup before making hmeregions visible to other 14193 * threads. 14194 */ 14195 sfmmu_check_page_sizes(sfmmup, 1); 14196 hatlockp = sfmmu_hat_enter(sfmmup); 14197 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14198 14199 /* 14200 * if context is invalid tsb miss exception code will 14201 * call sfmmu_check_page_sizes() and update tsbmiss 14202 * area later. 
14203 */ 14204 kpreempt_disable(); 14205 if (myjoin && 14206 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 14207 != INVALID_CONTEXT)) { 14208 struct tsbmiss *tsbmp; 14209 14210 tsbmp = &tsbmiss_area[CPU->cpu_id]; 14211 ASSERT(sfmmup == tsbmp->usfmmup); 14212 BT_SET(tsbmp->shmermap, rid); 14213 if (r_pgszc > TTE64K) { 14214 tsbmp->uhat_rtteflags |= tteflag; 14215 } 14216 14217 } 14218 kpreempt_enable(); 14219 14220 sfmmu_hat_exit(hatlockp); 14221 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 14222 HAT_INVALID_REGION_COOKIE); 14223 } else { 14224 hatlockp = sfmmu_hat_enter(sfmmup); 14225 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 14226 sfmmu_hat_exit(hatlockp); 14227 } 14228 ASSERT(rid < maxids); 14229 14230 if (r_type == SFMMU_REGION_ISM) { 14231 sfmmu_find_scd(sfmmup); 14232 } 14233 return ((hat_region_cookie_t)((uint64_t)rid)); 14234 } 14235 14236 ASSERT(new_rgnp == NULL); 14237 14238 if (*busyrgnsp >= maxids) { 14239 mutex_exit(&srdp->srd_mutex); 14240 return (HAT_INVALID_REGION_COOKIE); 14241 } 14242 14243 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14244 if (*freelistp != NULL) { 14245 rgnp = *freelistp; 14246 *freelistp = rgnp->rgn_next; 14247 ASSERT(rgnp->rgn_id < *nextidp); 14248 ASSERT(rgnp->rgn_id < maxids); 14249 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14250 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14251 == r_type); 14252 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14253 ASSERT(rgnp->rgn_hmeflags == 0); 14254 } else { 14255 /* 14256 * release local locks before memory allocation. 14257 */ 14258 mutex_exit(&srdp->srd_mutex); 14259 14260 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14261 14262 mutex_enter(&srdp->srd_mutex); 14263 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14264 rgnp = rgnp->rgn_hash) { 14265 if (rgnp->rgn_saddr == r_saddr && 14266 rgnp->rgn_size == r_size && 14267 rgnp->rgn_obj == r_obj && 14268 rgnp->rgn_objoff == r_objoff && 14269 rgnp->rgn_perm == r_perm && 14270 rgnp->rgn_pgszc == r_pgszc) { 14271 break; 14272 } 14273 } 14274 if (rgnp != NULL) { 14275 goto rfound; 14276 } 14277 14278 if (*nextidp >= maxids) { 14279 mutex_exit(&srdp->srd_mutex); 14280 goto fail; 14281 } 14282 rgnp = new_rgnp; 14283 new_rgnp = NULL; 14284 rgnp->rgn_id = (*nextidp)++; 14285 ASSERT(rgnp->rgn_id < maxids); 14286 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14287 rarrp[rgnp->rgn_id] = rgnp; 14288 } 14289 14290 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14291 ASSERT(rgnp->rgn_hmeflags == 0); 14292 #ifdef DEBUG 14293 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14294 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14295 } 14296 #endif 14297 rgnp->rgn_saddr = r_saddr; 14298 rgnp->rgn_size = r_size; 14299 rgnp->rgn_obj = r_obj; 14300 rgnp->rgn_objoff = r_objoff; 14301 rgnp->rgn_perm = r_perm; 14302 rgnp->rgn_pgszc = r_pgszc; 14303 rgnp->rgn_flags = r_type; 14304 rgnp->rgn_refcnt = 0; 14305 rgnp->rgn_cb_function = r_cb_function; 14306 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14307 srdp->srd_rgnhash[rhash] = rgnp; 14308 (*busyrgnsp)++; 14309 ASSERT(*busyrgnsp <= maxids); 14310 goto rfound; 14311 14312 fail: 14313 ASSERT(new_rgnp != NULL); 14314 kmem_cache_free(region_cache, new_rgnp); 14315 return (HAT_INVALID_REGION_COOKIE); 14316 } 14317 14318 /* 14319 * This function implements the shared context functionality required 14320 * when detaching a segment from an address space. It must be called 14321 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14322 * for segments with a valid region_cookie. 
14323 * It will also be called from all seg_vn routines which change a 14324 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14325 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14326 * from segvn_fault(). 14327 */ 14328 void 14329 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14330 { 14331 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14332 sf_scd_t *scdp; 14333 uint_t rhash; 14334 uint_t rid = (uint_t)((uint64_t)rcookie); 14335 hatlock_t *hatlockp = NULL; 14336 sf_region_t *rgnp; 14337 sf_region_t **prev_rgnpp; 14338 sf_region_t *cur_rgnp; 14339 void *r_obj; 14340 int i; 14341 caddr_t r_saddr; 14342 caddr_t r_eaddr; 14343 size_t r_size; 14344 uchar_t r_pgszc; 14345 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14346 14347 ASSERT(sfmmup != ksfmmup); 14348 ASSERT(srdp != NULL); 14349 ASSERT(srdp->srd_refcnt > 0); 14350 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14351 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14352 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14353 14354 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 14355 SFMMU_REGION_HME; 14356 14357 if (r_type == SFMMU_REGION_ISM) { 14358 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14359 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14360 rgnp = srdp->srd_ismrgnp[rid]; 14361 } else { 14362 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14363 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14364 rgnp = srdp->srd_hmergnp[rid]; 14365 } 14366 ASSERT(rgnp != NULL); 14367 ASSERT(rgnp->rgn_id == rid); 14368 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14369 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14370 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14371 14372 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 14373 if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) { 14374 xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr, 14375 rgnp->rgn_size, 0, NULL); 14376 } 14377 14378 if (sfmmup->sfmmu_free) { 14379 ulong_t rttecnt; 14380 r_pgszc = rgnp->rgn_pgszc; 14381 r_size = rgnp->rgn_size; 14382 14383 ASSERT(sfmmup->sfmmu_scdp == NULL); 14384 if (r_type == SFMMU_REGION_ISM) { 14385 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14386 } else { 14387 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14388 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14389 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14390 14391 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14392 -rttecnt); 14393 14394 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14395 } 14396 } else if (r_type == SFMMU_REGION_ISM) { 14397 hatlockp = sfmmu_hat_enter(sfmmup); 14398 ASSERT(rid < srdp->srd_next_ismrid); 14399 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14400 scdp = sfmmup->sfmmu_scdp; 14401 if (scdp != NULL && 14402 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14403 sfmmu_leave_scd(sfmmup, r_type); 14404 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14405 } 14406 sfmmu_hat_exit(hatlockp); 14407 } else { 14408 ulong_t rttecnt; 14409 r_pgszc = rgnp->rgn_pgszc; 14410 r_saddr = rgnp->rgn_saddr; 14411 r_size = rgnp->rgn_size; 14412 r_eaddr = r_saddr + r_size; 14413 14414 ASSERT(r_type == SFMMU_REGION_HME); 14415 hatlockp = sfmmu_hat_enter(sfmmup); 14416 ASSERT(rid < srdp->srd_next_hmerid); 14417 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14418 14419 /* 14420 * If region is part of an SCD call sfmmu_leave_scd(). 
14421 * Otherwise if process is not exiting and has valid context 14422 * just drop the context on the floor to lose stale TLB 14423 * entries and force the update of tsb miss area to reflect 14424 * the new region map. After that clean our TSB entries. 14425 */ 14426 scdp = sfmmup->sfmmu_scdp; 14427 if (scdp != NULL && 14428 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14429 sfmmu_leave_scd(sfmmup, r_type); 14430 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14431 } 14432 sfmmu_invalidate_ctx(sfmmup); 14433 14434 i = TTE8K; 14435 while (i < mmu_page_sizes) { 14436 if (rgnp->rgn_ttecnt[i] != 0) { 14437 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14438 r_eaddr, i); 14439 if (i < TTE4M) { 14440 i = TTE4M; 14441 continue; 14442 } else { 14443 break; 14444 } 14445 } 14446 i++; 14447 } 14448 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */ 14449 if (r_pgszc >= TTE4M) { 14450 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14451 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14452 rttecnt); 14453 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14454 } 14455 14456 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14457 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14458 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14459 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14460 14461 sfmmu_hat_exit(hatlockp); 14462 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14463 /* sfmmup left the scd, grow private tsb */ 14464 sfmmu_check_page_sizes(sfmmup, 1); 14465 } else { 14466 sfmmu_check_page_sizes(sfmmup, 0); 14467 } 14468 } 14469 14470 if (r_type == SFMMU_REGION_HME) { 14471 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14472 } 14473 14474 r_obj = rgnp->rgn_obj; 14475 if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) { 14476 return; 14477 } 14478 14479 /* 14480 * looks like nobody uses this region anymore. Free it. 14481 */ 14482 rhash = RGN_HASH_FUNCTION(r_obj); 14483 mutex_enter(&srdp->srd_mutex); 14484 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14485 (cur_rgnp = *prev_rgnpp) != NULL; 14486 prev_rgnpp = &cur_rgnp->rgn_hash) { 14487 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14488 break; 14489 } 14490 } 14491 14492 if (cur_rgnp == NULL) { 14493 mutex_exit(&srdp->srd_mutex); 14494 return; 14495 } 14496 14497 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14498 *prev_rgnpp = rgnp->rgn_hash; 14499 if (r_type == SFMMU_REGION_ISM) { 14500 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14501 ASSERT(rid < srdp->srd_next_ismrid); 14502 rgnp->rgn_next = srdp->srd_ismrgnfree; 14503 srdp->srd_ismrgnfree = rgnp; 14504 ASSERT(srdp->srd_ismbusyrgns > 0); 14505 srdp->srd_ismbusyrgns--; 14506 mutex_exit(&srdp->srd_mutex); 14507 return; 14508 } 14509 mutex_exit(&srdp->srd_mutex); 14510 14511 /* 14512 * Destroy region's hmeblks. 14513 */ 14514 sfmmu_unload_hmeregion(srdp, rgnp); 14515 14516 rgnp->rgn_hmeflags = 0; 14517 14518 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14519 ASSERT(rgnp->rgn_id == rid); 14520 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14521 rgnp->rgn_ttecnt[i] = 0; 14522 } 14523 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14524 mutex_enter(&srdp->srd_mutex); 14525 ASSERT(rid < srdp->srd_next_hmerid); 14526 rgnp->rgn_next = srdp->srd_hmergnfree; 14527 srdp->srd_hmergnfree = rgnp; 14528 ASSERT(srdp->srd_hmebusyrgns > 0); 14529 srdp->srd_hmebusyrgns--; 14530 mutex_exit(&srdp->srd_mutex); 14531 } 14532 14533 /* 14534 * For now only called for hmeblk regions and not for ISM regions. 
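 *
 * hat_dup_region() takes an additional reference on the region, links the
 * duplicating hat onto the region's sfmmu list and adds the region's ttecnt
 * and tsb0 inflation contributions to the new hat, mirroring the per-hat
 * bookkeeping that hat_join_region() performs when an existing region is
 * joined.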
14535 */ 14536 void 14537 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14538 { 14539 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14540 uint_t rid = (uint_t)((uint64_t)rcookie); 14541 sf_region_t *rgnp; 14542 sf_rgn_link_t *rlink; 14543 sf_rgn_link_t *hrlink; 14544 ulong_t rttecnt; 14545 14546 ASSERT(sfmmup != ksfmmup); 14547 ASSERT(srdp != NULL); 14548 ASSERT(srdp->srd_refcnt > 0); 14549 14550 ASSERT(rid < srdp->srd_next_hmerid); 14551 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14552 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14553 14554 rgnp = srdp->srd_hmergnp[rid]; 14555 ASSERT(rgnp->rgn_refcnt > 0); 14556 ASSERT(rgnp->rgn_id == rid); 14557 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14558 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14559 14560 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14561 14562 /* LINTED: constant in conditional context */ 14563 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14564 ASSERT(rlink != NULL); 14565 mutex_enter(&rgnp->rgn_mutex); 14566 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14567 /* LINTED: constant in conditional context */ 14568 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14569 ASSERT(hrlink != NULL); 14570 ASSERT(hrlink->prev == NULL); 14571 rlink->next = rgnp->rgn_sfmmu_head; 14572 rlink->prev = NULL; 14573 hrlink->prev = sfmmup; 14574 /* 14575 * make sure rlink's next field is correct 14576 * before making this link visible. 14577 */ 14578 membar_stst(); 14579 rgnp->rgn_sfmmu_head = sfmmup; 14580 mutex_exit(&rgnp->rgn_mutex); 14581 14582 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14583 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14584 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14585 /* update tsb0 inflation count */ 14586 if (rgnp->rgn_pgszc >= TTE4M) { 14587 sfmmup->sfmmu_tsb0_4minflcnt += 14588 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14589 } 14590 /* 14591 * Update regionid bitmask without hat lock since no other thread 14592 * can update this region bitmask right now. 
14593 */ 14594 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14595 } 14596 14597 /* ARGSUSED */ 14598 static int 14599 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14600 { 14601 sf_region_t *rgnp = (sf_region_t *)buf; 14602 bzero(buf, sizeof (*rgnp)); 14603 14604 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14605 14606 return (0); 14607 } 14608 14609 /* ARGSUSED */ 14610 static void 14611 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14612 { 14613 sf_region_t *rgnp = (sf_region_t *)buf; 14614 mutex_destroy(&rgnp->rgn_mutex); 14615 } 14616 14617 static int 14618 sfrgnmap_isnull(sf_region_map_t *map) 14619 { 14620 int i; 14621 14622 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14623 if (map->bitmap[i] != 0) { 14624 return (0); 14625 } 14626 } 14627 return (1); 14628 } 14629 14630 static int 14631 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14632 { 14633 int i; 14634 14635 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14636 if (map->bitmap[i] != 0) { 14637 return (0); 14638 } 14639 } 14640 return (1); 14641 } 14642 14643 #ifdef DEBUG 14644 static void 14645 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14646 { 14647 sfmmu_t *sp; 14648 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14649 14650 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14651 ASSERT(srdp == sp->sfmmu_srdp); 14652 if (sp == sfmmup) { 14653 if (onlist) { 14654 return; 14655 } else { 14656 panic("shctx: sfmmu 0x%p found on scd" 14657 "list 0x%p", (void *)sfmmup, 14658 (void *)*headp); 14659 } 14660 } 14661 } 14662 if (onlist) { 14663 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14664 (void *)sfmmup, (void *)*headp); 14665 } else { 14666 return; 14667 } 14668 } 14669 #else /* DEBUG */ 14670 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14671 #endif /* DEBUG */ 14672 14673 /* 14674 * Removes an sfmmu from the SCD sfmmu list. 14675 */ 14676 static void 14677 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14678 { 14679 ASSERT(sfmmup->sfmmu_srdp != NULL); 14680 check_scd_sfmmu_list(headp, sfmmup, 1); 14681 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14682 ASSERT(*headp != sfmmup); 14683 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14684 sfmmup->sfmmu_scd_link.next; 14685 } else { 14686 ASSERT(*headp == sfmmup); 14687 *headp = sfmmup->sfmmu_scd_link.next; 14688 } 14689 if (sfmmup->sfmmu_scd_link.next != NULL) { 14690 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14691 sfmmup->sfmmu_scd_link.prev; 14692 } 14693 } 14694 14695 14696 /* 14697 * Adds an sfmmu to the start of the queue. 14698 */ 14699 static void 14700 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14701 { 14702 check_scd_sfmmu_list(headp, sfmmup, 0); 14703 sfmmup->sfmmu_scd_link.prev = NULL; 14704 sfmmup->sfmmu_scd_link.next = *headp; 14705 if (*headp != NULL) 14706 (*headp)->sfmmu_scd_link.prev = sfmmup; 14707 *headp = sfmmup; 14708 } 14709 14710 /* 14711 * Remove an scd from the start of the queue. 14712 */ 14713 static void 14714 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14715 { 14716 if (scdp->scd_prev != NULL) { 14717 ASSERT(*headp != scdp); 14718 scdp->scd_prev->scd_next = scdp->scd_next; 14719 } else { 14720 ASSERT(*headp == scdp); 14721 *headp = scdp->scd_next; 14722 } 14723 14724 if (scdp->scd_next != NULL) { 14725 scdp->scd_next->scd_prev = scdp->scd_prev; 14726 } 14727 } 14728 14729 /* 14730 * Add an scd to the start of the queue. 
14731 */ 14732 static void 14733 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14734 { 14735 scdp->scd_prev = NULL; 14736 scdp->scd_next = *headp; 14737 if (*headp != NULL) { 14738 (*headp)->scd_prev = scdp; 14739 } 14740 *headp = scdp; 14741 } 14742 14743 static int 14744 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14745 { 14746 uint_t rid; 14747 uint_t i; 14748 uint_t j; 14749 ulong_t w; 14750 sf_region_t *rgnp; 14751 ulong_t tte8k_cnt = 0; 14752 ulong_t tte4m_cnt = 0; 14753 uint_t tsb_szc; 14754 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14755 sfmmu_t *ism_hatid; 14756 struct tsb_info *newtsb; 14757 int szc; 14758 14759 ASSERT(srdp != NULL); 14760 14761 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14762 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14763 continue; 14764 } 14765 j = 0; 14766 while (w) { 14767 if (!(w & 0x1)) { 14768 j++; 14769 w >>= 1; 14770 continue; 14771 } 14772 rid = (i << BT_ULSHIFT) | j; 14773 j++; 14774 w >>= 1; 14775 14776 if (rid < SFMMU_MAX_HME_REGIONS) { 14777 rgnp = srdp->srd_hmergnp[rid]; 14778 ASSERT(rgnp->rgn_id == rid); 14779 ASSERT(rgnp->rgn_refcnt > 0); 14780 14781 if (rgnp->rgn_pgszc < TTE4M) { 14782 tte8k_cnt += rgnp->rgn_size >> 14783 TTE_PAGE_SHIFT(TTE8K); 14784 } else { 14785 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14786 tte4m_cnt += rgnp->rgn_size >> 14787 TTE_PAGE_SHIFT(TTE4M); 14788 /* 14789 * Inflate SCD tsb0 by preallocating 14790 * 1/4 8k ttecnt for 4M regions to 14791 * allow for lgpg alloc failure. 14792 */ 14793 tte8k_cnt += rgnp->rgn_size >> 14794 (TTE_PAGE_SHIFT(TTE8K) + 2); 14795 } 14796 } else { 14797 rid -= SFMMU_MAX_HME_REGIONS; 14798 rgnp = srdp->srd_ismrgnp[rid]; 14799 ASSERT(rgnp->rgn_id == rid); 14800 ASSERT(rgnp->rgn_refcnt > 0); 14801 14802 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14803 ASSERT(ism_hatid->sfmmu_ismhat); 14804 14805 for (szc = 0; szc < TTE4M; szc++) { 14806 tte8k_cnt += 14807 ism_hatid->sfmmu_ttecnt[szc] << 14808 TTE_BSZS_SHIFT(szc); 14809 } 14810 14811 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14812 if (rgnp->rgn_pgszc >= TTE4M) { 14813 tte4m_cnt += rgnp->rgn_size >> 14814 TTE_PAGE_SHIFT(TTE4M); 14815 } 14816 } 14817 } 14818 } 14819 14820 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14821 14822 /* Allocate both the SCD TSBs here. */ 14823 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14824 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14825 (tsb_szc <= TSB_4M_SZCODE || 14826 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14827 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14828 TSB_ALLOC, scsfmmup))) { 14829 14830 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14831 return (TSB_ALLOCFAIL); 14832 } else { 14833 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14834 14835 if (tte4m_cnt) { 14836 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14837 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14838 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14839 (tsb_szc <= TSB_4M_SZCODE || 14840 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14841 TSB4M|TSB32M|TSB256M, 14842 TSB_ALLOC, scsfmmup))) { 14843 /* 14844 * If we fail to allocate the 2nd shared tsb, 14845 * just free the 1st tsb, return failure. 
14846 */ 14847 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14848 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14849 return (TSB_ALLOCFAIL); 14850 } else { 14851 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14852 newtsb->tsb_flags |= TSB_SHAREDCTX; 14853 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14854 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14855 } 14856 } 14857 SFMMU_STAT(sf_scd_1sttsb_alloc); 14858 } 14859 return (TSB_SUCCESS); 14860 } 14861 14862 static void 14863 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14864 { 14865 while (scd_sfmmu->sfmmu_tsb != NULL) { 14866 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14867 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14868 scd_sfmmu->sfmmu_tsb = next; 14869 } 14870 } 14871 14872 /* 14873 * Link the sfmmu onto the hme region list. 14874 */ 14875 void 14876 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14877 { 14878 uint_t rid; 14879 sf_rgn_link_t *rlink; 14880 sfmmu_t *head; 14881 sf_rgn_link_t *hrlink; 14882 14883 rid = rgnp->rgn_id; 14884 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14885 14886 /* LINTED: constant in conditional context */ 14887 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14888 ASSERT(rlink != NULL); 14889 mutex_enter(&rgnp->rgn_mutex); 14890 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14891 rlink->next = NULL; 14892 rlink->prev = NULL; 14893 /* 14894 * make sure rlink's next field is NULL 14895 * before making this link visible. 14896 */ 14897 membar_stst(); 14898 rgnp->rgn_sfmmu_head = sfmmup; 14899 } else { 14900 /* LINTED: constant in conditional context */ 14901 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14902 ASSERT(hrlink != NULL); 14903 ASSERT(hrlink->prev == NULL); 14904 rlink->next = head; 14905 rlink->prev = NULL; 14906 hrlink->prev = sfmmup; 14907 /* 14908 * make sure rlink's next field is correct 14909 * before making this link visible. 14910 */ 14911 membar_stst(); 14912 rgnp->rgn_sfmmu_head = sfmmup; 14913 } 14914 mutex_exit(&rgnp->rgn_mutex); 14915 } 14916 14917 /* 14918 * Unlink the sfmmu from the hme region list. 14919 */ 14920 void 14921 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14922 { 14923 uint_t rid; 14924 sf_rgn_link_t *rlink; 14925 14926 rid = rgnp->rgn_id; 14927 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14928 14929 /* LINTED: constant in conditional context */ 14930 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14931 ASSERT(rlink != NULL); 14932 mutex_enter(&rgnp->rgn_mutex); 14933 if (rgnp->rgn_sfmmu_head == sfmmup) { 14934 sfmmu_t *next = rlink->next; 14935 rgnp->rgn_sfmmu_head = next; 14936 /* 14937 * if we are stopped by xc_attention() after this 14938 * point the forward link walking in 14939 * sfmmu_rgntlb_demap() will work correctly since the 14940 * head correctly points to the next element. 
14941 */ 14942 membar_stst(); 14943 rlink->next = NULL; 14944 ASSERT(rlink->prev == NULL); 14945 if (next != NULL) { 14946 sf_rgn_link_t *nrlink; 14947 /* LINTED: constant in conditional context */ 14948 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14949 ASSERT(nrlink != NULL); 14950 ASSERT(nrlink->prev == sfmmup); 14951 nrlink->prev = NULL; 14952 } 14953 } else { 14954 sfmmu_t *next = rlink->next; 14955 sfmmu_t *prev = rlink->prev; 14956 sf_rgn_link_t *prlink; 14957 14958 ASSERT(prev != NULL); 14959 /* LINTED: constant in conditional context */ 14960 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14961 ASSERT(prlink != NULL); 14962 ASSERT(prlink->next == sfmmup); 14963 prlink->next = next; 14964 /* 14965 * if we are stopped by xc_attention() 14966 * after this point the forward link walking 14967 * will work correctly since the prev element 14968 * correctly points to the next element. 14969 */ 14970 membar_stst(); 14971 rlink->next = NULL; 14972 rlink->prev = NULL; 14973 if (next != NULL) { 14974 sf_rgn_link_t *nrlink; 14975 /* LINTED: constant in conditional context */ 14976 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14977 ASSERT(nrlink != NULL); 14978 ASSERT(nrlink->prev == sfmmup); 14979 nrlink->prev = prev; 14980 } 14981 } 14982 mutex_exit(&rgnp->rgn_mutex); 14983 } 14984 14985 /* 14986 * Link scd sfmmu onto ism or hme region list for each region in the 14987 * scd region map. 14988 */ 14989 void 14990 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14991 { 14992 uint_t rid; 14993 uint_t i; 14994 uint_t j; 14995 ulong_t w; 14996 sf_region_t *rgnp; 14997 sfmmu_t *scsfmmup; 14998 14999 scsfmmup = scdp->scd_sfmmup; 15000 ASSERT(scsfmmup->sfmmu_scdhat); 15001 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 15002 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 15003 continue; 15004 } 15005 j = 0; 15006 while (w) { 15007 if (!(w & 0x1)) { 15008 j++; 15009 w >>= 1; 15010 continue; 15011 } 15012 rid = (i << BT_ULSHIFT) | j; 15013 j++; 15014 w >>= 1; 15015 15016 if (rid < SFMMU_MAX_HME_REGIONS) { 15017 rgnp = srdp->srd_hmergnp[rid]; 15018 ASSERT(rgnp->rgn_id == rid); 15019 ASSERT(rgnp->rgn_refcnt > 0); 15020 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 15021 } else { 15022 sfmmu_t *ism_hatid = NULL; 15023 ism_ment_t *ism_ment; 15024 rid -= SFMMU_MAX_HME_REGIONS; 15025 rgnp = srdp->srd_ismrgnp[rid]; 15026 ASSERT(rgnp->rgn_id == rid); 15027 ASSERT(rgnp->rgn_refcnt > 0); 15028 15029 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 15030 ASSERT(ism_hatid->sfmmu_ismhat); 15031 ism_ment = &scdp->scd_ism_links[rid]; 15032 ism_ment->iment_hat = scsfmmup; 15033 ism_ment->iment_base_va = rgnp->rgn_saddr; 15034 mutex_enter(&ism_mlist_lock); 15035 iment_add(ism_ment, ism_hatid); 15036 mutex_exit(&ism_mlist_lock); 15037 15038 } 15039 } 15040 } 15041 } 15042 /* 15043 * Unlink scd sfmmu from ism or hme region list for each region in the 15044 * scd region map. 
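 *
 * Both this routine and sfmmu_link_scd_to_regions() above walk the SCD
 * region bitmap a word at a time and reconstruct each region id from the
 * set bits as rid = (word_index << BT_ULSHIFT) | bit_index; rids below
 * SFMMU_MAX_HME_REGIONS identify hme regions, while larger rids are offset
 * by SFMMU_MAX_HME_REGIONS and identify ISM regions.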
15045 */ 15046 void 15047 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 15048 { 15049 uint_t rid; 15050 uint_t i; 15051 uint_t j; 15052 ulong_t w; 15053 sf_region_t *rgnp; 15054 sfmmu_t *scsfmmup; 15055 15056 scsfmmup = scdp->scd_sfmmup; 15057 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 15058 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 15059 continue; 15060 } 15061 j = 0; 15062 while (w) { 15063 if (!(w & 0x1)) { 15064 j++; 15065 w >>= 1; 15066 continue; 15067 } 15068 rid = (i << BT_ULSHIFT) | j; 15069 j++; 15070 w >>= 1; 15071 15072 if (rid < SFMMU_MAX_HME_REGIONS) { 15073 rgnp = srdp->srd_hmergnp[rid]; 15074 ASSERT(rgnp->rgn_id == rid); 15075 ASSERT(rgnp->rgn_refcnt > 0); 15076 sfmmu_unlink_from_hmeregion(scsfmmup, 15077 rgnp); 15078 15079 } else { 15080 sfmmu_t *ism_hatid = NULL; 15081 ism_ment_t *ism_ment; 15082 rid -= SFMMU_MAX_HME_REGIONS; 15083 rgnp = srdp->srd_ismrgnp[rid]; 15084 ASSERT(rgnp->rgn_id == rid); 15085 ASSERT(rgnp->rgn_refcnt > 0); 15086 15087 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 15088 ASSERT(ism_hatid->sfmmu_ismhat); 15089 ism_ment = &scdp->scd_ism_links[rid]; 15090 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 15091 ASSERT(ism_ment->iment_base_va == 15092 rgnp->rgn_saddr); 15093 ism_ment->iment_hat = NULL; 15094 ism_ment->iment_base_va = 0; 15095 mutex_enter(&ism_mlist_lock); 15096 iment_sub(ism_ment, ism_hatid); 15097 mutex_exit(&ism_mlist_lock); 15098 15099 } 15100 } 15101 } 15102 } 15103 /* 15104 * Allocates and initialises a new SCD structure, this is called with 15105 * the srd_scd_mutex held and returns with the reference count 15106 * initialised to 1. 15107 */ 15108 static sf_scd_t * 15109 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 15110 { 15111 sf_scd_t *new_scdp; 15112 sfmmu_t *scsfmmup; 15113 int i; 15114 15115 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 15116 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 15117 15118 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 15119 new_scdp->scd_sfmmup = scsfmmup; 15120 scsfmmup->sfmmu_srdp = srdp; 15121 scsfmmup->sfmmu_scdp = new_scdp; 15122 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 15123 scsfmmup->sfmmu_scdhat = 1; 15124 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 15125 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 15126 15127 ASSERT(max_mmu_ctxdoms > 0); 15128 for (i = 0; i < max_mmu_ctxdoms; i++) { 15129 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 15130 scsfmmup->sfmmu_ctxs[i].gnum = 0; 15131 } 15132 15133 for (i = 0; i < MMU_PAGE_SIZES; i++) { 15134 new_scdp->scd_rttecnt[i] = 0; 15135 } 15136 15137 new_scdp->scd_region_map = *new_map; 15138 new_scdp->scd_refcnt = 1; 15139 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 15140 kmem_cache_free(scd_cache, new_scdp); 15141 kmem_cache_free(sfmmuid_cache, scsfmmup); 15142 return (NULL); 15143 } 15144 if (&mmu_init_scd) { 15145 mmu_init_scd(new_scdp); 15146 } 15147 return (new_scdp); 15148 } 15149 15150 /* 15151 * The first phase of a process joining an SCD. The hat structure is 15152 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 15153 * and a cross-call with context invalidation is used to cause the 15154 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 15155 * routine. 
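 *
 * The second-phase work is performed by sfmmu_finish_join_scd() below,
 * which sfmmu_tsbmiss_exception() calls once it sees the HAT_JOIN_SCD
 * flag set on the hat.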
15156 */ 15157 static void 15158 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 15159 { 15160 hatlock_t *hatlockp; 15161 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15162 int i; 15163 sf_scd_t *old_scdp; 15164 15165 ASSERT(srdp != NULL); 15166 ASSERT(scdp != NULL); 15167 ASSERT(scdp->scd_refcnt > 0); 15168 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15169 15170 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 15171 ASSERT(old_scdp != scdp); 15172 15173 mutex_enter(&old_scdp->scd_mutex); 15174 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 15175 mutex_exit(&old_scdp->scd_mutex); 15176 /* 15177 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 15178 * include the shme rgn ttecnt for rgns that 15179 * were in the old SCD 15180 */ 15181 for (i = 0; i < mmu_page_sizes; i++) { 15182 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15183 old_scdp->scd_rttecnt[i]); 15184 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15185 sfmmup->sfmmu_scdrttecnt[i]); 15186 } 15187 } 15188 15189 /* 15190 * Move sfmmu to the scd lists. 15191 */ 15192 mutex_enter(&scdp->scd_mutex); 15193 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 15194 mutex_exit(&scdp->scd_mutex); 15195 SF_SCD_INCR_REF(scdp); 15196 15197 hatlockp = sfmmu_hat_enter(sfmmup); 15198 /* 15199 * For a multi-thread process, we must stop 15200 * all the other threads before joining the scd. 15201 */ 15202 15203 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 15204 15205 sfmmu_invalidate_ctx(sfmmup); 15206 sfmmup->sfmmu_scdp = scdp; 15207 15208 /* 15209 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 15210 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 15211 */ 15212 for (i = 0; i < mmu_page_sizes; i++) { 15213 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 15214 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 15215 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15216 -sfmmup->sfmmu_scdrttecnt[i]); 15217 if (!sfmmup->sfmmu_ttecnt[i]) { 15218 sfmmup->sfmmu_tteflags &= ~(1 << i); 15219 } 15220 } 15221 /* update tsb0 inflation count */ 15222 if (old_scdp != NULL) { 15223 sfmmup->sfmmu_tsb0_4minflcnt += 15224 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15225 } 15226 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 15227 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 15228 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15229 15230 if (&mmu_set_pgsz_order) { 15231 mmu_set_pgsz_order(sfmmup, 0); 15232 } 15233 sfmmu_hat_exit(hatlockp); 15234 15235 if (old_scdp != NULL) { 15236 SF_SCD_DECR_REF(srdp, old_scdp); 15237 } 15238 15239 } 15240 15241 /* 15242 * This routine is called by a process to become part of an SCD. It is called 15243 * from sfmmu_tsbmiss_exception() once most of the initial work has been 15244 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 
15245 */
15246 static void
15247 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15248 {
15249 struct tsb_info *tsbinfop;
15250
15251 ASSERT(sfmmu_hat_lock_held(sfmmup));
15252 ASSERT(sfmmup->sfmmu_scdp != NULL);
15253 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15254 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15255 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15256
15257 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15258 tsbinfop = tsbinfop->tsb_next) {
15259 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15260 continue;
15261 }
15262 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15263
15264 sfmmu_inv_tsb(tsbinfop->tsb_va,
15265 TSB_BYTES(tsbinfop->tsb_szc));
15266 }
15267
15268 /* Set HAT_CTX1_FLAG for all SCD ISMs */
15269 sfmmu_ism_hatflags(sfmmup, 1);
15270
15271 SFMMU_STAT(sf_join_scd);
15272 }
15273
15274 /*
15275 * This routine is called to check whether there is an SCD which matches
15276 * the process's region map; if not, a new SCD may be created.
15277 */
15278 static void
15279 sfmmu_find_scd(sfmmu_t *sfmmup)
15280 {
15281 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15282 sf_scd_t *scdp, *new_scdp;
15283 int ret;
15284
15285 ASSERT(srdp != NULL);
15286 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15287
15288 mutex_enter(&srdp->srd_scd_mutex);
15289 for (scdp = srdp->srd_scdp; scdp != NULL;
15290 scdp = scdp->scd_next) {
15291 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15292 &sfmmup->sfmmu_region_map, SFMMU_RGNMAP_WORDS, ret);
15293 if (ret == 1) {
15294 SF_SCD_INCR_REF(scdp);
15295 mutex_exit(&srdp->srd_scd_mutex);
15296 sfmmu_join_scd(scdp, sfmmup);
15297 ASSERT(scdp->scd_refcnt >= 2);
15298 atomic_add_32((volatile uint32_t *)
15299 &scdp->scd_refcnt, -1);
15300 return;
15301 } else {
15302 /*
15303 * If the sfmmu region map is a subset of the scd
15304 * region map, then the assumption is that this process
15305 * will continue attaching to ISM segments until the
15306 * region maps are equal.
15307 */
15308 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15309 &sfmmup->sfmmu_region_map, ret);
15310 if (ret == 1) {
15311 mutex_exit(&srdp->srd_scd_mutex);
15312 return;
15313 }
15314 }
15315 }
15316
15317 ASSERT(scdp == NULL);
15318 /*
15319 * No matching SCD has been found; create a new one.
15320 */
15321 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15322 NULL) {
15323 mutex_exit(&srdp->srd_scd_mutex);
15324 return;
15325 }
15326
15327 /*
15328 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15329 */
15330
15331 /* Set scd_rttecnt for shme rgns in SCD */
15332 sfmmu_set_scd_rttecnt(srdp, new_scdp);
15333
15334 /*
15335 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15336 */
15337 sfmmu_link_scd_to_regions(srdp, new_scdp);
15338 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15339 SFMMU_STAT_ADD(sf_create_scd, 1);
15340
15341 mutex_exit(&srdp->srd_scd_mutex);
15342 sfmmu_join_scd(new_scdp, sfmmup);
15343 ASSERT(new_scdp->scd_refcnt >= 2);
15344 atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1);
15345 }
15346
15347 /*
15348 * This routine is called by a process to remove itself from an SCD. It is
15349 * called either when the process has detached from a segment or from
15350 * hat_free_start() as a result of calling exit.
15351 */ 15352 static void 15353 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15354 { 15355 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15356 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15357 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15358 int i; 15359 15360 ASSERT(scdp != NULL); 15361 ASSERT(srdp != NULL); 15362 15363 if (sfmmup->sfmmu_free) { 15364 /* 15365 * If the process is part of an SCD the sfmmu is unlinked 15366 * from scd_sf_list. 15367 */ 15368 mutex_enter(&scdp->scd_mutex); 15369 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15370 mutex_exit(&scdp->scd_mutex); 15371 /* 15372 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15373 * are about to leave the SCD 15374 */ 15375 for (i = 0; i < mmu_page_sizes; i++) { 15376 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15377 scdp->scd_rttecnt[i]); 15378 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15379 sfmmup->sfmmu_scdrttecnt[i]); 15380 sfmmup->sfmmu_scdrttecnt[i] = 0; 15381 } 15382 sfmmup->sfmmu_scdp = NULL; 15383 15384 SF_SCD_DECR_REF(srdp, scdp); 15385 return; 15386 } 15387 15388 ASSERT(r_type != SFMMU_REGION_ISM || 15389 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15390 ASSERT(scdp->scd_refcnt); 15391 ASSERT(!sfmmup->sfmmu_free); 15392 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15393 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15394 15395 /* 15396 * Wait for ISM maps to be updated. 15397 */ 15398 if (r_type != SFMMU_REGION_ISM) { 15399 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15400 sfmmup->sfmmu_scdp != NULL) { 15401 cv_wait(&sfmmup->sfmmu_tsb_cv, 15402 HATLOCK_MUTEXP(hatlockp)); 15403 } 15404 15405 if (sfmmup->sfmmu_scdp == NULL) { 15406 sfmmu_hat_exit(hatlockp); 15407 return; 15408 } 15409 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15410 } 15411 15412 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15413 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15414 /* 15415 * Since HAT_JOIN_SCD was set our context 15416 * is still invalid. 15417 */ 15418 } else { 15419 /* 15420 * For a multi-thread process, we must stop 15421 * all the other threads before leaving the scd. 15422 */ 15423 15424 sfmmu_invalidate_ctx(sfmmup); 15425 } 15426 15427 /* Clear all the rid's for ISM, delete flags, etc */ 15428 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15429 sfmmu_ism_hatflags(sfmmup, 0); 15430 15431 /* 15432 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15433 * are in SCD before this sfmmup leaves the SCD. 
15434 */ 15435 for (i = 0; i < mmu_page_sizes; i++) { 15436 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15437 scdp->scd_rttecnt[i]); 15438 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15439 sfmmup->sfmmu_scdrttecnt[i]); 15440 if (sfmmup->sfmmu_ttecnt[i] && 15441 (sfmmup->sfmmu_tteflags & (1 << i)) == 0) { 15442 sfmmup->sfmmu_tteflags |= (1 << i); 15443 } 15444 sfmmup->sfmmu_scdrttecnt[i] = 0; 15445 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15446 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15447 sfmmup->sfmmu_scdismttecnt[i] = 0; 15448 } 15449 /* update tsb0 inflation count */ 15450 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15451 15452 if (r_type != SFMMU_REGION_ISM) { 15453 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15454 } 15455 sfmmup->sfmmu_scdp = NULL; 15456 15457 if (&mmu_set_pgsz_order) { 15458 mmu_set_pgsz_order(sfmmup, 0); 15459 } 15460 sfmmu_hat_exit(hatlockp); 15461 15462 /* 15463 * Unlink sfmmu from scd_sf_list this can be done without holding 15464 * the hat lock as we hold the sfmmu_as lock which prevents 15465 * hat_join_region from adding this thread to the scd again. Other 15466 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15467 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15468 * while holding the hat lock. 15469 */ 15470 mutex_enter(&scdp->scd_mutex); 15471 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15472 mutex_exit(&scdp->scd_mutex); 15473 SFMMU_STAT(sf_leave_scd); 15474 15475 SF_SCD_DECR_REF(srdp, scdp); 15476 hatlockp = sfmmu_hat_enter(sfmmup); 15477 15478 } 15479 15480 /* 15481 * Unlink and free up an SCD structure with a reference count of 0. 15482 */ 15483 static void 15484 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15485 { 15486 sfmmu_t *scsfmmup; 15487 sf_scd_t *sp; 15488 hatlock_t *shatlockp; 15489 int i, ret; 15490 15491 mutex_enter(&srdp->srd_scd_mutex); 15492 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15493 if (sp == scdp) 15494 break; 15495 } 15496 if (sp == NULL || sp->scd_refcnt) { 15497 mutex_exit(&srdp->srd_scd_mutex); 15498 return; 15499 } 15500 15501 /* 15502 * It is possible that the scd has been freed and reallocated with a 15503 * different region map while we've been waiting for the srd_scd_mutex. 15504 */ 15505 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, 15506 SFMMU_RGNMAP_WORDS, ret); 15507 if (ret != 1) { 15508 mutex_exit(&srdp->srd_scd_mutex); 15509 return; 15510 } 15511 15512 ASSERT(scdp->scd_sf_list == NULL); 15513 /* 15514 * Unlink scd from srd_scdp list. 15515 */ 15516 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15517 mutex_exit(&srdp->srd_scd_mutex); 15518 15519 sfmmu_unlink_scd_from_regions(srdp, scdp); 15520 15521 /* Clear shared context tsb and release ctx */ 15522 scsfmmup = scdp->scd_sfmmup; 15523 15524 /* 15525 * create a barrier so that scd will not be destroyed 15526 * if other thread still holds the same shared hat lock. 15527 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15528 * shared hat lock before checking the shared tsb reloc flag. 
	shatlockp = sfmmu_hat_enter(scsfmmup);
	sfmmu_hat_exit(shatlockp);

	sfmmu_free_scd_tsbs(scsfmmup);

	for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
		if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
			kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
			    SFMMU_L2_HMERLINKS_SIZE);
			scsfmmup->sfmmu_hmeregion_links[i] = NULL;
		}
	}
	kmem_cache_free(sfmmuid_cache, scsfmmup);
	kmem_cache_free(scd_cache, scdp);
	SFMMU_STAT(sf_destroy_scd);
}

/*
 * Modify the HAT_CTX1_FLAG for each ISM segment whose region id is set in
 * the SCD's ISM region map. This flag indicates to the tsbmiss handler that
 * mappings for these segments should be loaded using the shared context;
 * when addflag is zero the flag is cleared instead.
 */
static void
sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
{
	sf_scd_t *scdp = sfmmup->sfmmu_scdp;
	ism_blk_t *ism_blkp;
	ism_map_t *ism_map;
	int i, rid;

	ASSERT(sfmmup->sfmmu_iblk != NULL);
	ASSERT(scdp != NULL);
	/*
	 * Note that the caller either set the HAT_ISMBUSY flag or checked
	 * under the hat lock that HAT_ISMBUSY was not set by another thread.
	 */
	ASSERT(sfmmu_hat_lock_held(sfmmup));

	ism_blkp = sfmmup->sfmmu_iblk;
	while (ism_blkp != NULL) {
		ism_map = ism_blkp->iblk_maps;
		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
			rid = ism_map[i].imap_rid;
			if (rid == SFMMU_INVALID_ISMRID) {
				continue;
			}
			ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
			if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
			    addflag) {
				ism_map[i].imap_hatflags |=
				    HAT_CTX1_FLAG;
			} else {
				ism_map[i].imap_hatflags &=
				    ~HAT_CTX1_FLAG;
			}
		}
		ism_blkp = ism_blkp->iblk_next;
	}
}

static int
sfmmu_srd_lock_held(sf_srd_t *srdp)
{
	return (MUTEX_HELD(&srdp->srd_mutex));
}

/* ARGSUSED */
static int
sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
{
	sf_scd_t *scdp = (sf_scd_t *)buf;

	bzero(buf, sizeof (sf_scd_t));
	mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

/* ARGSUSED */
static void
sfmmu_scdcache_destructor(void *buf, void *cdrarg)
{
	sf_scd_t *scdp = (sf_scd_t *)buf;

	mutex_destroy(&scdp->scd_mutex);
}

/*
 * The listp parameter is a pointer to a list of hmeblks which have been
 * partially freed by calling sfmmu_hblk_hash_rm(); the last phase of the
 * freeing process is to cross-call all cpus to ensure that there are no
 * remaining cached references.
 *
 * If the local generation number is less than the global one, we can free
 * hmeblks which are already on the pending queue, as another cpu has
 * completed the cross-call.
 *
 * We cross-call to make sure that there are no threads on other cpus
 * accessing these hmeblks and then complete the process of freeing them
 * when any of the following conditions hold:
 *	the total number of pending hmeblks is greater than the threshold,
 *	the reserve list has fewer than HBLK_RESERVE_CNT hmeblks, or
 *	at least 1 second has passed since the last time we cross-called.
 *
 * Otherwise, we add the hmeblks to the per-cpu pending queue.
 */
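/*
 * In outline, with 'count' hmeblks arriving on the caller's list and the
 * current cpu's queue held under chp_mutex:
 *
 *	if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
 *	    chp_count + count > cpu_hme_pend_thresh ||
 *	    more than a second since the last cross-call))
 *		append the queue to the caller's list, xt_sync() the other
 *		cpus and free everything via sfmmu_hblk_free();
 *	else
 *		append the caller's list to the queue and return.
 */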
static void
sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
{
	struct hme_blk *hblkp, *pr_hblkp = NULL;
	int count = 0;
	cpuset_t cpuset = cpu_ready_set;
	cpu_hme_pend_t *cpuhp;
	timestruc_t now;
	int one_second_expired = 0;

	gethrestime_lasttick(&now);

	for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
		ASSERT(hblkp->hblk_shw_bit == 0);
		ASSERT(hblkp->hblk_shared == 0);
		count++;
		pr_hblkp = hblkp;
	}

	cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
	mutex_enter(&cpuhp->chp_mutex);

	if ((cpuhp->chp_count + count) == 0) {
		mutex_exit(&cpuhp->chp_mutex);
		return;
	}

	if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
		one_second_expired = 1;
	}

	if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
	    (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
	    one_second_expired)) {
		/* Append the cpu's pending queue to the caller's list */
		if (pr_hblkp == NULL) {
			*listp = cpuhp->chp_listp;
		} else {
			pr_hblkp->hblk_next = cpuhp->chp_listp;
		}
		cpuhp->chp_listp = NULL;
		cpuhp->chp_count = 0;
		cpuhp->chp_timestamp = now.tv_sec;
		mutex_exit(&cpuhp->chp_mutex);

		kpreempt_disable();
		CPUSET_DEL(cpuset, CPU->cpu_id);
		xt_sync(cpuset);
		xt_sync(cpuset);
		kpreempt_enable();

		/*
		 * At this stage we know that no trap handlers on other
		 * cpus can have references to hmeblks on the list.
		 */
		sfmmu_hblk_free(listp);
	} else if (*listp != NULL) {
		pr_hblkp->hblk_next = cpuhp->chp_listp;
		cpuhp->chp_listp = *listp;
		cpuhp->chp_count += count;
		*listp = NULL;
		mutex_exit(&cpuhp->chp_mutex);
	} else {
		mutex_exit(&cpuhp->chp_mutex);
	}
}

/*
 * Add an hmeblk to the hash list.
 */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
#ifdef	DEBUG
	if (hmebp->hmeblkp == NULL) {
		ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
	}
#endif /* DEBUG */

	hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
	/*
	 * Since the TSB miss handler now does not lock the hash chain before
	 * walking it, make sure that the hmeblk's nextpa is globally visible
	 * before we make the hmeblk itself visible by updating the chain
	 * root pointer in the hash bucket.
	 */
	membar_producer();
	hmebp->hmeh_nextpa = hblkpa;
	hmeblkp->hblk_next = hmebp->hmeblkp;
	hmebp->hmeblkp = hmeblkp;
}
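/*
 * Together, sfmmu_hblk_hash_add() above and sfmmu_hblk_hash_rm() below are
 * arranged so that a TSB miss handler walking the chain through the
 * physical next pointers (hmeh_nextpa/hblk_nextpa), without taking the
 * hash lock, always sees a consistent chain: hash_add publishes the new
 * hmeblk's hblk_nextpa before linking it into hmeh_nextpa, and hash_rm
 * leaves hblk_nextpa untouched until the hmeblk has passed through the
 * pending list and the cross-call barrier.
 */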
/*
 * This function is the first part of a two-part process to remove an hmeblk
 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
 * but leave the next physical pointer unchanged. The hmeblk is then linked
 * onto a per-cpu pending list using the virtual address pointer.
 *
 * TSB miss trap handlers that start after this phase will no longer see
 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
 * can still use it for further chain traversal because we haven't yet
 * modified the next physical pointer or freed it.
 *
 * In the second phase of hmeblk removal we'll issue a barrier xcall before
 * we reuse or free this hmeblk. This makes sure that all lingering references
 * to the hmeblk after the first phase disappear before we finally reclaim it.
 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
 * during their traversal.
 *
 * The hmehash_mutex must be held when calling this function.
 *
 * Input:
 *	hmebp	- hme hash bucket pointer
 *	hmeblkp	- address of hmeblk to be removed
 *	pr_hblk	- virtual address of previous hmeblkp
 *	listp	- pointer to list of hmeblks linked by virtual address
 *	free_now flag - indicates that a complete removal from the hash chains
 *			is necessary.
 *
 * It is inefficient to use the free_now flag, as a cross-call is required to
 * remove a single hmeblk from the hash chain; it is only used when hmeblks
 * are in short supply.
 */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	struct hme_blk *pr_hblk, struct hme_blk **listp,
	int free_now)
{
	int shw_size, vshift;
	struct hme_blk *shw_hblkp;
	uint_t shw_mask, newshw_mask;
	caddr_t vaddr;
	int size;
	cpuset_t cpuset = cpu_ready_set;

	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));

	if (hmebp->hmeblkp == hmeblkp) {
		hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
		hmebp->hmeblkp = hmeblkp->hblk_next;
	} else {
		pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
		pr_hblk->hblk_next = hmeblkp->hblk_next;
	}

	size = get_hblk_ttesz(hmeblkp);
	shw_hblkp = hmeblkp->hblk_shadow;
	if (shw_hblkp) {
		ASSERT(hblktosfmmu(hmeblkp) != KHATID);
		ASSERT(!hmeblkp->hblk_shared);
#ifdef	DEBUG
		if (mmu_page_sizes == max_mmu_page_sizes) {
			ASSERT(size < TTE256M);
		} else {
			ASSERT(size < TTE4M);
		}
#endif /* DEBUG */

		shw_size = get_hblk_ttesz(shw_hblkp);
		vaddr = (caddr_t)get_hblk_base(hmeblkp);
		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
		ASSERT(vshift < 8);
		/*
		 * Atomically clear shadow mask bit
		 */
		do {
			shw_mask = shw_hblkp->hblk_shw_mask;
			ASSERT(shw_mask & (1 << vshift));
			newshw_mask = shw_mask & ~(1 << vshift);
			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
			    shw_mask, newshw_mask);
		} while (newshw_mask != shw_mask);
		hmeblkp->hblk_shadow = NULL;
	}
	hmeblkp->hblk_shw_bit = 0;

	if (hmeblkp->hblk_shared) {
#ifdef	DEBUG
		sf_srd_t	*srdp;
		sf_region_t	*rgnp;
		uint_t		rid;

		srdp = hblktosrd(hmeblkp);
		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
		rid = hmeblkp->hblk_tag.htag_rid;
		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
		rgnp = srdp->srd_hmergnp[rid];
		ASSERT(rgnp != NULL);
		SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
#endif /* DEBUG */
		hmeblkp->hblk_shared = 0;
	}
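	/*
	 * Second phase: with free_now set we pay for an immediate
	 * cross-call barrier and put the hmeblk back into a clean,
	 * unlinked state; otherwise the hmeblk is queued on listp and
	 * the barrier is amortized over many hmeblks by
	 * sfmmu_hblks_list_purge().
	 */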
	if (free_now) {
		kpreempt_disable();
		CPUSET_DEL(cpuset, CPU->cpu_id);
		xt_sync(cpuset);
		xt_sync(cpuset);
		kpreempt_enable();

		hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
		hmeblkp->hblk_next = NULL;
	} else {
		/* Append hmeblkp to listp for processing later. */
		hmeblkp->hblk_next = *listp;
		*listp = hmeblkp;
	}
}

/*
 * This routine is called when memory is in short supply and returns a free
 * hmeblk of the requested size from the cpu pending lists.
 */
static struct hme_blk *
sfmmu_check_pending_hblks(int size)
{
	int i;
	struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
	int found_hmeblk;
	cpuset_t cpuset = cpu_ready_set;
	cpu_hme_pend_t *cpuhp;

	/* Flush cpu hblk pending queues */
	for (i = 0; i < NCPU; i++) {
		cpuhp = &cpu_hme_pend[i];
		if (cpuhp->chp_listp != NULL) {
			mutex_enter(&cpuhp->chp_mutex);
			if (cpuhp->chp_listp == NULL) {
				mutex_exit(&cpuhp->chp_mutex);
				continue;
			}
			found_hmeblk = 0;
			last_hmeblkp = NULL;
			for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
			    hmeblkp = hmeblkp->hblk_next) {
				if (get_hblk_ttesz(hmeblkp) == size) {
					if (last_hmeblkp == NULL) {
						cpuhp->chp_listp =
						    hmeblkp->hblk_next;
					} else {
						last_hmeblkp->hblk_next =
						    hmeblkp->hblk_next;
					}
					ASSERT(cpuhp->chp_count > 0);
					cpuhp->chp_count--;
					found_hmeblk = 1;
					break;
				} else {
					last_hmeblkp = hmeblkp;
				}
			}
			mutex_exit(&cpuhp->chp_mutex);

			if (found_hmeblk) {
				kpreempt_disable();
				CPUSET_DEL(cpuset, CPU->cpu_id);
				xt_sync(cpuset);
				xt_sync(cpuset);
				kpreempt_enable();
				return (hmeblkp);
			}
		}
	}
	return (NULL);
}