1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * VM - Hardware Address Translation management for Spitfire MMU. 28 * 29 * This file implements the machine specific hardware translation 30 * needed by the VM system. The machine independent interface is 31 * described in <vm/hat.h> while the machine dependent interface 32 * and data structures are described in <vm/hat_sfmmu.h>. 33 * 34 * The hat layer manages the address translation hardware as a cache 35 * driven by calls from the higher levels in the VM system. 36 */ 37 38 #include <sys/types.h> 39 #include <sys/kstat.h> 40 #include <vm/hat.h> 41 #include <vm/hat_sfmmu.h> 42 #include <vm/page.h> 43 #include <sys/pte.h> 44 #include <sys/systm.h> 45 #include <sys/mman.h> 46 #include <sys/sysmacros.h> 47 #include <sys/machparam.h> 48 #include <sys/vtrace.h> 49 #include <sys/kmem.h> 50 #include <sys/mmu.h> 51 #include <sys/cmn_err.h> 52 #include <sys/cpu.h> 53 #include <sys/cpuvar.h> 54 #include <sys/debug.h> 55 #include <sys/lgrp.h> 56 #include <sys/archsystm.h> 57 #include <sys/machsystm.h> 58 #include <sys/vmsystm.h> 59 #include <vm/as.h> 60 #include <vm/seg.h> 61 #include <vm/seg_kp.h> 62 #include <vm/seg_kmem.h> 63 #include <vm/seg_kpm.h> 64 #include <vm/rm.h> 65 #include <sys/t_lock.h> 66 #include <sys/obpdefs.h> 67 #include <sys/vm_machparam.h> 68 #include <sys/var.h> 69 #include <sys/trap.h> 70 #include <sys/machtrap.h> 71 #include <sys/scb.h> 72 #include <sys/bitmap.h> 73 #include <sys/machlock.h> 74 #include <sys/membar.h> 75 #include <sys/atomic.h> 76 #include <sys/cpu_module.h> 77 #include <sys/prom_debug.h> 78 #include <sys/ksynch.h> 79 #include <sys/mem_config.h> 80 #include <sys/mem_cage.h> 81 #include <vm/vm_dep.h> 82 #include <vm/xhat_sfmmu.h> 83 #include <sys/fpu/fpusystm.h> 84 #include <vm/mach_kpm.h> 85 #include <sys/callb.h> 86 87 #ifdef DEBUG 88 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 89 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 90 caddr_t _eaddr = (saddr) + (len); \ 91 sf_srd_t *_srdp; \ 92 sf_region_t *_rgnp; \ 93 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 94 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 95 ASSERT((hat) != ksfmmup); \ 96 _srdp = (hat)->sfmmu_srdp; \ 97 ASSERT(_srdp != NULL); \ 98 ASSERT(_srdp->srd_refcnt != 0); \ 99 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 100 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 101 ASSERT(_rgnp->rgn_refcnt != 0); \ 102 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 103 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 104 SFMMU_REGION_HME); \ 105 ASSERT((saddr) >= _rgnp->rgn_saddr); \ 106 ASSERT((saddr) < _rgnp->rgn_saddr + 
_rgnp->rgn_size); \ 107 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 108 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 109 } 110 111 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 112 { \ 113 caddr_t _hsva; \ 114 caddr_t _heva; \ 115 caddr_t _rsva; \ 116 caddr_t _reva; \ 117 int _ttesz = get_hblk_ttesz(hmeblkp); \ 118 int _flagtte; \ 119 ASSERT((srdp)->srd_refcnt != 0); \ 120 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 121 ASSERT((rgnp)->rgn_id == rid); \ 122 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 123 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 124 SFMMU_REGION_HME); \ 125 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 126 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 127 _heva = get_hblk_endaddr(hmeblkp); \ 128 _rsva = (caddr_t)P2ALIGN( \ 129 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 130 _reva = (caddr_t)P2ROUNDUP( \ 131 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 132 HBLK_MIN_BYTES); \ 133 ASSERT(_hsva >= _rsva); \ 134 ASSERT(_hsva < _reva); \ 135 ASSERT(_heva > _rsva); \ 136 ASSERT(_heva <= _reva); \ 137 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 138 _ttesz; \ 139 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 140 } 141 142 #else /* DEBUG */ 143 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 144 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 145 #endif /* DEBUG */ 146 147 #if defined(SF_ERRATA_57) 148 extern caddr_t errata57_limit; 149 #endif 150 151 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 152 (sizeof (int64_t))) 153 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 154 155 #define HBLK_RESERVE_CNT 128 156 #define HBLK_RESERVE_MIN 20 157 158 static struct hme_blk *freehblkp; 159 static kmutex_t freehblkp_lock; 160 static int freehblkcnt; 161 162 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 163 static kmutex_t hblk_reserve_lock; 164 static kthread_t *hblk_reserve_thread; 165 166 static nucleus_hblk8_info_t nucleus_hblk8; 167 static nucleus_hblk1_info_t nucleus_hblk1; 168 169 /* 170 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here 171 * after the initial phase of removing an hmeblk from the hash chain, see 172 * the detailed comment in sfmmu_hblk_hash_rm() for further details. 173 */ 174 static cpu_hme_pend_t *cpu_hme_pend; 175 static uint_t cpu_hme_pend_thresh; 176 /* 177 * SFMMU specific hat functions 178 */ 179 void hat_pagecachectl(struct page *, int); 180 181 /* flags for hat_pagecachectl */ 182 #define HAT_CACHE 0x1 183 #define HAT_UNCACHE 0x2 184 #define HAT_TMPNC 0x4 185 186 /* 187 * Flag to allow the creation of non-cacheable translations 188 * to system memory. It is off by default. At the moment this 189 * flag is used by the ecache error injector. The error injector 190 * will turn it on when creating such a translation then shut it 191 * off when it's finished. 192 */ 193 194 int sfmmu_allow_nc_trans = 0; 195 196 /* 197 * Flag to disable large page support. 198 * value of 1 => disable all large pages. 199 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 200 * 201 * For example, use the value 0x4 to disable 512K pages. 202 * 203 */ 204 #define LARGE_PAGES_OFF 0x1 205 206 /* 207 * The disable_large_pages and disable_ism_large_pages variables control 208 * hat_memload_array and the page sizes to be used by ISM and the kernel. 
209 * 210 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 211 * are only used to control which OOB pages to use at upper VM segment creation 212 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 213 * Their values may come from platform or CPU specific code to disable page 214 * sizes that should not be used. 215 * 216 * WARNING: 512K pages are currently not supported for ISM/DISM. 217 */ 218 uint_t disable_large_pages = 0; 219 uint_t disable_ism_large_pages = (1 << TTE512K); 220 uint_t disable_auto_data_large_pages = 0; 221 uint_t disable_auto_text_large_pages = 0; 222 223 /* 224 * Private sfmmu data structures for hat management 225 */ 226 static struct kmem_cache *sfmmuid_cache; 227 static struct kmem_cache *mmuctxdom_cache; 228 229 /* 230 * Private sfmmu data structures for tsb management 231 */ 232 static struct kmem_cache *sfmmu_tsbinfo_cache; 233 static struct kmem_cache *sfmmu_tsb8k_cache; 234 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 235 static vmem_t *kmem_bigtsb_arena; 236 static vmem_t *kmem_tsb_arena; 237 238 /* 239 * sfmmu static variables for hmeblk resource management. 240 */ 241 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 242 static struct kmem_cache *sfmmu8_cache; 243 static struct kmem_cache *sfmmu1_cache; 244 static struct kmem_cache *pa_hment_cache; 245 246 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 247 /* 248 * private data for ism 249 */ 250 static struct kmem_cache *ism_blk_cache; 251 static struct kmem_cache *ism_ment_cache; 252 #define ISMID_STARTADDR NULL 253 254 /* 255 * Region management data structures and function declarations. 256 */ 257 258 static void sfmmu_leave_srd(sfmmu_t *); 259 static int sfmmu_srdcache_constructor(void *, void *, int); 260 static void sfmmu_srdcache_destructor(void *, void *); 261 static int sfmmu_rgncache_constructor(void *, void *, int); 262 static void sfmmu_rgncache_destructor(void *, void *); 263 static int sfrgnmap_isnull(sf_region_map_t *); 264 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 265 static int sfmmu_scdcache_constructor(void *, void *, int); 266 static void sfmmu_scdcache_destructor(void *, void *); 267 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 268 size_t, void *, u_offset_t); 269 270 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 271 static sf_srd_bucket_t *srd_buckets; 272 static struct kmem_cache *srd_cache; 273 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 274 static struct kmem_cache *region_cache; 275 static struct kmem_cache *scd_cache; 276 277 #ifdef sun4v 278 int use_bigtsb_arena = 1; 279 #else 280 int use_bigtsb_arena = 0; 281 #endif 282 283 /* External /etc/system tunable, for turning on&off the shctx support */ 284 int disable_shctx = 0; 285 /* Internal variable, set by MD if the HW supports shctx feature */ 286 int shctx_on = 0; 287 288 #ifdef DEBUG 289 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 290 #endif 291 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 292 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 293 294 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 295 static void sfmmu_find_scd(sfmmu_t *); 296 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 297 static void sfmmu_finish_join_scd(sfmmu_t *); 298 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 299 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 300 static int 
sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 301 static void sfmmu_free_scd_tsbs(sfmmu_t *); 302 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 303 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 304 static void sfmmu_ism_hatflags(sfmmu_t *, int); 305 static int sfmmu_srd_lock_held(sf_srd_t *); 306 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 307 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 308 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 309 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 310 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 311 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 312 313 /* 314 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 315 * HAT flags, synchronizing TLB/TSB coherency, and context management. 316 * The lock is hashed on the sfmmup since the case where we need to lock 317 * all processes is rare but does occur (e.g. we need to unload a shared 318 * mapping from all processes using the mapping). We have a lot of buckets, 319 * and each slab of sfmmu_t's can use about a quarter of them, giving us 320 * a fairly good distribution without wasting too much space and overhead 321 * when we have to grab them all. 322 */ 323 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 324 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 325 326 /* 327 * Hash algorithm optimized for a small number of slabs. 328 * 7 is (highbit((sizeof sfmmu_t)) - 1) 329 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 330 * kmem_cache, and thus they will be sequential within that cache. In 331 * addition, each new slab will have a different "color" up to cache_maxcolor 332 * which will skew the hashing for each successive slab which is allocated. 333 * If the size of sfmmu_t changed to a larger size, this algorithm may need 334 * to be revisited. 335 */ 336 #define TSB_HASH_SHIFT_BITS (7) 337 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 338 339 #ifdef DEBUG 340 int tsb_hash_debug = 0; 341 #define TSB_HASH(sfmmup) \ 342 (tsb_hash_debug ? &hat_lock[0] : \ 343 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 344 #else /* DEBUG */ 345 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 346 #endif /* DEBUG */ 347 348 349 /* sfmmu_replace_tsb() return codes. */ 350 typedef enum tsb_replace_rc { 351 TSB_SUCCESS, 352 TSB_ALLOCFAIL, 353 TSB_LOSTRACE, 354 TSB_ALREADY_SWAPPED, 355 TSB_CANTGROW 356 } tsb_replace_rc_t; 357 358 /* 359 * Flags for TSB allocation routines. 360 */ 361 #define TSB_ALLOC 0x01 362 #define TSB_FORCEALLOC 0x02 363 #define TSB_GROW 0x04 364 #define TSB_SHRINK 0x08 365 #define TSB_SWAPIN 0x10 366 367 /* 368 * Support for HAT callbacks. 369 */ 370 #define SFMMU_MAX_RELOC_CALLBACKS 10 371 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 372 static id_t sfmmu_cb_nextid = 0; 373 static id_t sfmmu_tsb_cb_id; 374 struct sfmmu_callback *sfmmu_cb_table; 375 376 /* 377 * Kernel page relocation is enabled by default for non-caged 378 * kernel pages. This has little effect unless segkmem_reloc is 379 * set, since by default kernel memory comes from inside the 380 * kernel cage. 381 */ 382 int hat_kpr_enabled = 1; 383 384 kmutex_t kpr_mutex; 385 kmutex_t kpr_suspendlock; 386 kthread_t *kreloc_thread; 387 388 /* 389 * Enable VA->PA translation sanity checking on DEBUG kernels. 390 * Disabled by default. 
This is incompatible with some 391 * drivers (error injector, RSM) so if it breaks you get 392 * to keep both pieces. 393 */ 394 int hat_check_vtop = 0; 395 396 /* 397 * Private sfmmu routines (prototypes) 398 */ 399 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 400 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 401 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 402 uint_t); 403 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 404 caddr_t, demap_range_t *, uint_t); 405 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 406 caddr_t, int); 407 static void sfmmu_hblk_free(struct hme_blk **); 408 static void sfmmu_hblks_list_purge(struct hme_blk **, int); 409 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 410 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 411 static struct hme_blk *sfmmu_hblk_steal(int); 412 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 413 struct hme_blk *, uint64_t, struct hme_blk *); 414 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 415 416 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 417 struct page **, uint_t, uint_t, uint_t); 418 static void hat_do_memload(struct hat *, caddr_t, struct page *, 419 uint_t, uint_t, uint_t); 420 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 421 uint_t, uint_t, pgcnt_t, uint_t); 422 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 423 uint_t); 424 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 425 uint_t, uint_t); 426 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 427 caddr_t, int, uint_t); 428 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 429 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 430 uint_t); 431 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 432 caddr_t, page_t **, uint_t, uint_t); 433 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 434 435 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 436 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 437 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 438 #ifdef VAC 439 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 440 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 441 int tst_tnc(page_t *pp, pgcnt_t); 442 void conv_tnc(page_t *pp, int); 443 #endif 444 445 static void sfmmu_get_ctx(sfmmu_t *); 446 static void sfmmu_free_sfmmu(sfmmu_t *); 447 448 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 449 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 450 451 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 452 static void hat_pagereload(struct page *, struct page *); 453 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 454 #ifdef VAC 455 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 456 static void sfmmu_page_cache(page_t *, int, int, int); 457 #endif 458 459 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 460 struct hme_blk *, int); 461 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 462 pfn_t, int, int, int, int); 463 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 464 pfn_t, int); 465 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 466 static void sfmmu_tlb_range_demap(demap_range_t *); 467 static void sfmmu_invalidate_ctx(sfmmu_t *); 468 
static void sfmmu_sync_mmustate(sfmmu_t *); 469 470 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 471 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 472 sfmmu_t *); 473 static void sfmmu_tsb_free(struct tsb_info *); 474 static void sfmmu_tsbinfo_free(struct tsb_info *); 475 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 476 sfmmu_t *); 477 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 478 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 479 static int sfmmu_select_tsb_szc(pgcnt_t); 480 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 481 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 482 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 483 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 484 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 485 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 486 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 487 hatlock_t *, uint_t); 488 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 489 490 #ifdef VAC 491 void sfmmu_cache_flush(pfn_t, int); 492 void sfmmu_cache_flushcolor(int, pfn_t); 493 #endif 494 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 495 caddr_t, demap_range_t *, uint_t, int); 496 497 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 498 static uint_t sfmmu_ptov_attr(tte_t *); 499 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 500 caddr_t, demap_range_t *, uint_t); 501 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 502 static int sfmmu_idcache_constructor(void *, void *, int); 503 static void sfmmu_idcache_destructor(void *, void *); 504 static int sfmmu_hblkcache_constructor(void *, void *, int); 505 static void sfmmu_hblkcache_destructor(void *, void *); 506 static void sfmmu_hblkcache_reclaim(void *); 507 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 508 struct hmehash_bucket *); 509 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *, 510 struct hme_blk *, struct hme_blk **, int); 511 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *, 512 uint64_t); 513 static struct hme_blk *sfmmu_check_pending_hblks(int); 514 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 515 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 516 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 517 int, caddr_t *); 518 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 519 520 static void sfmmu_rm_large_mappings(page_t *, int); 521 522 static void hat_lock_init(void); 523 static void hat_kstat_init(void); 524 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 525 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 526 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 527 static void sfmmu_check_page_sizes(sfmmu_t *, int); 528 int fnd_mapping_sz(page_t *); 529 static void iment_add(struct ism_ment *, struct hat *); 530 static void iment_sub(struct ism_ment *, struct hat *); 531 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 532 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 533 extern void sfmmu_clear_utsbinfo(void); 534 535 static void sfmmu_ctx_wrap_around(mmu_ctx_t *); 536 537 extern int vpm_enable; 538 539 /* kpm globals */ 540 #ifdef DEBUG 541 /* 542 * Enable trap level tsbmiss handling 543 */ 544 int kpm_tsbmtl = 1; 545 546 /* 547 * Flush the TLB on kpm mapout. 
Note: Xcalls are used (again) for the
548 * required TLB shootdowns in this case, so handle w/ care. Off by default.
549 */
550 int kpm_tlb_flush;
551 #endif /* DEBUG */
552
553 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
554
555 #ifdef DEBUG
556 static void sfmmu_check_hblk_flist();
557 #endif
558
559 /*
560 * Semi-private sfmmu data structures. Some of them are initialized in
561 * startup or in hat_init. Some of them are private but accessed by
562 * assembly code or mach_sfmmu.c
563 */
564 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
565 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
566 uint64_t uhme_hash_pa; /* PA of uhme_hash */
567 uint64_t khme_hash_pa; /* PA of khme_hash */
568 int uhmehash_num; /* # of buckets in user hash table */
569 int khmehash_num; /* # of buckets in kernel hash table */
570
571 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
572 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
573 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
574
575 #define DEFAULT_NUM_CTXS_PER_MMU 8192
576 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
577
578 int cache; /* describes system cache */
579
580 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
581 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
582 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
583 int ktsb_sz; /* kernel 8k-indexed tsb size */
584
585 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
586 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
587 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
588 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
589
590 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
591 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
592 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
593 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
594
595 #ifndef sun4v
596 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
597 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
598 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
599 caddr_t utsb_vabase; /* reserved kernel virtual memory */
600 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
601 #endif /* sun4v */
602 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
603 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
604 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
605
606 /*
607 * Size to use for TSB slabs. Future platforms that support page sizes
608 * larger than 4M may wish to change these values, and provide their own
609 * assembly macros for building and decoding the TSB base register contents.
610 * Note disable_large_pages will override the value set here.
611 */
612 static uint_t tsb_slab_ttesz = TTE4M;
613 size_t tsb_slab_size = MMU_PAGESIZE4M;
614 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
615 /* PFN mask for TTE */
616 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
617
618 /*
619 * Size to use for TSB slabs. These are used only when 256M tsb arenas
620 * exist.
621 */ 622 static uint_t bigtsb_slab_ttesz = TTE256M; 623 static size_t bigtsb_slab_size = MMU_PAGESIZE256M; 624 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M; 625 /* 256M page alignment for 8K pfn */ 626 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT; 627 628 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 629 static int tsb_max_growsize = 0; 630 631 /* 632 * Tunable parameters dealing with TSB policies. 633 */ 634 635 /* 636 * This undocumented tunable forces all 8K TSBs to be allocated from 637 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 638 */ 639 #ifdef DEBUG 640 int tsb_forceheap = 0; 641 #endif /* DEBUG */ 642 643 /* 644 * Decide whether to use per-lgroup arenas, or one global set of 645 * TSB arenas. The default is not to break up per-lgroup, since 646 * most platforms don't recognize any tangible benefit from it. 647 */ 648 int tsb_lgrp_affinity = 0; 649 650 /* 651 * Used for growing the TSB based on the process RSS. 652 * tsb_rss_factor is based on the smallest TSB, and is 653 * shifted by the TSB size to determine if we need to grow. 654 * The default will grow the TSB if the number of TTEs for 655 * this page size exceeds 75% of the number of TSB entries, 656 * which should _almost_ eliminate all conflict misses 657 * (at the expense of using up lots and lots of memory). 658 */ 659 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 660 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 661 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 662 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 663 default_tsb_size) 664 #define TSB_OK_SHRINK() \ 665 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 666 #define TSB_OK_GROW() \ 667 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 668 669 int enable_tsb_rss_sizing = 1; 670 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 671 672 /* which TSB size code to use for new address spaces or if rss sizing off */ 673 int default_tsb_size = TSB_8K_SZCODE; 674 675 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 676 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 677 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 678 679 #ifdef DEBUG 680 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 681 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 682 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 683 static int tsb_alloc_fail_mtbf = 0; 684 static int tsb_alloc_count = 0; 685 #endif /* DEBUG */ 686 687 /* if set to 1, will remap valid TTEs when growing TSB. */ 688 int tsb_remap_ttes = 1; 689 690 /* 691 * If we have more than this many mappings, allocate a second TSB. 692 * This default is chosen because the I/D fully associative TLBs are 693 * assumed to have at least 8 available entries. Platforms with a 694 * larger fully-associative TLB could probably override the default. 
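 * Being an ordinary global, the threshold can be overridden from platform
 * initialization code (for example from set_platform_defaults()) or tuned
 * via /etc/system.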
695 */ 696 697 #ifdef sun4v 698 int tsb_sectsb_threshold = 0; 699 #else 700 int tsb_sectsb_threshold = 8; 701 #endif 702 703 /* 704 * kstat data 705 */ 706 struct sfmmu_global_stat sfmmu_global_stat; 707 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 708 709 /* 710 * Global data 711 */ 712 sfmmu_t *ksfmmup; /* kernel's hat id */ 713 714 #ifdef DEBUG 715 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 716 #endif 717 718 /* sfmmu locking operations */ 719 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 720 static int sfmmu_mlspl_held(struct page *, int); 721 722 kmutex_t *sfmmu_page_enter(page_t *); 723 void sfmmu_page_exit(kmutex_t *); 724 int sfmmu_page_spl_held(struct page *); 725 726 /* sfmmu internal locking operations - accessed directly */ 727 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 728 kmutex_t **, kmutex_t **); 729 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 730 static hatlock_t * 731 sfmmu_hat_enter(sfmmu_t *); 732 static hatlock_t * 733 sfmmu_hat_tryenter(sfmmu_t *); 734 static void sfmmu_hat_exit(hatlock_t *); 735 static void sfmmu_hat_lock_all(void); 736 static void sfmmu_hat_unlock_all(void); 737 static void sfmmu_ismhat_enter(sfmmu_t *, int); 738 static void sfmmu_ismhat_exit(sfmmu_t *, int); 739 740 /* 741 * Array of mutexes protecting a page's mapping list and p_nrm field. 742 * 743 * The hash function looks complicated, but is made up so that: 744 * 745 * "pp" not shifted, so adjacent pp values will hash to different cache lines 746 * (8 byte alignment * 8 bytes/mutes == 64 byte coherency subblock) 747 * 748 * "pp" >> mml_shift, incorporates more source bits into the hash result 749 * 750 * "& (mml_table_size - 1), should be faster than using remainder "%" 751 * 752 * Hopefully, mml_table, mml_table_size and mml_shift are all in the same 753 * cacheline, since they get declared next to each other below. We'll trust 754 * ld not to do something random. 755 */ 756 #ifdef DEBUG 757 int mlist_hash_debug = 0; 758 #define MLIST_HASH(pp) (mlist_hash_debug ? &mml_table[0] : \ 759 &mml_table[((uintptr_t)(pp) + \ 760 ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]) 761 #else /* !DEBUG */ 762 #define MLIST_HASH(pp) &mml_table[ \ 763 ((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)] 764 #endif /* !DEBUG */ 765 766 kmutex_t *mml_table; 767 uint_t mml_table_sz; /* must be a power of 2 */ 768 uint_t mml_shift; /* log2(mml_table_sz) + 3 for align */ 769 770 kpm_hlk_t *kpmp_table; 771 uint_t kpmp_table_sz; /* must be a power of 2 */ 772 uchar_t kpmp_shift; 773 774 kpm_shlk_t *kpmp_stable; 775 uint_t kpmp_stable_sz; /* must be a power of 2 */ 776 777 /* 778 * SPL_HASH was improved to avoid false cache line sharing 779 */ 780 #define SPL_TABLE_SIZE 128 781 #define SPL_MASK (SPL_TABLE_SIZE - 1) 782 #define SPL_SHIFT 7 /* log2(SPL_TABLE_SIZE) */ 783 784 #define SPL_INDEX(pp) \ 785 ((((uintptr_t)(pp) >> SPL_SHIFT) ^ \ 786 ((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \ 787 (SPL_TABLE_SIZE - 1)) 788 789 #define SPL_HASH(pp) \ 790 (&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex) 791 792 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE]; 793 794 795 /* 796 * hat_unload_callback() will group together callbacks in order 797 * to avoid xt_sync() calls. This is the maximum size of the group. 
798 */ 799 #define MAX_CB_ADDR 32 800 801 tte_t hw_tte; 802 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 803 804 static char *mmu_ctx_kstat_names[] = { 805 "mmu_ctx_tsb_exceptions", 806 "mmu_ctx_tsb_raise_exception", 807 "mmu_ctx_wrap_around", 808 }; 809 810 /* 811 * Wrapper for vmem_xalloc since vmem_create only allows limited 812 * parameters for vm_source_alloc functions. This function allows us 813 * to specify alignment consistent with the size of the object being 814 * allocated. 815 */ 816 static void * 817 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 818 { 819 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 820 } 821 822 /* Common code for setting tsb_alloc_hiwater. */ 823 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 824 ptob(pages) / tsb_alloc_hiwater_factor 825 826 /* 827 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 828 * a single TSB. physmem is the number of physical pages so we need physmem 8K 829 * TTEs to represent all those physical pages. We round this up by using 830 * 1<<highbit(). To figure out which size code to use, remember that the size 831 * code is just an amount to shift the smallest TSB size to get the size of 832 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 833 * highbit() - 1) to get the size code for the smallest TSB that can represent 834 * all of physical memory, while erring on the side of too much. 835 * 836 * Restrict tsb_max_growsize to make sure that: 837 * 1) TSBs can't grow larger than the TSB slab size 838 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 839 */ 840 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 841 int _i, _szc, _slabszc, _tsbszc; \ 842 \ 843 _i = highbit(pages); \ 844 if ((1 << (_i - 1)) == (pages)) \ 845 _i--; /* 2^n case, round down */ \ 846 _szc = _i - TSB_START_SIZE; \ 847 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 848 _tsbszc = MIN(_szc, _slabszc); \ 849 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 850 } 851 852 /* 853 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 854 * tsb_info which handles that TTE size. 855 */ 856 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 857 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 858 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 859 sfmmu_hat_lock_held(sfmmup)); \ 860 if ((tte_szc) >= TTE4M) { \ 861 ASSERT((tsbinfop) != NULL); \ 862 (tsbinfop) = (tsbinfop)->tsb_next; \ 863 } \ 864 } 865 866 /* 867 * Macro to use to unload entries from the TSB. 868 * It has knowledge of which page sizes get replicated in the TSB 869 * and will call the appropriate unload routine for the appropriate size. 870 */ 871 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 872 { \ 873 int ttesz = get_hblk_ttesz(hmeblkp); \ 874 if (ttesz == TTE8K || ttesz == TTE4M) { \ 875 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 876 } else { \ 877 caddr_t sva = ismhat ? addr : \ 878 (caddr_t)get_hblk_base(hmeblkp); \ 879 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 880 ASSERT(addr >= sva && addr < eva); \ 881 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 882 } \ 883 } 884 885 886 /* Update tsb_alloc_hiwater after memory is configured. */ 887 /*ARGSUSED*/ 888 static void 889 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 890 { 891 /* Assumes physmem has already been updated. 
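(e.g. with the default tsb_alloc_hiwater_factor of 32, 4 GB of physical memory caps TSB allocations at ptob(physmem) / 32 = 128 MB)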
*/
892 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
893 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
894 }
895
896 /*
897 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
898 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
899 * deleted.
900 */
901 /*ARGSUSED*/
902 static int
903 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
904 {
905 return (0);
906 }
907
908 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
909 /*ARGSUSED*/
910 static void
911 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
912 {
913 /*
914 * Whether the delete was cancelled or not, just go ahead and update
915 * tsb_alloc_hiwater and tsb_max_growsize.
916 */
917 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
918 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
919 }
920
921 static kphysm_setup_vector_t sfmmu_update_vec = {
922 KPHYSM_SETUP_VECTOR_VERSION, /* version */
923 sfmmu_update_post_add, /* post_add */
924 sfmmu_update_pre_del, /* pre_del */
925 sfmmu_update_post_del /* post_del */
926 };
927
928
929 /*
930 * HME_BLK HASH PRIMITIVES
931 */
932
933 /*
934 * Enter a hme on the mapping list for page pp.
935 * When large pages are more prevalent in the system we might want to
936 * keep the mapping list in ascending order by the hment size. For now,
937 * small pages are more frequent, so don't slow it down.
938 */
939 #define HME_ADD(hme, pp) \
940 { \
941 ASSERT(sfmmu_mlist_held(pp)); \
942 \
943 hme->hme_prev = NULL; \
944 hme->hme_next = pp->p_mapping; \
945 hme->hme_page = pp; \
946 if (pp->p_mapping) { \
947 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
948 ASSERT(pp->p_share > 0); \
949 } else { \
950 /* EMPTY */ \
951 ASSERT(pp->p_share == 0); \
952 } \
953 pp->p_mapping = hme; \
954 pp->p_share++; \
955 }
956
957 /*
958 * Remove a hme from the mapping list for page pp.
959 * If we are unmapping a large translation, we need to make sure that the
960 * change is reflected in the corresponding bit of the p_index field.
961 */
962 #define HME_SUB(hme, pp) \
963 { \
964 ASSERT(sfmmu_mlist_held(pp)); \
965 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
966 \
967 if (pp->p_mapping == NULL) { \
968 panic("hme_remove - no mappings"); \
969 } \
970 \
971 membar_stst(); /* ensure previous stores finish */ \
972 \
973 ASSERT(pp->p_share > 0); \
974 pp->p_share--; \
975 \
976 if (hme->hme_prev) { \
977 ASSERT(pp->p_mapping != hme); \
978 ASSERT(hme->hme_prev->hme_page == pp || \
979 IS_PAHME(hme->hme_prev)); \
980 hme->hme_prev->hme_next = hme->hme_next; \
981 } else { \
982 ASSERT(pp->p_mapping == hme); \
983 pp->p_mapping = hme->hme_next; \
984 ASSERT((pp->p_mapping == NULL) ? \
985 (pp->p_share == 0) : 1); \
986 } \
987 \
988 if (hme->hme_next) { \
989 ASSERT(hme->hme_next->hme_page == pp || \
990 IS_PAHME(hme->hme_next)); \
991 hme->hme_next->hme_prev = hme->hme_prev; \
992 } \
993 \
994 /* zero out the entry */ \
995 hme->hme_next = NULL; \
996 hme->hme_prev = NULL; \
997 hme->hme_page = NULL; \
998 \
999 if (hme_size(hme) > TTE8K) { \
1000 /* remove mappings for remainder of large pg */ \
1001 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
1002 } \
1003 }
1004
1005 /*
1006 * This function returns the hment given the hme_blk and a vaddr.
1007 * It assumes addr has already been checked to belong to hme_blk's
1008 * range.
1009 */ 1010 #define HBLKTOHME(hment, hmeblkp, addr) \ 1011 { \ 1012 int index; \ 1013 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \ 1014 } 1015 1016 /* 1017 * Version of HBLKTOHME that also returns the index in hmeblkp 1018 * of the hment. 1019 */ 1020 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \ 1021 { \ 1022 ASSERT(in_hblk_range((hmeblkp), (addr))); \ 1023 \ 1024 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \ 1025 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \ 1026 } else \ 1027 idx = 0; \ 1028 \ 1029 (hment) = &(hmeblkp)->hblk_hme[idx]; \ 1030 } 1031 1032 /* 1033 * Disable any page sizes not supported by the CPU 1034 */ 1035 void 1036 hat_init_pagesizes() 1037 { 1038 int i; 1039 1040 mmu_exported_page_sizes = 0; 1041 for (i = TTE8K; i < max_mmu_page_sizes; i++) { 1042 1043 szc_2_userszc[i] = (uint_t)-1; 1044 userszc_2_szc[i] = (uint_t)-1; 1045 1046 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) { 1047 disable_large_pages |= (1 << i); 1048 } else { 1049 szc_2_userszc[i] = mmu_exported_page_sizes; 1050 userszc_2_szc[mmu_exported_page_sizes] = i; 1051 mmu_exported_page_sizes++; 1052 } 1053 } 1054 1055 disable_ism_large_pages |= disable_large_pages; 1056 disable_auto_data_large_pages = disable_large_pages; 1057 disable_auto_text_large_pages = disable_large_pages; 1058 1059 /* 1060 * Initialize mmu-specific large page sizes. 1061 */ 1062 if (&mmu_large_pages_disabled) { 1063 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD); 1064 disable_ism_large_pages |= 1065 mmu_large_pages_disabled(HAT_LOAD_SHARE); 1066 disable_auto_data_large_pages |= 1067 mmu_large_pages_disabled(HAT_AUTO_DATA); 1068 disable_auto_text_large_pages |= 1069 mmu_large_pages_disabled(HAT_AUTO_TEXT); 1070 } 1071 } 1072 1073 /* 1074 * Initialize the hardware address translation structures. 1075 */ 1076 void 1077 hat_init(void) 1078 { 1079 int i; 1080 uint_t sz; 1081 size_t size; 1082 1083 hat_lock_init(); 1084 hat_kstat_init(); 1085 1086 /* 1087 * Hardware-only bits in a TTE 1088 */ 1089 MAKE_TTE_MASK(&hw_tte); 1090 1091 hat_init_pagesizes(); 1092 1093 /* Initialize the hash locks */ 1094 for (i = 0; i < khmehash_num; i++) { 1095 mutex_init(&khme_hash[i].hmehash_mutex, NULL, 1096 MUTEX_DEFAULT, NULL); 1097 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1098 } 1099 for (i = 0; i < uhmehash_num; i++) { 1100 mutex_init(&uhme_hash[i].hmehash_mutex, NULL, 1101 MUTEX_DEFAULT, NULL); 1102 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1103 } 1104 khmehash_num--; /* make sure counter starts from 0 */ 1105 uhmehash_num--; /* make sure counter starts from 0 */ 1106 1107 /* 1108 * Allocate context domain structures. 1109 * 1110 * A platform may choose to modify max_mmu_ctxdoms in 1111 * set_platform_defaults(). If a platform does not define 1112 * a set_platform_defaults() or does not choose to modify 1113 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU. 1114 * 1115 * For sun4v, there will be one global context domain, this is to 1116 * avoid the ldom cpu substitution problem. 1117 * 1118 * For all platforms that have CPUs sharing MMUs, this 1119 * value must be defined. 
1120 */ 1121 if (max_mmu_ctxdoms == 0) { 1122 #ifndef sun4v 1123 max_mmu_ctxdoms = max_ncpus; 1124 #else /* sun4v */ 1125 max_mmu_ctxdoms = 1; 1126 #endif /* sun4v */ 1127 } 1128 1129 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *); 1130 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP); 1131 1132 /* mmu_ctx_t is 64 bytes aligned */ 1133 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache", 1134 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 1135 /* 1136 * MMU context domain initialization for the Boot CPU. 1137 * This needs the context domains array allocated above. 1138 */ 1139 mutex_enter(&cpu_lock); 1140 sfmmu_cpu_init(CPU); 1141 mutex_exit(&cpu_lock); 1142 1143 /* 1144 * Intialize ism mapping list lock. 1145 */ 1146 1147 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1148 1149 /* 1150 * Each sfmmu structure carries an array of MMU context info 1151 * structures, one per context domain. The size of this array depends 1152 * on the maximum number of context domains. So, the size of the 1153 * sfmmu structure varies per platform. 1154 * 1155 * sfmmu is allocated from static arena, because trap 1156 * handler at TL > 0 is not allowed to touch kernel relocatable 1157 * memory. sfmmu's alignment is changed to 64 bytes from 1158 * default 8 bytes, as the lower 6 bits will be used to pass 1159 * pgcnt to vtag_flush_pgcnt_tl1. 1160 */ 1161 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1162 1163 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1164 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1165 NULL, NULL, static_arena, 0); 1166 1167 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1168 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1169 1170 /* 1171 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1172 * from the heap when low on memory or when TSB_FORCEALLOC is 1173 * specified, don't use magazines to cache them--we want to return 1174 * them to the system as quickly as possible. 1175 */ 1176 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1177 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1178 static_arena, KMC_NOMAGAZINE); 1179 1180 /* 1181 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1182 * memory, which corresponds to the old static reserve for TSBs. 1183 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1184 * memory we'll allocate for TSB slabs; beyond this point TSB 1185 * allocations will be taken from the kernel heap (via 1186 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1187 * consumer. 1188 */ 1189 if (tsb_alloc_hiwater_factor == 0) { 1190 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1191 } 1192 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1193 1194 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1195 if (!(disable_large_pages & (1 << sz))) 1196 break; 1197 } 1198 1199 if (sz < tsb_slab_ttesz) { 1200 tsb_slab_ttesz = sz; 1201 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1202 tsb_slab_size = 1 << tsb_slab_shift; 1203 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1204 use_bigtsb_arena = 0; 1205 } else if (use_bigtsb_arena && 1206 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1207 use_bigtsb_arena = 0; 1208 } 1209 1210 if (!use_bigtsb_arena) { 1211 bigtsb_slab_shift = tsb_slab_shift; 1212 } 1213 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1214 1215 /* 1216 * On smaller memory systems, allocate TSB memory in smaller chunks 1217 * than the default 4M slab size. 
We also honor disable_large_pages 1218 * here. 1219 * 1220 * The trap handlers need to be patched with the final slab shift, 1221 * since they need to be able to construct the TSB pointer at runtime. 1222 */ 1223 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1224 !(disable_large_pages & (1 << TTE512K))) { 1225 tsb_slab_ttesz = TTE512K; 1226 tsb_slab_shift = MMU_PAGESHIFT512K; 1227 tsb_slab_size = MMU_PAGESIZE512K; 1228 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1229 use_bigtsb_arena = 0; 1230 } 1231 1232 if (!use_bigtsb_arena) { 1233 bigtsb_slab_ttesz = tsb_slab_ttesz; 1234 bigtsb_slab_shift = tsb_slab_shift; 1235 bigtsb_slab_size = tsb_slab_size; 1236 bigtsb_slab_mask = tsb_slab_mask; 1237 } 1238 1239 1240 /* 1241 * Set up memory callback to update tsb_alloc_hiwater and 1242 * tsb_max_growsize. 1243 */ 1244 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1245 ASSERT(i == 0); 1246 1247 /* 1248 * kmem_tsb_arena is the source from which large TSB slabs are 1249 * drawn. The quantum of this arena corresponds to the largest 1250 * TSB size we can dynamically allocate for user processes. 1251 * Currently it must also be a supported page size since we 1252 * use exactly one translation entry to map each slab page. 1253 * 1254 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1255 * which most TSBs are allocated. Since most TSB allocations are 1256 * typically 8K we have a kmem cache we stack on top of each 1257 * kmem_tsb_default_arena to speed up those allocations. 1258 * 1259 * Note the two-level scheme of arenas is required only 1260 * because vmem_create doesn't allow us to specify alignment 1261 * requirements. If this ever changes the code could be 1262 * simplified to use only one level of arenas. 1263 * 1264 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1265 * will be provided in addition to the 4M kmem_tsb_arena. 
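 * The resulting hierarchy: 8K TSBs are normally satisfied from the
 * sfmmu_tsb_cache kmem caches, larger TSBs come directly from
 * kmem_tsb_default_arena, and both import their backing slabs from
 * kmem_tsb_arena (or kmem_bigtsb_arena when 256M slabs are in use).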
1266 */ 1267 if (use_bigtsb_arena) { 1268 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1269 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1270 vmem_xfree, heap_arena, 0, VM_SLEEP); 1271 } 1272 1273 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1274 sfmmu_vmem_xalloc_aligned_wrapper, 1275 vmem_xfree, heap_arena, 0, VM_SLEEP); 1276 1277 if (tsb_lgrp_affinity) { 1278 char s[50]; 1279 for (i = 0; i < NLGRPS_MAX; i++) { 1280 if (use_bigtsb_arena) { 1281 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1282 kmem_bigtsb_default_arena[i] = vmem_create(s, 1283 NULL, 0, 2 * tsb_slab_size, 1284 sfmmu_tsb_segkmem_alloc, 1285 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1286 0, VM_SLEEP | VM_BESTFIT); 1287 } 1288 1289 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1290 kmem_tsb_default_arena[i] = vmem_create(s, 1291 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1292 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1293 VM_SLEEP | VM_BESTFIT); 1294 1295 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1296 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1297 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1298 kmem_tsb_default_arena[i], 0); 1299 } 1300 } else { 1301 if (use_bigtsb_arena) { 1302 kmem_bigtsb_default_arena[0] = 1303 vmem_create("kmem_bigtsb_default", NULL, 0, 1304 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1305 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1306 VM_SLEEP | VM_BESTFIT); 1307 } 1308 1309 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1310 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1311 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1312 VM_SLEEP | VM_BESTFIT); 1313 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1314 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1315 kmem_tsb_default_arena[0], 0); 1316 } 1317 1318 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1319 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1320 sfmmu_hblkcache_destructor, 1321 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1322 hat_memload_arena, KMC_NOHASH); 1323 1324 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1325 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, 1326 VMC_DUMPSAFE | VM_SLEEP); 1327 1328 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1329 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1330 sfmmu_hblkcache_destructor, 1331 NULL, (void *)HME1BLK_SZ, 1332 hat_memload1_arena, KMC_NOHASH); 1333 1334 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1335 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1336 1337 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1338 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1339 NULL, NULL, static_arena, KMC_NOHASH); 1340 1341 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1342 sizeof (ism_ment_t), 0, NULL, NULL, 1343 NULL, NULL, NULL, 0); 1344 1345 /* 1346 * We grab the first hat for the kernel, 1347 */ 1348 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1349 kas.a_hat = hat_alloc(&kas); 1350 AS_LOCK_EXIT(&kas, &kas.a_lock); 1351 1352 /* 1353 * Initialize hblk_reserve. 1354 */ 1355 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1356 va_to_pa((caddr_t)hblk_reserve); 1357 1358 #ifndef UTSB_PHYS 1359 /* 1360 * Reserve some kernel virtual address space for the locked TTEs 1361 * that allow us to probe the TSB from TL>0. 
1362 */ 1363 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1364 0, 0, NULL, NULL, VM_SLEEP); 1365 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1366 0, 0, NULL, NULL, VM_SLEEP); 1367 #endif 1368 1369 #ifdef VAC 1370 /* 1371 * The big page VAC handling code assumes VAC 1372 * will not be bigger than the smallest big 1373 * page- which is 64K. 1374 */ 1375 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1376 cmn_err(CE_PANIC, "VAC too big!"); 1377 } 1378 #endif 1379 1380 (void) xhat_init(); 1381 1382 uhme_hash_pa = va_to_pa(uhme_hash); 1383 khme_hash_pa = va_to_pa(khme_hash); 1384 1385 /* 1386 * Initialize relocation locks. kpr_suspendlock is held 1387 * at PIL_MAX to prevent interrupts from pinning the holder 1388 * of a suspended TTE which may access it leading to a 1389 * deadlock condition. 1390 */ 1391 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1392 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1393 1394 /* 1395 * If Shared context support is disabled via /etc/system 1396 * set shctx_on to 0 here if it was set to 1 earlier in boot 1397 * sequence by cpu module initialization code. 1398 */ 1399 if (shctx_on && disable_shctx) { 1400 shctx_on = 0; 1401 } 1402 1403 if (shctx_on) { 1404 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1405 sizeof (srd_buckets[0]), KM_SLEEP); 1406 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1407 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1408 MUTEX_DEFAULT, NULL); 1409 } 1410 1411 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1412 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1413 NULL, NULL, NULL, 0); 1414 region_cache = kmem_cache_create("region_cache", 1415 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1416 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1417 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1418 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1419 NULL, NULL, NULL, 0); 1420 } 1421 1422 /* 1423 * Pre-allocate hrm_hashtab before enabling the collection of 1424 * refmod statistics. Allocating on the fly would mean us 1425 * running the risk of suffering recursive mutex enters or 1426 * deadlocks. 1427 */ 1428 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1429 KM_SLEEP); 1430 1431 /* Allocate per-cpu pending freelist of hmeblks */ 1432 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64, 1433 KM_SLEEP); 1434 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP( 1435 (uintptr_t)cpu_hme_pend, 64); 1436 1437 for (i = 0; i < NCPU; i++) { 1438 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT, 1439 NULL); 1440 } 1441 1442 if (cpu_hme_pend_thresh == 0) { 1443 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH; 1444 } 1445 } 1446 1447 /* 1448 * Initialize locking for the hat layer, called early during boot. 1449 */ 1450 static void 1451 hat_lock_init() 1452 { 1453 int i; 1454 1455 /* 1456 * initialize the array of mutexes protecting a page's mapping 1457 * list and p_nrm field. 1458 */ 1459 for (i = 0; i < mml_table_sz; i++) 1460 mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL); 1461 1462 if (kpm_enable) { 1463 for (i = 0; i < kpmp_table_sz; i++) { 1464 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1465 MUTEX_DEFAULT, NULL); 1466 } 1467 } 1468 1469 /* 1470 * Initialize array of mutex locks that protects sfmmu fields and 1471 * TSB lists. 
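 * These are the hat_lock[] mutexes that TSB_HASH() hashes an sfmmup into.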
1472 */ 1473 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1474 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1475 NULL); 1476 } 1477 1478 #define SFMMU_KERNEL_MAXVA \ 1479 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1480 1481 /* 1482 * Allocate a hat structure. 1483 * Called when an address space first uses a hat. 1484 */ 1485 struct hat * 1486 hat_alloc(struct as *as) 1487 { 1488 sfmmu_t *sfmmup; 1489 int i; 1490 uint64_t cnum; 1491 extern uint_t get_color_start(struct as *); 1492 1493 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1494 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1495 sfmmup->sfmmu_as = as; 1496 sfmmup->sfmmu_flags = 0; 1497 sfmmup->sfmmu_tteflags = 0; 1498 sfmmup->sfmmu_rtteflags = 0; 1499 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1500 1501 if (as == &kas) { 1502 ksfmmup = sfmmup; 1503 sfmmup->sfmmu_cext = 0; 1504 cnum = KCONTEXT; 1505 1506 sfmmup->sfmmu_clrstart = 0; 1507 sfmmup->sfmmu_tsb = NULL; 1508 /* 1509 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1510 * to setup tsb_info for ksfmmup. 1511 */ 1512 } else { 1513 1514 /* 1515 * Just set to invalid ctx. When it faults, it will 1516 * get a valid ctx. This would avoid the situation 1517 * where we get a ctx, but it gets stolen and then 1518 * we fault when we try to run and so have to get 1519 * another ctx. 1520 */ 1521 sfmmup->sfmmu_cext = 0; 1522 cnum = INVALID_CONTEXT; 1523 1524 /* initialize original physical page coloring bin */ 1525 sfmmup->sfmmu_clrstart = get_color_start(as); 1526 #ifdef DEBUG 1527 if (tsb_random_size) { 1528 uint32_t randval = (uint32_t)gettick() >> 4; 1529 int size = randval % (tsb_max_growsize + 1); 1530 1531 /* chose a random tsb size for stress testing */ 1532 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1533 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1534 } else 1535 #endif /* DEBUG */ 1536 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1537 default_tsb_size, 1538 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1539 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1540 ASSERT(sfmmup->sfmmu_tsb != NULL); 1541 } 1542 1543 ASSERT(max_mmu_ctxdoms > 0); 1544 for (i = 0; i < max_mmu_ctxdoms; i++) { 1545 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1546 sfmmup->sfmmu_ctxs[i].gnum = 0; 1547 } 1548 1549 for (i = 0; i < max_mmu_page_sizes; i++) { 1550 sfmmup->sfmmu_ttecnt[i] = 0; 1551 sfmmup->sfmmu_scdrttecnt[i] = 0; 1552 sfmmup->sfmmu_ismttecnt[i] = 0; 1553 sfmmup->sfmmu_scdismttecnt[i] = 0; 1554 sfmmup->sfmmu_pgsz[i] = TTE8K; 1555 } 1556 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1557 sfmmup->sfmmu_iblk = NULL; 1558 sfmmup->sfmmu_ismhat = 0; 1559 sfmmup->sfmmu_scdhat = 0; 1560 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1561 if (sfmmup == ksfmmup) { 1562 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1563 } else { 1564 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1565 } 1566 sfmmup->sfmmu_free = 0; 1567 sfmmup->sfmmu_rmstat = 0; 1568 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1569 sfmmup->sfmmu_xhat_provider = NULL; 1570 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1571 sfmmup->sfmmu_srdp = NULL; 1572 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1573 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1574 sfmmup->sfmmu_scdp = NULL; 1575 sfmmup->sfmmu_scd_link.next = NULL; 1576 sfmmup->sfmmu_scd_link.prev = NULL; 1577 return (sfmmup); 1578 } 1579 1580 /* 1581 * Create per-MMU context domain kstats for a given MMU ctx. 
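 * The kstats are created under module "unix", class "hat", name "mmu_ctx",
 * with the MMU index as the kstat instance, so they can be inspected with
 * kstat(1M).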
1582 */ 1583 static void 1584 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1585 { 1586 mmu_ctx_stat_t stat; 1587 kstat_t *mmu_kstat; 1588 1589 ASSERT(MUTEX_HELD(&cpu_lock)); 1590 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1591 1592 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1593 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1594 1595 if (mmu_kstat == NULL) { 1596 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1597 mmu_ctxp->mmu_idx); 1598 } else { 1599 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1600 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1601 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1602 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1603 mmu_ctxp->mmu_kstat = mmu_kstat; 1604 kstat_install(mmu_kstat); 1605 } 1606 } 1607 1608 /* 1609 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1610 * context domain information for a given CPU. If a platform does not 1611 * specify that interface, then the function below is used instead to return 1612 * default information. The defaults are as follows: 1613 * 1614 * - For sun4u systems there's one MMU context domain per CPU. 1615 * This default is used by all sun4u systems except OPL. OPL systems 1616 * provide platform specific interface to map CPU ids to MMU ids 1617 * because on OPL more than 1 CPU shares a single MMU. 1618 * Note that on sun4v, there is one global context domain for 1619 * the entire system. This is to avoid running into potential problem 1620 * with ldom physical cpu substitution feature. 1621 * - The number of MMU context IDs supported on any CPU in the 1622 * system is 8K. 1623 */ 1624 /*ARGSUSED*/ 1625 static void 1626 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1627 { 1628 infop->mmu_nctxs = nctxs; 1629 #ifndef sun4v 1630 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1631 #else /* sun4v */ 1632 infop->mmu_idx = 0; 1633 #endif /* sun4v */ 1634 } 1635 1636 /* 1637 * Called during CPU initialization to set the MMU context-related information 1638 * for a CPU. 1639 * 1640 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1641 */ 1642 void 1643 sfmmu_cpu_init(cpu_t *cp) 1644 { 1645 mmu_ctx_info_t info; 1646 mmu_ctx_t *mmu_ctxp; 1647 1648 ASSERT(MUTEX_HELD(&cpu_lock)); 1649 1650 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1651 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1652 else 1653 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1654 1655 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1656 1657 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1658 /* Each mmu_ctx is cacheline aligned. */ 1659 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1660 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1661 1662 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1663 (void *)ipltospl(DISP_LEVEL)); 1664 mmu_ctxp->mmu_idx = info.mmu_idx; 1665 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1666 /* 1667 * Globally for lifetime of a system, 1668 * gnum must always increase. 1669 * mmu_saved_gnum is protected by the cpu_lock. 1670 */ 1671 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1672 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1673 1674 sfmmu_mmu_kstat_create(mmu_ctxp); 1675 1676 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1677 } else { 1678 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1679 } 1680 1681 /* 1682 * The mmu_lock is acquired here to prevent races with 1683 * the wrap-around code. 
1684 */ 1685 mutex_enter(&mmu_ctxp->mmu_lock); 1686 1687 1688 mmu_ctxp->mmu_ncpus++; 1689 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1690 CPU_MMU_IDX(cp) = info.mmu_idx; 1691 CPU_MMU_CTXP(cp) = mmu_ctxp; 1692 1693 mutex_exit(&mmu_ctxp->mmu_lock); 1694 } 1695 1696 /* 1697 * Called to perform MMU context-related cleanup for a CPU. 1698 */ 1699 void 1700 sfmmu_cpu_cleanup(cpu_t *cp) 1701 { 1702 mmu_ctx_t *mmu_ctxp; 1703 1704 ASSERT(MUTEX_HELD(&cpu_lock)); 1705 1706 mmu_ctxp = CPU_MMU_CTXP(cp); 1707 ASSERT(mmu_ctxp != NULL); 1708 1709 /* 1710 * The mmu_lock is acquired here to prevent races with 1711 * the wrap-around code. 1712 */ 1713 mutex_enter(&mmu_ctxp->mmu_lock); 1714 1715 CPU_MMU_CTXP(cp) = NULL; 1716 1717 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1718 if (--mmu_ctxp->mmu_ncpus == 0) { 1719 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1720 mutex_exit(&mmu_ctxp->mmu_lock); 1721 mutex_destroy(&mmu_ctxp->mmu_lock); 1722 1723 if (mmu_ctxp->mmu_kstat) 1724 kstat_delete(mmu_ctxp->mmu_kstat); 1725 1726 /* mmu_saved_gnum is protected by the cpu_lock. */ 1727 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1728 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1729 1730 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1731 1732 return; 1733 } 1734 1735 mutex_exit(&mmu_ctxp->mmu_lock); 1736 } 1737 1738 /* 1739 * Hat_setup, makes an address space context the current active one. 1740 * In sfmmu this translates to setting the secondary context with the 1741 * corresponding context. 1742 */ 1743 void 1744 hat_setup(struct hat *sfmmup, int allocflag) 1745 { 1746 hatlock_t *hatlockp; 1747 1748 /* Init needs some special treatment. */ 1749 if (allocflag == HAT_INIT) { 1750 /* 1751 * Make sure that we have 1752 * 1. a TSB 1753 * 2. a valid ctx that doesn't get stolen after this point. 1754 */ 1755 hatlockp = sfmmu_hat_enter(sfmmup); 1756 1757 /* 1758 * Swap in the TSB. hat_init() allocates tsbinfos without 1759 * TSBs, but we need one for init, since the kernel does some 1760 * special things to set up its stack and needs the TSB to 1761 * resolve page faults. 1762 */ 1763 sfmmu_tsb_swapin(sfmmup, hatlockp); 1764 1765 sfmmu_get_ctx(sfmmup); 1766 1767 sfmmu_hat_exit(hatlockp); 1768 } else { 1769 ASSERT(allocflag == HAT_ALLOC); 1770 1771 hatlockp = sfmmu_hat_enter(sfmmup); 1772 kpreempt_disable(); 1773 1774 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1775 /* 1776 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1777 * pagesize bits don't matter in this case since we are passing 1778 * INVALID_CONTEXT to it. 1779 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1780 */ 1781 sfmmu_setctx_sec(INVALID_CONTEXT); 1782 sfmmu_clear_utsbinfo(); 1783 1784 kpreempt_enable(); 1785 sfmmu_hat_exit(hatlockp); 1786 } 1787 } 1788 1789 /* 1790 * Free all the translation resources for the specified address space. 1791 * Called from as_free when an address space is being destroyed. 
1792 */ 1793 void 1794 hat_free_start(struct hat *sfmmup) 1795 { 1796 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1797 ASSERT(sfmmup != ksfmmup); 1798 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1799 1800 sfmmup->sfmmu_free = 1; 1801 if (sfmmup->sfmmu_scdp != NULL) { 1802 sfmmu_leave_scd(sfmmup, 0); 1803 } 1804 1805 ASSERT(sfmmup->sfmmu_scdp == NULL); 1806 } 1807 1808 void 1809 hat_free_end(struct hat *sfmmup) 1810 { 1811 int i; 1812 1813 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1814 ASSERT(sfmmup->sfmmu_free == 1); 1815 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1816 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1817 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1818 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1819 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1820 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1821 1822 if (sfmmup->sfmmu_rmstat) { 1823 hat_freestat(sfmmup->sfmmu_as, NULL); 1824 } 1825 1826 while (sfmmup->sfmmu_tsb != NULL) { 1827 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1828 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1829 sfmmup->sfmmu_tsb = next; 1830 } 1831 1832 if (sfmmup->sfmmu_srdp != NULL) { 1833 sfmmu_leave_srd(sfmmup); 1834 ASSERT(sfmmup->sfmmu_srdp == NULL); 1835 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1836 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1837 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1838 SFMMU_L2_HMERLINKS_SIZE); 1839 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1840 } 1841 } 1842 } 1843 sfmmu_free_sfmmu(sfmmup); 1844 1845 #ifdef DEBUG 1846 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1847 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1848 } 1849 #endif 1850 1851 kmem_cache_free(sfmmuid_cache, sfmmup); 1852 } 1853 1854 /* 1855 * Set up any translation structures, for the specified address space, 1856 * that are needed or preferred when the process is being swapped in. 1857 */ 1858 /* ARGSUSED */ 1859 void 1860 hat_swapin(struct hat *hat) 1861 { 1862 ASSERT(hat->sfmmu_xhat_provider == NULL); 1863 } 1864 1865 /* 1866 * Free all of the translation resources, for the specified address space, 1867 * that can be freed while the process is swapped out. Called from as_swapout. 1868 * Also, free up the ctx that this process was using. 1869 */ 1870 void 1871 hat_swapout(struct hat *sfmmup) 1872 { 1873 struct hmehash_bucket *hmebp; 1874 struct hme_blk *hmeblkp; 1875 struct hme_blk *pr_hblk = NULL; 1876 struct hme_blk *nx_hblk; 1877 int i; 1878 struct hme_blk *list = NULL; 1879 hatlock_t *hatlockp; 1880 struct tsb_info *tsbinfop; 1881 struct free_tsb { 1882 struct free_tsb *next; 1883 struct tsb_info *tsbinfop; 1884 }; /* free list of TSBs */ 1885 struct free_tsb *freelist, *last, *next; 1886 1887 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1888 SFMMU_STAT(sf_swapout); 1889 1890 /* 1891 * There is no way to go from an as to all its translations in sfmmu. 1892 * Here is one of the times when we take the big hit and traverse 1893 * the hash looking for hme_blks to free up. Not only do we free up 1894 * this as hme_blks but all those that are free. We are obviously 1895 * swapping because we need memory so let's free up as much 1896 * as we can. 1897 * 1898 * Note that we don't flush TLB/TSB here -- it's not necessary 1899 * because: 1900 * 1) we free the ctx we're using and throw away the TSB(s); 1901 * 2) processes aren't runnable while being swapped out. 
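 *
 * Concretely, the loop below walks every bucket of the user hme hash
 * (uhme_hash), unloads the unlocked, non-shadow hmeblks owned by this
 * sfmmu, and then reaps any hmeblk left with no valid mappings or
 * hments, regardless of which hat owns it.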
1902 */ 1903 ASSERT(sfmmup != KHATID); 1904 for (i = 0; i <= UHMEHASH_SZ; i++) { 1905 hmebp = &uhme_hash[i]; 1906 SFMMU_HASH_LOCK(hmebp); 1907 hmeblkp = hmebp->hmeblkp; 1908 pr_hblk = NULL; 1909 while (hmeblkp) { 1910 1911 ASSERT(!hmeblkp->hblk_xhat_bit); 1912 1913 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 1914 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 1915 ASSERT(!hmeblkp->hblk_shared); 1916 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 1917 (caddr_t)get_hblk_base(hmeblkp), 1918 get_hblk_endaddr(hmeblkp), 1919 NULL, HAT_UNLOAD); 1920 } 1921 nx_hblk = hmeblkp->hblk_next; 1922 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 1923 ASSERT(!hmeblkp->hblk_lckcnt); 1924 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 1925 &list, 0); 1926 } else { 1927 pr_hblk = hmeblkp; 1928 } 1929 hmeblkp = nx_hblk; 1930 } 1931 SFMMU_HASH_UNLOCK(hmebp); 1932 } 1933 1934 sfmmu_hblks_list_purge(&list, 0); 1935 1936 /* 1937 * Now free up the ctx so that others can reuse it. 1938 */ 1939 hatlockp = sfmmu_hat_enter(sfmmup); 1940 1941 sfmmu_invalidate_ctx(sfmmup); 1942 1943 /* 1944 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 1945 * If TSBs were never swapped in, just return. 1946 * This implies that we don't support partial swapping 1947 * of TSBs -- either all are swapped out, or none are. 1948 * 1949 * We must hold the HAT lock here to prevent racing with another 1950 * thread trying to unmap TTEs from the TSB or running the post- 1951 * relocator after relocating the TSB's memory. Unfortunately, we 1952 * can't free memory while holding the HAT lock or we could 1953 * deadlock, so we build a list of TSBs to be freed after marking 1954 * the tsbinfos as swapped out and free them after dropping the 1955 * lock. 1956 */ 1957 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 1958 sfmmu_hat_exit(hatlockp); 1959 return; 1960 } 1961 1962 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 1963 last = freelist = NULL; 1964 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 1965 tsbinfop = tsbinfop->tsb_next) { 1966 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 1967 1968 /* 1969 * Cast the TSB into a struct free_tsb and put it on the free 1970 * list. 1971 */ 1972 if (freelist == NULL) { 1973 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 1974 } else { 1975 last->next = (struct free_tsb *)tsbinfop->tsb_va; 1976 last = last->next; 1977 } 1978 last->next = NULL; 1979 last->tsbinfop = tsbinfop; 1980 tsbinfop->tsb_flags |= TSB_SWAPPED; 1981 /* 1982 * Zero out the TTE to clear the valid bit. 1983 * Note we can't use a value like 0xbad because we want to 1984 * ensure diagnostic bits are NEVER set on TTEs that might 1985 * be loaded. The intent is to catch any invalid access 1986 * to the swapped TSB, such as a thread running with a valid 1987 * context without first calling sfmmu_tsb_swapin() to 1988 * allocate TSB memory. 1989 */ 1990 tsbinfop->tsb_tte.ll = 0; 1991 } 1992 1993 /* Now we can drop the lock and free the TSB memory. 
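 * Each free_tsb node above is built in the first words of the TSB's
 * own virtual address range (tsb_va), so this walk reads memory that
 * sfmmu_tsb_free() is about to release; that is safe because the
 * tsbinfos are already marked TSB_SWAPPED and, as noted above, the
 * process is not runnable while it is being swapped out.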
*/ 1994 sfmmu_hat_exit(hatlockp); 1995 for (; freelist != NULL; freelist = next) { 1996 next = freelist->next; 1997 sfmmu_tsb_free(freelist->tsbinfop); 1998 } 1999 } 2000 2001 /* 2002 * Duplicate the translations of an as into another newas 2003 */ 2004 /* ARGSUSED */ 2005 int 2006 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 2007 uint_t flag) 2008 { 2009 sf_srd_t *srdp; 2010 sf_scd_t *scdp; 2011 int i; 2012 extern uint_t get_color_start(struct as *); 2013 2014 ASSERT(hat->sfmmu_xhat_provider == NULL); 2015 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 2016 (flag == HAT_DUP_SRD)); 2017 ASSERT(hat != ksfmmup); 2018 ASSERT(newhat != ksfmmup); 2019 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 2020 2021 if (flag == HAT_DUP_COW) { 2022 panic("hat_dup: HAT_DUP_COW not supported"); 2023 } 2024 2025 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2026 ASSERT(srdp->srd_evp != NULL); 2027 VN_HOLD(srdp->srd_evp); 2028 ASSERT(srdp->srd_refcnt > 0); 2029 newhat->sfmmu_srdp = srdp; 2030 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 2031 } 2032 2033 /* 2034 * HAT_DUP_ALL flag is used after as duplication is done. 2035 */ 2036 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2037 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2038 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2039 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2040 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2041 } 2042 2043 /* check if need to join scd */ 2044 if ((scdp = hat->sfmmu_scdp) != NULL && 2045 newhat->sfmmu_scdp != scdp) { 2046 int ret; 2047 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2048 &scdp->scd_region_map, ret); 2049 ASSERT(ret); 2050 sfmmu_join_scd(scdp, newhat); 2051 ASSERT(newhat->sfmmu_scdp == scdp && 2052 scdp->scd_refcnt >= 2); 2053 for (i = 0; i < max_mmu_page_sizes; i++) { 2054 newhat->sfmmu_ismttecnt[i] = 2055 hat->sfmmu_ismttecnt[i]; 2056 newhat->sfmmu_scdismttecnt[i] = 2057 hat->sfmmu_scdismttecnt[i]; 2058 } 2059 } 2060 2061 sfmmu_check_page_sizes(newhat, 1); 2062 } 2063 2064 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2065 update_proc_pgcolorbase_after_fork != 0) { 2066 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2067 } 2068 return (0); 2069 } 2070 2071 void 2072 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2073 uint_t attr, uint_t flags) 2074 { 2075 hat_do_memload(hat, addr, pp, attr, flags, 2076 SFMMU_INVALID_SHMERID); 2077 } 2078 2079 void 2080 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2081 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2082 { 2083 uint_t rid; 2084 if (rcookie == HAT_INVALID_REGION_COOKIE || 2085 hat->sfmmu_xhat_provider != NULL) { 2086 hat_do_memload(hat, addr, pp, attr, flags, 2087 SFMMU_INVALID_SHMERID); 2088 return; 2089 } 2090 rid = (uint_t)((uint64_t)rcookie); 2091 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2092 hat_do_memload(hat, addr, pp, attr, flags, rid); 2093 } 2094 2095 /* 2096 * Set up addr to map to page pp with protection prot. 2097 * As an optimization we also load the TSB with the 2098 * corresponding tte but it is no big deal if the tte gets kicked out. 
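 *
 * For illustration only (hypothetical caller): a segment driver that
 * faulted in a single 8K page pp at va would typically call
 *
 *	hat_memload(as->a_hat, va, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 * which funnels into hat_do_memload() below with rid set to
 * SFMMU_INVALID_SHMERID (i.e. no shared region).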
2099 */ 2100 static void 2101 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2102 uint_t attr, uint_t flags, uint_t rid) 2103 { 2104 tte_t tte; 2105 2106 2107 ASSERT(hat != NULL); 2108 ASSERT(PAGE_LOCKED(pp)); 2109 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2110 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2111 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2112 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2113 2114 if (PP_ISFREE(pp)) { 2115 panic("hat_memload: loading a mapping to free page %p", 2116 (void *)pp); 2117 } 2118 2119 if (hat->sfmmu_xhat_provider) { 2120 /* no regions for xhats */ 2121 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2122 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 2123 return; 2124 } 2125 2126 ASSERT((hat == ksfmmup) || 2127 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2128 2129 if (flags & ~SFMMU_LOAD_ALLFLAG) 2130 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2131 flags & ~SFMMU_LOAD_ALLFLAG); 2132 2133 if (hat->sfmmu_rmstat) 2134 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2135 2136 #if defined(SF_ERRATA_57) 2137 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2138 (addr < errata57_limit) && (attr & PROT_EXEC) && 2139 !(flags & HAT_LOAD_SHARE)) { 2140 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2141 " page executable"); 2142 attr &= ~PROT_EXEC; 2143 } 2144 #endif 2145 2146 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2147 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2148 2149 /* 2150 * Check TSB and TLB page sizes. 2151 */ 2152 if ((flags & HAT_LOAD_SHARE) == 0) { 2153 sfmmu_check_page_sizes(hat, 1); 2154 } 2155 } 2156 2157 /* 2158 * hat_devload can be called to map real memory (e.g. 2159 * /dev/kmem) and even though hat_devload will determine pf is 2160 * for memory, it will be unable to get a shared lock on the 2161 * page (because someone else has it exclusively) and will 2162 * pass dp = NULL. If tteload doesn't get a non-NULL 2163 * page pointer it can't cache memory. 
2164 */ 2165 void 2166 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2167 uint_t attr, int flags) 2168 { 2169 tte_t tte; 2170 struct page *pp = NULL; 2171 int use_lgpg = 0; 2172 2173 ASSERT(hat != NULL); 2174 2175 if (hat->sfmmu_xhat_provider) { 2176 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 2177 return; 2178 } 2179 2180 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2181 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2182 ASSERT((hat == ksfmmup) || 2183 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2184 if (len == 0) 2185 panic("hat_devload: zero len"); 2186 if (flags & ~SFMMU_LOAD_ALLFLAG) 2187 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2188 flags & ~SFMMU_LOAD_ALLFLAG); 2189 2190 #if defined(SF_ERRATA_57) 2191 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2192 (addr < errata57_limit) && (attr & PROT_EXEC) && 2193 !(flags & HAT_LOAD_SHARE)) { 2194 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2195 " page executable"); 2196 attr &= ~PROT_EXEC; 2197 } 2198 #endif 2199 2200 /* 2201 * If it's a memory page find its pp 2202 */ 2203 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2204 pp = page_numtopp_nolock(pfn); 2205 if (pp == NULL) { 2206 flags |= HAT_LOAD_NOCONSIST; 2207 } else { 2208 if (PP_ISFREE(pp)) { 2209 panic("hat_memload: loading " 2210 "a mapping to free page %p", 2211 (void *)pp); 2212 } 2213 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2214 panic("hat_memload: loading a mapping " 2215 "to unlocked relocatable page %p", 2216 (void *)pp); 2217 } 2218 ASSERT(len == MMU_PAGESIZE); 2219 } 2220 } 2221 2222 if (hat->sfmmu_rmstat) 2223 hat_resvstat(len, hat->sfmmu_as, addr); 2224 2225 if (flags & HAT_LOAD_NOCONSIST) { 2226 attr |= SFMMU_UNCACHEVTTE; 2227 use_lgpg = 1; 2228 } 2229 if (!pf_is_memory(pfn)) { 2230 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2231 use_lgpg = 1; 2232 switch (attr & HAT_ORDER_MASK) { 2233 case HAT_STRICTORDER: 2234 case HAT_UNORDERED_OK: 2235 /* 2236 * we set the side effect bit for all non 2237 * memory mappings unless merging is ok 2238 */ 2239 attr |= SFMMU_SIDEFFECT; 2240 break; 2241 case HAT_MERGING_OK: 2242 case HAT_LOADCACHING_OK: 2243 case HAT_STORECACHING_OK: 2244 break; 2245 default: 2246 panic("hat_devload: bad attr"); 2247 break; 2248 } 2249 } 2250 while (len) { 2251 if (!use_lgpg) { 2252 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2253 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2254 flags, SFMMU_INVALID_SHMERID); 2255 len -= MMU_PAGESIZE; 2256 addr += MMU_PAGESIZE; 2257 pfn++; 2258 continue; 2259 } 2260 /* 2261 * try to use large pages, check va/pa alignments 2262 * Note that 32M/256M page sizes are not (yet) supported. 
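 * For example, a 4M tte is used only when len >= MMU_PAGESIZE4M, both
 * the virtual address and the physical address (mmu_ptob(pfn)) are 4M
 * aligned, and the TTE4M bit of disable_large_pages is clear;
 * otherwise we fall back to 512K, then 64K, and finally 8K ttes.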
2263 */ 2264 if ((len >= MMU_PAGESIZE4M) && 2265 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 2266 !(disable_large_pages & (1 << TTE4M)) && 2267 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 2268 sfmmu_memtte(&tte, pfn, attr, TTE4M); 2269 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2270 flags, SFMMU_INVALID_SHMERID); 2271 len -= MMU_PAGESIZE4M; 2272 addr += MMU_PAGESIZE4M; 2273 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 2274 } else if ((len >= MMU_PAGESIZE512K) && 2275 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 2276 !(disable_large_pages & (1 << TTE512K)) && 2277 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 2278 sfmmu_memtte(&tte, pfn, attr, TTE512K); 2279 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2280 flags, SFMMU_INVALID_SHMERID); 2281 len -= MMU_PAGESIZE512K; 2282 addr += MMU_PAGESIZE512K; 2283 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 2284 } else if ((len >= MMU_PAGESIZE64K) && 2285 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 2286 !(disable_large_pages & (1 << TTE64K)) && 2287 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 2288 sfmmu_memtte(&tte, pfn, attr, TTE64K); 2289 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2290 flags, SFMMU_INVALID_SHMERID); 2291 len -= MMU_PAGESIZE64K; 2292 addr += MMU_PAGESIZE64K; 2293 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 2294 } else { 2295 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2296 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2297 flags, SFMMU_INVALID_SHMERID); 2298 len -= MMU_PAGESIZE; 2299 addr += MMU_PAGESIZE; 2300 pfn++; 2301 } 2302 } 2303 2304 /* 2305 * Check TSB and TLB page sizes. 2306 */ 2307 if ((flags & HAT_LOAD_SHARE) == 0) { 2308 sfmmu_check_page_sizes(hat, 1); 2309 } 2310 } 2311 2312 void 2313 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2314 struct page **pps, uint_t attr, uint_t flags) 2315 { 2316 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2317 SFMMU_INVALID_SHMERID); 2318 } 2319 2320 void 2321 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 2322 struct page **pps, uint_t attr, uint_t flags, 2323 hat_region_cookie_t rcookie) 2324 { 2325 uint_t rid; 2326 if (rcookie == HAT_INVALID_REGION_COOKIE || 2327 hat->sfmmu_xhat_provider != NULL) { 2328 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2329 SFMMU_INVALID_SHMERID); 2330 return; 2331 } 2332 rid = (uint_t)((uint64_t)rcookie); 2333 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2334 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid); 2335 } 2336 2337 /* 2338 * Map the largest extent possible out of the page array. The array may NOT 2339 * be in order. The largest possible mapping a page can have 2340 * is specified in the p_szc field. The p_szc field 2341 * cannot change as long as there are any mappings (large or small) 2342 * to any of the pages that make up the large page. (i.e., any 2343 * promotion/demotion of page size is not up to the hat but up to 2344 * the page free list manager). The array 2345 * should consist of properly aligned contiguous pages that are 2346 * part of a big page for a large mapping to be created.
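 *
 * If a large mapping cannot be used (the page size is disabled, the
 * address or pfn is misaligned, or the physical-contiguity check in
 * sfmmu_tteload_array() fails), the code below falls back to
 * sfmmu_memload_batchsmall(), which loads the remainder as 8K ttes,
 * batching them an hmeblk at a time.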
2347 */ 2348 static void 2349 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len, 2350 struct page **pps, uint_t attr, uint_t flags, uint_t rid) 2351 { 2352 int ttesz; 2353 size_t mapsz; 2354 pgcnt_t numpg, npgs; 2355 tte_t tte; 2356 page_t *pp; 2357 uint_t large_pages_disable; 2358 2359 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2360 SFMMU_VALIDATE_HMERID(hat, rid, addr, len); 2361 2362 if (hat->sfmmu_xhat_provider) { 2363 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2364 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 2365 return; 2366 } 2367 2368 if (hat->sfmmu_rmstat) 2369 hat_resvstat(len, hat->sfmmu_as, addr); 2370 2371 #if defined(SF_ERRATA_57) 2372 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2373 (addr < errata57_limit) && (attr & PROT_EXEC) && 2374 !(flags & HAT_LOAD_SHARE)) { 2375 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2376 "user page executable"); 2377 attr &= ~PROT_EXEC; 2378 } 2379 #endif 2380 2381 /* Get number of pages */ 2382 npgs = len >> MMU_PAGESHIFT; 2383 2384 if (flags & HAT_LOAD_SHARE) { 2385 large_pages_disable = disable_ism_large_pages; 2386 } else { 2387 large_pages_disable = disable_large_pages; 2388 } 2389 2390 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2391 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2392 rid); 2393 return; 2394 } 2395 2396 while (npgs >= NHMENTS) { 2397 pp = *pps; 2398 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2399 /* 2400 * Check if this page size is disabled. 2401 */ 2402 if (large_pages_disable & (1 << ttesz)) 2403 continue; 2404 2405 numpg = TTEPAGES(ttesz); 2406 mapsz = numpg << MMU_PAGESHIFT; 2407 if ((npgs >= numpg) && 2408 IS_P2ALIGNED(addr, mapsz) && 2409 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2410 /* 2411 * At this point we have enough pages and 2412 * we know the virtual address and the pfn 2413 * are properly aligned. We still need 2414 * to check for physical contiguity but since 2415 * it is very likely that this is the case 2416 * we will assume they are so and undo 2417 * the request if necessary. It would 2418 * be great if we could get a hint flag 2419 * like HAT_CONTIG which would tell us 2420 * the pages are contigous for sure. 2421 */ 2422 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2423 attr, ttesz); 2424 if (!sfmmu_tteload_array(hat, &tte, addr, 2425 pps, flags, rid)) { 2426 break; 2427 } 2428 } 2429 } 2430 if (ttesz == TTE8K) { 2431 /* 2432 * We were not able to map array using a large page 2433 * batch a hmeblk or fraction at a time. 2434 */ 2435 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2436 & (NHMENTS-1); 2437 numpg = NHMENTS - numpg; 2438 ASSERT(numpg <= npgs); 2439 mapsz = numpg * MMU_PAGESIZE; 2440 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2441 numpg, rid); 2442 } 2443 addr += mapsz; 2444 npgs -= numpg; 2445 pps += numpg; 2446 } 2447 2448 if (npgs) { 2449 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2450 rid); 2451 } 2452 2453 /* 2454 * Check TSB and TLB page sizes. 2455 */ 2456 if ((flags & HAT_LOAD_SHARE) == 0) { 2457 sfmmu_check_page_sizes(hat, 1); 2458 } 2459 } 2460 2461 /* 2462 * Function tries to batch 8K pages into the same hme blk. 2463 */ 2464 static void 2465 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2466 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid) 2467 { 2468 tte_t tte; 2469 page_t *pp; 2470 struct hmehash_bucket *hmebp; 2471 struct hme_blk *hmeblkp; 2472 int index; 2473 2474 while (npgs) { 2475 /* 2476 * Acquire the hash bucket. 
2477 */ 2478 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2479 rid); 2480 ASSERT(hmebp); 2481 2482 /* 2483 * Find the hment block. 2484 */ 2485 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2486 TTE8K, flags, rid); 2487 ASSERT(hmeblkp); 2488 2489 do { 2490 /* 2491 * Make the tte. 2492 */ 2493 pp = *pps; 2494 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2495 2496 /* 2497 * Add the translation. 2498 */ 2499 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2500 vaddr, pps, flags, rid); 2501 2502 /* 2503 * Goto next page. 2504 */ 2505 pps++; 2506 npgs--; 2507 2508 /* 2509 * Goto next address. 2510 */ 2511 vaddr += MMU_PAGESIZE; 2512 2513 /* 2514 * Don't crossover into a different hmentblk. 2515 */ 2516 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2517 (NHMENTS-1)); 2518 2519 } while (index != 0 && npgs != 0); 2520 2521 /* 2522 * Release the hash bucket. 2523 */ 2524 2525 sfmmu_tteload_release_hashbucket(hmebp); 2526 } 2527 } 2528 2529 /* 2530 * Construct a tte for a page: 2531 * 2532 * tte_valid = 1 2533 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2534 * tte_size = size 2535 * tte_nfo = attr & HAT_NOFAULT 2536 * tte_ie = attr & HAT_STRUCTURE_LE 2537 * tte_hmenum = hmenum 2538 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2539 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2540 * tte_ref = 1 (optimization) 2541 * tte_wr_perm = attr & PROT_WRITE; 2542 * tte_no_sync = attr & HAT_NOSYNC 2543 * tte_lock = attr & SFMMU_LOCKTTE 2544 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2545 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2546 * tte_e = attr & SFMMU_SIDEFFECT 2547 * tte_priv = !(attr & PROT_USER) 2548 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2549 * tte_glb = 0 2550 */ 2551 void 2552 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2553 { 2554 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2555 2556 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2557 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2558 2559 if (TTE_IS_NOSYNC(ttep)) { 2560 TTE_SET_REF(ttep); 2561 if (TTE_IS_WRITABLE(ttep)) { 2562 TTE_SET_MOD(ttep); 2563 } 2564 } 2565 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2566 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2567 } 2568 } 2569 2570 /* 2571 * This function will add a translation to the hme_blk and allocate the 2572 * hme_blk if one does not exist. 2573 * If a page structure is specified then it will add the 2574 * corresponding hment to the mapping list. 2575 * It will also update the hmenum field for the tte. 2576 * 2577 * Currently this function is only used for kernel mappings. 2578 * So pass invalid region to sfmmu_tteload_array(). 2579 */ 2580 void 2581 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2582 uint_t flags) 2583 { 2584 ASSERT(sfmmup == ksfmmup); 2585 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2586 SFMMU_INVALID_SHMERID); 2587 } 2588 2589 /* 2590 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2591 * Assumes that a particular page size may only be resident in one TSB. 
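 *
 * For the kernel HAT this means choosing between the two kernel TSBs:
 * the 8K-indexed ktsb and the 4M-indexed ktsb4m.  For user HATs the
 * tsb_info for the page size is found via SFMMU_GET_TSBINFO() below,
 * and the entry is skipped if that TSB is swapped out or in the middle
 * of relocation.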
2592 */ 2593 static void 2594 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2595 { 2596 struct tsb_info *tsbinfop = NULL; 2597 uint64_t tag; 2598 struct tsbe *tsbe_addr; 2599 uint64_t tsb_base; 2600 uint_t tsb_size; 2601 int vpshift = MMU_PAGESHIFT; 2602 int phys = 0; 2603 2604 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2605 phys = ktsb_phys; 2606 if (ttesz >= TTE4M) { 2607 #ifndef sun4v 2608 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2609 #endif 2610 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2611 tsb_size = ktsb4m_szcode; 2612 } else { 2613 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2614 tsb_size = ktsb_szcode; 2615 } 2616 } else { 2617 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2618 2619 /* 2620 * If there isn't a TSB for this page size, or the TSB is 2621 * swapped out, there is nothing to do. Note that the latter 2622 * case seems impossible but can occur if hat_pageunload() 2623 * is called on an ISM mapping while the process is swapped 2624 * out. 2625 */ 2626 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2627 return; 2628 2629 /* 2630 * If another thread is in the middle of relocating a TSB 2631 * we can't unload the entry so set a flag so that the 2632 * TSB will be flushed before it can be accessed by the 2633 * process. 2634 */ 2635 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2636 if (ttep == NULL) 2637 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2638 return; 2639 } 2640 #if defined(UTSB_PHYS) 2641 phys = 1; 2642 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2643 #else 2644 tsb_base = (uint64_t)tsbinfop->tsb_va; 2645 #endif 2646 tsb_size = tsbinfop->tsb_szc; 2647 } 2648 if (ttesz >= TTE4M) 2649 vpshift = MMU_PAGESHIFT4M; 2650 2651 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2652 tag = sfmmu_make_tsbtag(vaddr); 2653 2654 if (ttep == NULL) { 2655 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2656 } else { 2657 if (ttesz >= TTE4M) { 2658 SFMMU_STAT(sf_tsb_load4m); 2659 } else { 2660 SFMMU_STAT(sf_tsb_load8k); 2661 } 2662 2663 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2664 } 2665 } 2666 2667 /* 2668 * Unmap all entries from [start, end) matching the given page size. 2669 * 2670 * This function is used primarily to unmap replicated 64K or 512K entries 2671 * from the TSB that are inserted using the base page size TSB pointer, but 2672 * it may also be called to unmap a range of addresses from the TSB. 2673 */ 2674 void 2675 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2676 { 2677 struct tsb_info *tsbinfop; 2678 uint64_t tag; 2679 struct tsbe *tsbe_addr; 2680 caddr_t vaddr; 2681 uint64_t tsb_base; 2682 int vpshift, vpgsz; 2683 uint_t tsb_size; 2684 int phys = 0; 2685 2686 /* 2687 * Assumptions: 2688 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2689 * at a time shooting down any valid entries we encounter. 2690 * 2691 * If ttesz >= 4M we walk the range 4M at a time shooting 2692 * down any valid mappings we find. 2693 */ 2694 if (sfmmup == ksfmmup) { 2695 phys = ktsb_phys; 2696 if (ttesz >= TTE4M) { 2697 #ifndef sun4v 2698 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2699 #endif 2700 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2701 tsb_size = ktsb4m_szcode; 2702 } else { 2703 tsb_base = (phys)? 
ktsb_pbase : (uint64_t)ktsb_base; 2704 tsb_size = ktsb_szcode; 2705 } 2706 } else { 2707 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2708 2709 /* 2710 * If there isn't a TSB for this page size, or the TSB is 2711 * swapped out, there is nothing to do. Note that the latter 2712 * case seems impossible but can occur if hat_pageunload() 2713 * is called on an ISM mapping while the process is swapped 2714 * out. 2715 */ 2716 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2717 return; 2718 2719 /* 2720 * If another thread is in the middle of relocating a TSB 2721 * we can't unload the entry so set a flag so that the 2722 * TSB will be flushed before it can be accessed by the 2723 * process. 2724 */ 2725 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2726 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2727 return; 2728 } 2729 #if defined(UTSB_PHYS) 2730 phys = 1; 2731 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2732 #else 2733 tsb_base = (uint64_t)tsbinfop->tsb_va; 2734 #endif 2735 tsb_size = tsbinfop->tsb_szc; 2736 } 2737 if (ttesz >= TTE4M) { 2738 vpshift = MMU_PAGESHIFT4M; 2739 vpgsz = MMU_PAGESIZE4M; 2740 } else { 2741 vpshift = MMU_PAGESHIFT; 2742 vpgsz = MMU_PAGESIZE; 2743 } 2744 2745 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2746 tag = sfmmu_make_tsbtag(vaddr); 2747 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2748 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2749 } 2750 } 2751 2752 /* 2753 * Select the optimum TSB size given the number of mappings 2754 * that need to be cached. 2755 */ 2756 static int 2757 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2758 { 2759 int szc = 0; 2760 2761 #ifdef DEBUG 2762 if (tsb_grow_stress) { 2763 uint32_t randval = (uint32_t)gettick() >> 4; 2764 return (randval % (tsb_max_growsize + 1)); 2765 } 2766 #endif /* DEBUG */ 2767 2768 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2769 szc++; 2770 return (szc); 2771 } 2772 2773 /* 2774 * This function will add a translation to the hme_blk and allocate the 2775 * hme_blk if one does not exist. 2776 * If a page structure is specified then it will add the 2777 * corresponding hment to the mapping list. 2778 * It will also update the hmenum field for the tte. 2779 * Furthermore, it attempts to create a large page translation 2780 * for <addr,hat> at page array pps. It assumes addr and first 2781 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2782 */ 2783 static int 2784 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2785 page_t **pps, uint_t flags, uint_t rid) 2786 { 2787 struct hmehash_bucket *hmebp; 2788 struct hme_blk *hmeblkp; 2789 int ret; 2790 uint_t size; 2791 2792 /* 2793 * Get mapping size. 2794 */ 2795 size = TTE_CSZ(ttep); 2796 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2797 2798 /* 2799 * Acquire the hash bucket. 2800 */ 2801 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2802 ASSERT(hmebp); 2803 2804 /* 2805 * Find the hment block. 2806 */ 2807 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2808 rid); 2809 ASSERT(hmeblkp); 2810 2811 /* 2812 * Add the translation. 2813 */ 2814 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2815 rid); 2816 2817 /* 2818 * Release the hash bucket. 2819 */ 2820 sfmmu_tteload_release_hashbucket(hmebp); 2821 2822 return (ret); 2823 } 2824 2825 /* 2826 * Function locks and returns a pointer to the hash bucket for vaddr and size. 
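 * The bucket is selected by HME_HASH_FUNCTION() from the tag id
 * returned by sfmmutohtagid() (the hat itself or, for a valid shared
 * region id, its SRD) and from vaddr shifted by HME_HASH_SHIFT(size).
 * The caller must eventually drop the bucket lock via
 * sfmmu_tteload_release_hashbucket().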
2827 */ 2828 static struct hmehash_bucket * 2829 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size, 2830 uint_t rid) 2831 { 2832 struct hmehash_bucket *hmebp; 2833 int hmeshift; 2834 void *htagid = sfmmutohtagid(sfmmup, rid); 2835 2836 ASSERT(htagid != NULL); 2837 2838 hmeshift = HME_HASH_SHIFT(size); 2839 2840 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift); 2841 2842 SFMMU_HASH_LOCK(hmebp); 2843 2844 return (hmebp); 2845 } 2846 2847 /* 2848 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2849 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, an hmeblk is 2850 * allocated. 2851 */ 2852 static struct hme_blk * 2853 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2854 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid) 2855 { 2856 hmeblk_tag hblktag; 2857 int hmeshift; 2858 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2859 2860 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2861 2862 hblktag.htag_id = sfmmutohtagid(sfmmup, rid); 2863 ASSERT(hblktag.htag_id != NULL); 2864 hmeshift = HME_HASH_SHIFT(size); 2865 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2866 hblktag.htag_rehash = HME_HASH_REHASH(size); 2867 hblktag.htag_rid = rid; 2868 2869 ttearray_realloc: 2870 2871 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 2872 2873 /* 2874 * We block until hblk_reserve_lock is released; it's held by 2875 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2876 * replaced by a hblk from sfmmu8_cache. 2877 */ 2878 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2879 hblk_reserve_thread != curthread) { 2880 SFMMU_HASH_UNLOCK(hmebp); 2881 mutex_enter(&hblk_reserve_lock); 2882 mutex_exit(&hblk_reserve_lock); 2883 SFMMU_STAT(sf_hblk_reserve_hit); 2884 SFMMU_HASH_LOCK(hmebp); 2885 goto ttearray_realloc; 2886 } 2887 2888 if (hmeblkp == NULL) { 2889 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2890 hblktag, flags, rid); 2891 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2892 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2893 } else { 2894 /* 2895 * It is possible for 8k and 64k hblks to collide since they 2896 * have the same rehash value. This is because we 2897 * lazily free hblks and 8K/64K blks could be lingering. 2898 * If we find a size mismatch we free the block and try again. 2899 */ 2900 if (get_hblk_ttesz(hmeblkp) != size) { 2901 ASSERT(!hmeblkp->hblk_vcnt); 2902 ASSERT(!hmeblkp->hblk_hmecnt); 2903 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2904 &list, 0); 2905 goto ttearray_realloc; 2906 } 2907 if (hmeblkp->hblk_shw_bit) { 2908 /* 2909 * if the hblk was previously used as a shadow hblk then 2910 * we will change it to a normal hblk 2911 */ 2912 ASSERT(!hmeblkp->hblk_shared); 2913 if (hmeblkp->hblk_shw_mask) { 2914 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2915 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2916 goto ttearray_realloc; 2917 } else { 2918 hmeblkp->hblk_shw_bit = 0; 2919 } 2920 } 2921 SFMMU_STAT(sf_hblk_hit); 2922 } 2923 2924 /* 2925 * hat_memload() should never call kmem_cache_free() for kernel hmeblks; 2926 * see block comment showing the stacktrace in sfmmu_hblk_alloc(); 2927 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will 2928 * just add these hmeblks to the per-cpu pending queue.
2929 */ 2930 sfmmu_hblks_list_purge(&list, 1); 2931 2932 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2933 ASSERT(!hmeblkp->hblk_shw_bit); 2934 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2935 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2936 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 2937 2938 return (hmeblkp); 2939 } 2940 2941 /* 2942 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2943 * otherwise. 2944 */ 2945 static int 2946 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2947 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 2948 { 2949 page_t *pp = *pps; 2950 int hmenum, size, remap; 2951 tte_t tteold, flush_tte; 2952 #ifdef DEBUG 2953 tte_t orig_old; 2954 #endif /* DEBUG */ 2955 struct sf_hment *sfhme; 2956 kmutex_t *pml, *pmtx; 2957 hatlock_t *hatlockp; 2958 int myflt; 2959 2960 /* 2961 * remove this panic when we decide to let user virtual address 2962 * space be >= USERLIMIT. 2963 */ 2964 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2965 panic("user addr %p in kernel space", (void *)vaddr); 2966 #if defined(TTE_IS_GLOBAL) 2967 if (TTE_IS_GLOBAL(ttep)) 2968 panic("sfmmu_tteload: creating global tte"); 2969 #endif 2970 2971 #ifdef DEBUG 2972 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2973 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2974 panic("sfmmu_tteload: non cacheable memory tte"); 2975 #endif /* DEBUG */ 2976 2977 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 2978 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 2979 TTE_SET_REF(ttep); 2980 TTE_SET_MOD(ttep); 2981 } 2982 2983 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2984 !TTE_IS_MOD(ttep)) { 2985 /* 2986 * Don't load TSB for dummy as in ISM. Also don't preload 2987 * the TSB if the TTE isn't writable since we're likely to 2988 * fault on it again -- preloading can be fairly expensive. 2989 */ 2990 flags |= SFMMU_NO_TSBLOAD; 2991 } 2992 2993 size = TTE_CSZ(ttep); 2994 switch (size) { 2995 case TTE8K: 2996 SFMMU_STAT(sf_tteload8k); 2997 break; 2998 case TTE64K: 2999 SFMMU_STAT(sf_tteload64k); 3000 break; 3001 case TTE512K: 3002 SFMMU_STAT(sf_tteload512k); 3003 break; 3004 case TTE4M: 3005 SFMMU_STAT(sf_tteload4m); 3006 break; 3007 case (TTE32M): 3008 SFMMU_STAT(sf_tteload32m); 3009 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3010 break; 3011 case (TTE256M): 3012 SFMMU_STAT(sf_tteload256m); 3013 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3014 break; 3015 } 3016 3017 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 3018 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 3019 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3020 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3021 3022 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3023 3024 /* 3025 * Need to grab mlist lock here so that pageunload 3026 * will not change tte behind us. 3027 */ 3028 if (pp) { 3029 pml = sfmmu_mlist_enter(pp); 3030 } 3031 3032 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3033 /* 3034 * Look for corresponding hment and if valid verify 3035 * pfns are equal. 
3036 */ 3037 remap = TTE_IS_VALID(&tteold); 3038 if (remap) { 3039 pfn_t new_pfn, old_pfn; 3040 3041 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3042 new_pfn = TTE_TO_PFN(vaddr, ttep); 3043 3044 if (flags & HAT_LOAD_REMAP) { 3045 /* make sure we are remapping same type of pages */ 3046 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3047 panic("sfmmu_tteload - tte remap io<->memory"); 3048 } 3049 if (old_pfn != new_pfn && 3050 (pp != NULL || sfhme->hme_page != NULL)) { 3051 panic("sfmmu_tteload - tte remap pp != NULL"); 3052 } 3053 } else if (old_pfn != new_pfn) { 3054 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3055 (void *)hmeblkp); 3056 } 3057 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3058 } 3059 3060 if (pp) { 3061 if (size == TTE8K) { 3062 #ifdef VAC 3063 /* 3064 * Handle VAC consistency 3065 */ 3066 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3067 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3068 } 3069 #endif 3070 3071 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3072 pmtx = sfmmu_page_enter(pp); 3073 PP_CLRRO(pp); 3074 sfmmu_page_exit(pmtx); 3075 } else if (!PP_ISMAPPED(pp) && 3076 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3077 pmtx = sfmmu_page_enter(pp); 3078 if (!(PP_ISMOD(pp))) { 3079 PP_SETRO(pp); 3080 } 3081 sfmmu_page_exit(pmtx); 3082 } 3083 3084 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3085 /* 3086 * sfmmu_pagearray_setup failed so return 3087 */ 3088 sfmmu_mlist_exit(pml); 3089 return (1); 3090 } 3091 } 3092 3093 /* 3094 * Make sure hment is not on a mapping list. 3095 */ 3096 ASSERT(remap || (sfhme->hme_page == NULL)); 3097 3098 /* if it is not a remap then hme->next better be NULL */ 3099 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3100 3101 if (flags & HAT_LOAD_LOCK) { 3102 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3103 panic("too high lckcnt-hmeblk %p", 3104 (void *)hmeblkp); 3105 } 3106 atomic_add_32(&hmeblkp->hblk_lckcnt, 1); 3107 3108 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3109 } 3110 3111 #ifdef VAC 3112 if (pp && PP_ISNC(pp)) { 3113 /* 3114 * If the physical page is marked to be uncacheable, like 3115 * by a vac conflict, make sure the new mapping is also 3116 * uncacheable. 3117 */ 3118 TTE_CLR_VCACHEABLE(ttep); 3119 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3120 } 3121 #endif 3122 ttep->tte_hmenum = hmenum; 3123 3124 #ifdef DEBUG 3125 orig_old = tteold; 3126 #endif /* DEBUG */ 3127 3128 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3129 if ((sfmmup == KHATID) && 3130 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3131 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3132 } 3133 #ifdef DEBUG 3134 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3135 #endif /* DEBUG */ 3136 } 3137 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3138 3139 if (!TTE_IS_VALID(&tteold)) { 3140 3141 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 3142 if (rid == SFMMU_INVALID_SHMERID) { 3143 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 3144 } else { 3145 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3146 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3147 /* 3148 * We already accounted for region ttecnt's in sfmmu 3149 * during hat_join_region() processing. Here we 3150 * only update ttecnt's in region struture. 
3151 */ 3152 atomic_add_long(&rgnp->rgn_ttecnt[size], 1); 3153 } 3154 } 3155 3156 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3157 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3158 sfmmup != ksfmmup) { 3159 uchar_t tteflag = 1 << size; 3160 if (rid == SFMMU_INVALID_SHMERID) { 3161 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3162 hatlockp = sfmmu_hat_enter(sfmmup); 3163 sfmmup->sfmmu_tteflags |= tteflag; 3164 sfmmu_hat_exit(hatlockp); 3165 } 3166 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3167 hatlockp = sfmmu_hat_enter(sfmmup); 3168 sfmmup->sfmmu_rtteflags |= tteflag; 3169 sfmmu_hat_exit(hatlockp); 3170 } 3171 /* 3172 * Update the current CPU tsbmiss area, so the current thread 3173 * won't need to take the tsbmiss for the new pagesize. 3174 * The other threads in the process will update their tsb 3175 * miss area lazily in sfmmu_tsbmiss_exception() when they 3176 * fail to find the translation for a newly added pagesize. 3177 */ 3178 if (size > TTE64K && myflt) { 3179 struct tsbmiss *tsbmp; 3180 kpreempt_disable(); 3181 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3182 if (rid == SFMMU_INVALID_SHMERID) { 3183 if (!(tsbmp->uhat_tteflags & tteflag)) { 3184 tsbmp->uhat_tteflags |= tteflag; 3185 } 3186 } else { 3187 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3188 tsbmp->uhat_rtteflags |= tteflag; 3189 } 3190 } 3191 kpreempt_enable(); 3192 } 3193 } 3194 3195 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3196 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3197 hatlockp = sfmmu_hat_enter(sfmmup); 3198 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3199 sfmmu_hat_exit(hatlockp); 3200 } 3201 3202 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3203 hw_tte.tte_intlo; 3204 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3205 hw_tte.tte_inthi; 3206 3207 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3208 /* 3209 * If remap and new tte differs from old tte we need 3210 * to sync the mod bit and flush TLB/TSB. We don't 3211 * need to sync ref bit because we currently always set 3212 * ref bit in tteload. 3213 */ 3214 ASSERT(TTE_IS_REF(ttep)); 3215 if (TTE_IS_MOD(&tteold)) { 3216 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3217 } 3218 /* 3219 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3220 * hmes are only used for read only text. Adding this code for 3221 * completeness and future use of shared hmeblks with writable 3222 * mappings of VMODSORT vnodes. 3223 */ 3224 if (hmeblkp->hblk_shared) { 3225 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3226 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3227 xt_sync(cpuset); 3228 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3229 } else { 3230 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3231 xt_sync(sfmmup->sfmmu_cpusran); 3232 } 3233 } 3234 3235 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3236 /* 3237 * We only preload 8K and 4M mappings into the TSB, since 3238 * 64K and 512K mappings are replicated and hence don't 3239 * have a single, unique TSB entry. Ditto for 32M/256M. 3240 */ 3241 if (size == TTE8K || size == TTE4M) { 3242 sf_scd_t *scdp; 3243 hatlockp = sfmmu_hat_enter(sfmmup); 3244 /* 3245 * Don't preload private TSB if the mapping is used 3246 * by the shctx in the SCD. 
3247 */ 3248 scdp = sfmmup->sfmmu_scdp; 3249 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3250 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3251 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3252 size); 3253 } 3254 sfmmu_hat_exit(hatlockp); 3255 } 3256 } 3257 if (pp) { 3258 if (!remap) { 3259 HME_ADD(sfhme, pp); 3260 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 3261 ASSERT(hmeblkp->hblk_hmecnt > 0); 3262 3263 /* 3264 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3265 * see pageunload() for comment. 3266 */ 3267 } 3268 sfmmu_mlist_exit(pml); 3269 } 3270 3271 return (0); 3272 } 3273 /* 3274 * Function unlocks hash bucket. 3275 */ 3276 static void 3277 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3278 { 3279 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3280 SFMMU_HASH_UNLOCK(hmebp); 3281 } 3282 3283 /* 3284 * function which checks and sets up page array for a large 3285 * translation. Will set p_vcolor, p_index, p_ro fields. 3286 * Assumes addr and pfnum of first page are properly aligned. 3287 * Will check for physical contiguity. If check fails it return 3288 * non null. 3289 */ 3290 static int 3291 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3292 { 3293 int i, index, ttesz; 3294 pfn_t pfnum; 3295 pgcnt_t npgs; 3296 page_t *pp, *pp1; 3297 kmutex_t *pmtx; 3298 #ifdef VAC 3299 int osz; 3300 int cflags = 0; 3301 int vac_err = 0; 3302 #endif 3303 int newidx = 0; 3304 3305 ttesz = TTE_CSZ(ttep); 3306 3307 ASSERT(ttesz > TTE8K); 3308 3309 npgs = TTEPAGES(ttesz); 3310 index = PAGESZ_TO_INDEX(ttesz); 3311 3312 pfnum = (*pps)->p_pagenum; 3313 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3314 3315 /* 3316 * Save the first pp so we can do HAT_TMPNC at the end. 3317 */ 3318 pp1 = *pps; 3319 #ifdef VAC 3320 osz = fnd_mapping_sz(pp1); 3321 #endif 3322 3323 for (i = 0; i < npgs; i++, pps++) { 3324 pp = *pps; 3325 ASSERT(PAGE_LOCKED(pp)); 3326 ASSERT(pp->p_szc >= ttesz); 3327 ASSERT(pp->p_szc == pp1->p_szc); 3328 ASSERT(sfmmu_mlist_held(pp)); 3329 3330 /* 3331 * XXX is it possible to maintain P_RO on the root only? 3332 */ 3333 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3334 pmtx = sfmmu_page_enter(pp); 3335 PP_CLRRO(pp); 3336 sfmmu_page_exit(pmtx); 3337 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 3338 !PP_ISMOD(pp)) { 3339 pmtx = sfmmu_page_enter(pp); 3340 if (!(PP_ISMOD(pp))) { 3341 PP_SETRO(pp); 3342 } 3343 sfmmu_page_exit(pmtx); 3344 } 3345 3346 /* 3347 * If this is a remap we skip vac & contiguity checks. 3348 */ 3349 if (remap) 3350 continue; 3351 3352 /* 3353 * set p_vcolor and detect any vac conflicts. 3354 */ 3355 #ifdef VAC 3356 if (vac_err == 0) { 3357 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 3358 3359 } 3360 #endif 3361 3362 /* 3363 * Save current index in case we need to undo it. 3364 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 3365 * "SFMMU_INDEX_SHIFT 6" 3366 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 3367 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 3368 * 3369 * So: index = PAGESZ_TO_INDEX(ttesz); 3370 * if ttesz == 1 then index = 0x2 3371 * 2 then index = 0x4 3372 * 3 then index = 0x8 3373 * 4 then index = 0x10 3374 * 5 then index = 0x20 3375 * The code below checks if it's a new pagesize (ie, newidx) 3376 * in case we need to take it back out of p_index, 3377 * and then or's the new index into the existing index. 
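 *
 * Worked example (hypothetical values): loading a 4M tte (ttesz == 3,
 * index == 0x8) over a page that already has a 64K mapping
 * (p_index == 0x2) gives newidx == 1 and p_index == 0xa; if the
 * contiguity check below later fails, the undo loop strips the 0x8
 * bit back out of p_index for every page visited so far.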
3378 */ 3379 if ((PP_MAPINDEX(pp) & index) == 0) 3380 newidx = 1; 3381 pp->p_index = (PP_MAPINDEX(pp) | index); 3382 3383 /* 3384 * contiguity check 3385 */ 3386 if (pp->p_pagenum != pfnum) { 3387 /* 3388 * If we fail the contiguity test then 3389 * the only thing we need to fix is the p_index field. 3390 * We might get a few extra flushes but since this 3391 * path is rare that is ok. The p_ro field will 3392 * get automatically fixed on the next tteload to 3393 * the page. NO TNC bit is set yet. 3394 */ 3395 while (i >= 0) { 3396 pp = *pps; 3397 if (newidx) 3398 pp->p_index = (PP_MAPINDEX(pp) & 3399 ~index); 3400 pps--; 3401 i--; 3402 } 3403 return (1); 3404 } 3405 pfnum++; 3406 addr += MMU_PAGESIZE; 3407 } 3408 3409 #ifdef VAC 3410 if (vac_err) { 3411 if (ttesz > osz) { 3412 /* 3413 * There are some smaller mappings that cause vac 3414 * conflicts. Convert all existing small mappings to 3415 * TNC. 3416 */ 3417 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3418 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3419 npgs); 3420 } else { 3421 /* EMPTY */ 3422 /* 3423 * If there exists a big page mapping, 3424 * that means the whole existing big page 3425 * has the TNC setting already. No need to convert to 3426 * TNC again. 3427 */ 3428 ASSERT(PP_ISTNC(pp1)); 3429 } 3430 } 3431 #endif /* VAC */ 3432 3433 return (0); 3434 } 3435 3436 #ifdef VAC 3437 /* 3438 * Routine that checks vac consistency for a large page. It also 3439 * sets the virtual color for all pp's of this big mapping. 3440 */ 3441 static int 3442 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3443 { 3444 int vcolor, ocolor; 3445 3446 ASSERT(sfmmu_mlist_held(pp)); 3447 3448 if (PP_ISNC(pp)) { 3449 return (HAT_TMPNC); 3450 } 3451 3452 vcolor = addr_to_vcolor(addr); 3453 if (PP_NEWPAGE(pp)) { 3454 PP_SET_VCOLOR(pp, vcolor); 3455 return (0); 3456 } 3457 3458 ocolor = PP_GET_VCOLOR(pp); 3459 if (ocolor == vcolor) { 3460 return (0); 3461 } 3462 3463 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 3464 /* 3465 * The previous user of the page had a different color 3466 * but since there are no current users 3467 * we just flush the cache and change the color. 3468 * As an optimization for large pages we flush the 3469 * entire cache of that color and set a flag. 3470 */ 3471 SFMMU_STAT(sf_pgcolor_conflict); 3472 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3473 CacheColor_SetFlushed(*cflags, ocolor); 3474 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3475 } 3476 PP_SET_VCOLOR(pp, vcolor); 3477 return (0); 3478 } 3479 3480 /* 3481 * We got a real conflict with a current mapping. 3482 * Set flags to start uncaching all mappings 3483 * and return failure so we restart looping 3484 * over the pp array from the beginning. 3485 */ 3486 return (HAT_TMPNC); 3487 } 3488 #endif /* VAC */ 3489 3490 /* 3491 * Creates a large page shadow hmeblk for a tte. 3492 * The purpose of this routine is to allow us to do quick unloads because 3493 * the vm layer can easily pass a very large but sparsely populated range.
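 *
 * The shadow hblk is created at a larger mapping size than the tte
 * being loaded (a 512K shadow for 8K ttes, otherwise the next size up,
 * per the code below); its hblk_shw_mask records which of its (up to
 * eight) sub-ranges may have child hblks underneath, so the unload
 * path can skip the empty ones.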
3494 */ 3495 static struct hme_blk * 3496 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3497 { 3498 struct hmehash_bucket *hmebp; 3499 hmeblk_tag hblktag; 3500 int hmeshift, size, vshift; 3501 uint_t shw_mask, newshw_mask; 3502 struct hme_blk *hmeblkp; 3503 3504 ASSERT(sfmmup != KHATID); 3505 if (mmu_page_sizes == max_mmu_page_sizes) { 3506 ASSERT(ttesz < TTE256M); 3507 } else { 3508 ASSERT(ttesz < TTE4M); 3509 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3510 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3511 } 3512 3513 if (ttesz == TTE8K) { 3514 size = TTE512K; 3515 } else { 3516 size = ++ttesz; 3517 } 3518 3519 hblktag.htag_id = sfmmup; 3520 hmeshift = HME_HASH_SHIFT(size); 3521 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3522 hblktag.htag_rehash = HME_HASH_REHASH(size); 3523 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3524 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3525 3526 SFMMU_HASH_LOCK(hmebp); 3527 3528 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3529 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3530 if (hmeblkp == NULL) { 3531 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3532 hblktag, flags, SFMMU_INVALID_SHMERID); 3533 } 3534 ASSERT(hmeblkp); 3535 if (!hmeblkp->hblk_shw_mask) { 3536 /* 3537 * if this is an unused hblk it was just allocated or could 3538 * potentially be a previous large page hblk so we need to 3539 * set the shadow bit. 3540 */ 3541 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3542 hmeblkp->hblk_shw_bit = 1; 3543 } else if (hmeblkp->hblk_shw_bit == 0) { 3544 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3545 (void *)hmeblkp); 3546 } 3547 ASSERT(hmeblkp->hblk_shw_bit == 1); 3548 ASSERT(!hmeblkp->hblk_shared); 3549 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3550 ASSERT(vshift < 8); 3551 /* 3552 * Atomically set shw mask bit 3553 */ 3554 do { 3555 shw_mask = hmeblkp->hblk_shw_mask; 3556 newshw_mask = shw_mask | (1 << vshift); 3557 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3558 newshw_mask); 3559 } while (newshw_mask != shw_mask); 3560 3561 SFMMU_HASH_UNLOCK(hmebp); 3562 3563 return (hmeblkp); 3564 } 3565 3566 /* 3567 * This routine cleans up a previous shadow hmeblk and changes it to 3568 * a regular hblk. This happens rarely but it is possible 3569 * when a process wants to use large pages and there are hblks still 3570 * lying around from the previous as that used these hmeblks. 3571 * The alternative was to clean up the shadow hblks at unload time 3572 * but since so few user processes actually use large pages, it is 3573 * better to be lazy and clean up at this time.
3574 */ 3575 static void 3576 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3577 struct hmehash_bucket *hmebp) 3578 { 3579 caddr_t addr, endaddr; 3580 int hashno, size; 3581 3582 ASSERT(hmeblkp->hblk_shw_bit); 3583 ASSERT(!hmeblkp->hblk_shared); 3584 3585 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3586 3587 if (!hmeblkp->hblk_shw_mask) { 3588 hmeblkp->hblk_shw_bit = 0; 3589 return; 3590 } 3591 addr = (caddr_t)get_hblk_base(hmeblkp); 3592 endaddr = get_hblk_endaddr(hmeblkp); 3593 size = get_hblk_ttesz(hmeblkp); 3594 hashno = size - 1; 3595 ASSERT(hashno > 0); 3596 SFMMU_HASH_UNLOCK(hmebp); 3597 3598 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3599 3600 SFMMU_HASH_LOCK(hmebp); 3601 } 3602 3603 static void 3604 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3605 int hashno) 3606 { 3607 int hmeshift, shadow = 0; 3608 hmeblk_tag hblktag; 3609 struct hmehash_bucket *hmebp; 3610 struct hme_blk *hmeblkp; 3611 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3612 3613 ASSERT(hashno > 0); 3614 hblktag.htag_id = sfmmup; 3615 hblktag.htag_rehash = hashno; 3616 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3617 3618 hmeshift = HME_HASH_SHIFT(hashno); 3619 3620 while (addr < endaddr) { 3621 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3622 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3623 SFMMU_HASH_LOCK(hmebp); 3624 /* inline HME_HASH_SEARCH */ 3625 hmeblkp = hmebp->hmeblkp; 3626 pr_hblk = NULL; 3627 while (hmeblkp) { 3628 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3629 /* found hme_blk */ 3630 ASSERT(!hmeblkp->hblk_shared); 3631 if (hmeblkp->hblk_shw_bit) { 3632 if (hmeblkp->hblk_shw_mask) { 3633 shadow = 1; 3634 sfmmu_shadow_hcleanup(sfmmup, 3635 hmeblkp, hmebp); 3636 break; 3637 } else { 3638 hmeblkp->hblk_shw_bit = 0; 3639 } 3640 } 3641 3642 /* 3643 * Hblk_hmecnt and hblk_vcnt could be non-zero 3644 * since hblk_unload() does not guarantee that. 3645 * 3646 * XXX - this could cause tteload() to spin 3647 * where sfmmu_shadow_hcleanup() is called. 3648 */ 3649 } 3650 3651 nx_hblk = hmeblkp->hblk_next; 3652 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3653 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3654 &list, 0); 3655 } else { 3656 pr_hblk = hmeblkp; 3657 } 3658 hmeblkp = nx_hblk; 3659 } 3660 3661 SFMMU_HASH_UNLOCK(hmebp); 3662 3663 if (shadow) { 3664 /* 3665 * We found another shadow hblk, so we cleaned up its 3666 * children. We need to go back and clean up 3667 * the original hblk, so we don't change the 3668 * addr. 3669 */ 3670 shadow = 0; 3671 } else { 3672 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3673 (1 << hmeshift)); 3674 } 3675 } 3676 sfmmu_hblks_list_purge(&list, 0); 3677 } 3678 3679 /* 3680 * This routine's job is to delete stale invalid shared hmeregion hmeblks that 3681 * may still linger on after pageunload.
3682 */ 3683 static void 3684 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3685 { 3686 int hmeshift; 3687 hmeblk_tag hblktag; 3688 struct hmehash_bucket *hmebp; 3689 struct hme_blk *hmeblkp; 3690 struct hme_blk *pr_hblk; 3691 struct hme_blk *list = NULL; 3692 3693 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3694 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3695 3696 hmeshift = HME_HASH_SHIFT(ttesz); 3697 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3698 hblktag.htag_rehash = ttesz; 3699 hblktag.htag_rid = rid; 3700 hblktag.htag_id = srdp; 3701 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3702 3703 SFMMU_HASH_LOCK(hmebp); 3704 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3705 if (hmeblkp != NULL) { 3706 ASSERT(hmeblkp->hblk_shared); 3707 ASSERT(!hmeblkp->hblk_shw_bit); 3708 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3709 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3710 } 3711 ASSERT(!hmeblkp->hblk_lckcnt); 3712 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3713 &list, 0); 3714 } 3715 SFMMU_HASH_UNLOCK(hmebp); 3716 sfmmu_hblks_list_purge(&list, 0); 3717 } 3718 3719 /* ARGSUSED */ 3720 static void 3721 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3722 size_t r_size, void *r_obj, u_offset_t r_objoff) 3723 { 3724 } 3725 3726 /* 3727 * Searches for an hmeblk which maps addr, then unloads this mapping 3728 * and updates *eaddrp, if the hmeblk is found. 3729 */ 3730 static void 3731 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3732 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3733 { 3734 int hmeshift; 3735 hmeblk_tag hblktag; 3736 struct hmehash_bucket *hmebp; 3737 struct hme_blk *hmeblkp; 3738 struct hme_blk *pr_hblk; 3739 struct hme_blk *list = NULL; 3740 3741 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3742 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3743 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3744 3745 hmeshift = HME_HASH_SHIFT(ttesz); 3746 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3747 hblktag.htag_rehash = ttesz; 3748 hblktag.htag_rid = rid; 3749 hblktag.htag_id = srdp; 3750 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3751 3752 SFMMU_HASH_LOCK(hmebp); 3753 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3754 if (hmeblkp != NULL) { 3755 ASSERT(hmeblkp->hblk_shared); 3756 ASSERT(!hmeblkp->hblk_lckcnt); 3757 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3758 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3759 eaddr, NULL, HAT_UNLOAD); 3760 ASSERT(*eaddrp > addr); 3761 } 3762 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3763 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3764 &list, 0); 3765 } 3766 SFMMU_HASH_UNLOCK(hmebp); 3767 sfmmu_hblks_list_purge(&list, 0); 3768 } 3769 3770 static void 3771 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3772 { 3773 int ttesz = rgnp->rgn_pgszc; 3774 size_t rsz = rgnp->rgn_size; 3775 caddr_t rsaddr = rgnp->rgn_saddr; 3776 caddr_t readdr = rsaddr + rsz; 3777 caddr_t rhsaddr; 3778 caddr_t va; 3779 uint_t rid = rgnp->rgn_id; 3780 caddr_t cbsaddr; 3781 caddr_t cbeaddr; 3782 hat_rgn_cb_func_t rcbfunc; 3783 ulong_t cnt; 3784 3785 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3786 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3787 3788 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3789 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3790 if (ttesz < HBLK_MIN_TTESZ) { 3791 ttesz = HBLK_MIN_TTESZ; 3792 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3793 } else { 3794 rhsaddr = rsaddr; 3795 } 3796 3797 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) 
{ 3798 rcbfunc = sfmmu_rgn_cb_noop; 3799 } 3800 3801 while (ttesz >= HBLK_MIN_TTESZ) { 3802 cbsaddr = rsaddr; 3803 cbeaddr = rsaddr; 3804 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3805 ttesz--; 3806 continue; 3807 } 3808 cnt = 0; 3809 va = rsaddr; 3810 while (va < readdr) { 3811 ASSERT(va >= rhsaddr); 3812 if (va != cbeaddr) { 3813 if (cbeaddr != cbsaddr) { 3814 ASSERT(cbeaddr > cbsaddr); 3815 (*rcbfunc)(cbsaddr, cbeaddr, 3816 rsaddr, rsz, rgnp->rgn_obj, 3817 rgnp->rgn_objoff); 3818 } 3819 cbsaddr = va; 3820 cbeaddr = va; 3821 } 3822 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3823 ttesz, &cbeaddr); 3824 cnt++; 3825 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3826 } 3827 if (cbeaddr != cbsaddr) { 3828 ASSERT(cbeaddr > cbsaddr); 3829 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3830 rsz, rgnp->rgn_obj, 3831 rgnp->rgn_objoff); 3832 } 3833 ttesz--; 3834 } 3835 } 3836 3837 /* 3838 * Release one hardware address translation lock on the given address range. 3839 */ 3840 void 3841 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3842 { 3843 struct hmehash_bucket *hmebp; 3844 hmeblk_tag hblktag; 3845 int hmeshift, hashno = 1; 3846 struct hme_blk *hmeblkp, *list = NULL; 3847 caddr_t endaddr; 3848 3849 ASSERT(sfmmup != NULL); 3850 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3851 3852 ASSERT((sfmmup == ksfmmup) || 3853 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3854 ASSERT((len & MMU_PAGEOFFSET) == 0); 3855 endaddr = addr + len; 3856 hblktag.htag_id = sfmmup; 3857 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3858 3859 /* 3860 * Spitfire supports 4 page sizes. 3861 * Most pages are expected to be of the smallest page size (8K) and 3862 * these will not need to be rehashed. 64K pages also don't need to be 3863 * rehashed because an hmeblk spans 64K of address space. 512K pages 3864 * might need 1 rehash and and 4M pages might need 2 rehashes. 3865 */ 3866 while (addr < endaddr) { 3867 hmeshift = HME_HASH_SHIFT(hashno); 3868 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3869 hblktag.htag_rehash = hashno; 3870 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3871 3872 SFMMU_HASH_LOCK(hmebp); 3873 3874 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3875 if (hmeblkp != NULL) { 3876 ASSERT(!hmeblkp->hblk_shared); 3877 /* 3878 * If we encounter a shadow hmeblk then 3879 * we know there are no valid hmeblks mapping 3880 * this address at this size or larger. 3881 * Just increment address by the smallest 3882 * page size. 3883 */ 3884 if (hmeblkp->hblk_shw_bit) { 3885 addr += MMU_PAGESIZE; 3886 } else { 3887 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3888 endaddr); 3889 } 3890 SFMMU_HASH_UNLOCK(hmebp); 3891 hashno = 1; 3892 continue; 3893 } 3894 SFMMU_HASH_UNLOCK(hmebp); 3895 3896 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3897 /* 3898 * We have traversed the whole list and rehashed 3899 * if necessary without finding the address to unlock 3900 * which should never happen. 3901 */ 3902 panic("sfmmu_unlock: addr not found. 
" 3903 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3904 } else { 3905 hashno++; 3906 } 3907 } 3908 3909 sfmmu_hblks_list_purge(&list, 0); 3910 } 3911 3912 void 3913 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 3914 hat_region_cookie_t rcookie) 3915 { 3916 sf_srd_t *srdp; 3917 sf_region_t *rgnp; 3918 int ttesz; 3919 uint_t rid; 3920 caddr_t eaddr; 3921 caddr_t va; 3922 int hmeshift; 3923 hmeblk_tag hblktag; 3924 struct hmehash_bucket *hmebp; 3925 struct hme_blk *hmeblkp; 3926 struct hme_blk *pr_hblk; 3927 struct hme_blk *list; 3928 3929 if (rcookie == HAT_INVALID_REGION_COOKIE) { 3930 hat_unlock(sfmmup, addr, len); 3931 return; 3932 } 3933 3934 ASSERT(sfmmup != NULL); 3935 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3936 ASSERT(sfmmup != ksfmmup); 3937 3938 srdp = sfmmup->sfmmu_srdp; 3939 rid = (uint_t)((uint64_t)rcookie); 3940 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3941 eaddr = addr + len; 3942 va = addr; 3943 list = NULL; 3944 rgnp = srdp->srd_hmergnp[rid]; 3945 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 3946 3947 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 3948 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 3949 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 3950 ttesz = HBLK_MIN_TTESZ; 3951 } else { 3952 ttesz = rgnp->rgn_pgszc; 3953 } 3954 while (va < eaddr) { 3955 while (ttesz < rgnp->rgn_pgszc && 3956 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 3957 ttesz++; 3958 } 3959 while (ttesz >= HBLK_MIN_TTESZ) { 3960 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3961 ttesz--; 3962 continue; 3963 } 3964 hmeshift = HME_HASH_SHIFT(ttesz); 3965 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 3966 hblktag.htag_rehash = ttesz; 3967 hblktag.htag_rid = rid; 3968 hblktag.htag_id = srdp; 3969 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 3970 SFMMU_HASH_LOCK(hmebp); 3971 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, 3972 &list); 3973 if (hmeblkp == NULL) { 3974 SFMMU_HASH_UNLOCK(hmebp); 3975 ttesz--; 3976 continue; 3977 } 3978 ASSERT(hmeblkp->hblk_shared); 3979 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 3980 ASSERT(va >= eaddr || 3981 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 3982 SFMMU_HASH_UNLOCK(hmebp); 3983 break; 3984 } 3985 if (ttesz < HBLK_MIN_TTESZ) { 3986 panic("hat_unlock_region: addr not found " 3987 "addr %p hat %p", (void *)va, (void *)sfmmup); 3988 } 3989 } 3990 sfmmu_hblks_list_purge(&list, 0); 3991 } 3992 3993 /* 3994 * Function to unlock a range of addresses in an hmeblk. It returns the 3995 * next address that needs to be unlocked. 3996 * Should be called with the hash lock held. 
3997 */ 3998 static caddr_t 3999 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 4000 { 4001 struct sf_hment *sfhme; 4002 tte_t tteold, ttemod; 4003 int ttesz, ret; 4004 4005 ASSERT(in_hblk_range(hmeblkp, addr)); 4006 ASSERT(hmeblkp->hblk_shw_bit == 0); 4007 4008 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4009 ttesz = get_hblk_ttesz(hmeblkp); 4010 4011 HBLKTOHME(sfhme, hmeblkp, addr); 4012 while (addr < endaddr) { 4013 readtte: 4014 sfmmu_copytte(&sfhme->hme_tte, &tteold); 4015 if (TTE_IS_VALID(&tteold)) { 4016 4017 ttemod = tteold; 4018 4019 ret = sfmmu_modifytte_try(&tteold, &ttemod, 4020 &sfhme->hme_tte); 4021 4022 if (ret < 0) 4023 goto readtte; 4024 4025 if (hmeblkp->hblk_lckcnt == 0) 4026 panic("zero hblk lckcnt"); 4027 4028 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4029 (uintptr_t)endaddr) 4030 panic("can't unlock large tte"); 4031 4032 ASSERT(hmeblkp->hblk_lckcnt > 0); 4033 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 4034 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4035 } else { 4036 panic("sfmmu_hblk_unlock: invalid tte"); 4037 } 4038 addr += TTEBYTES(ttesz); 4039 sfhme++; 4040 } 4041 return (addr); 4042 } 4043 4044 /* 4045 * Physical Address Mapping Framework 4046 * 4047 * General rules: 4048 * 4049 * (1) Applies only to seg_kmem memory pages. To make things easier, 4050 * seg_kpm addresses are also accepted by the routines, but nothing 4051 * is done with them since by definition their PA mappings are static. 4052 * (2) hat_add_callback() may only be called while holding the page lock 4053 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4054 * or passing HAC_PAGELOCK flag. 4055 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4056 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4057 * callbacks may not sleep or acquire adaptive mutex locks. 4058 * (4) Either prehandler() or posthandler() (but not both) may be specified 4059 * as being NULL. Specifying an errhandler() is optional. 4060 * 4061 * Details of using the framework: 4062 * 4063 * registering a callback (hat_register_callback()) 4064 * 4065 * Pass prehandler, posthandler, errhandler addresses 4066 * as described below. If capture_cpus argument is nonzero, 4067 * suspend callback to the prehandler will occur with CPUs 4068 * captured and executing xc_loop() and CPUs will remain 4069 * captured until after the posthandler suspend callback 4070 * occurs. 4071 * 4072 * adding a callback (hat_add_callback()) 4073 * 4074 * as_pagelock(); 4075 * hat_add_callback(); 4076 * save returned pfn in private data structures or program registers; 4077 * as_pageunlock(); 4078 * 4079 * prehandler() 4080 * 4081 * Stop all accesses by physical address to this memory page. 4082 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4083 * adaptive locks. The second, SUSPEND, is called at high PIL with 4084 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4085 * locks must be XCALL_PIL or higher locks). 4086 * 4087 * May return the following errors: 4088 * EIO: A fatal error has occurred. This will result in panic. 4089 * EAGAIN: The page cannot be suspended. This will fail the 4090 * relocation. 4091 * 0: Success. 4092 * 4093 * posthandler() 4094 * 4095 * Save new pfn in private data structures or program registers; 4096 * not allowed to fail (non-zero return values will result in panic). 4097 * 4098 * errhandler() 4099 * 4100 * called when an error occurs related to the callback. 
Currently 4101 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4102 * a page is being freed, but there are still outstanding callback(s) 4103 * registered on the page. 4104 * 4105 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4106 * 4107 * stop using physical address 4108 * hat_delete_callback(); 4109 * 4110 */ 4111 4112 /* 4113 * Register a callback class. Each subsystem should do this once and 4114 * cache the id_t returned for use in setting up and tearing down callbacks. 4115 * 4116 * There is no facility for removing callback IDs once they are created; 4117 * the "key" should be unique for each module, so in case a module is unloaded 4118 * and subsequently re-loaded, we can recycle the module's previous entry. 4119 */ 4120 id_t 4121 hat_register_callback(int key, 4122 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4123 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4124 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4125 int capture_cpus) 4126 { 4127 id_t id; 4128 4129 /* 4130 * Search the table for a pre-existing callback associated with 4131 * the identifier "key". If one exists, we re-use that entry in 4132 * the table for this instance, otherwise we assign the next 4133 * available table slot. 4134 */ 4135 for (id = 0; id < sfmmu_max_cb_id; id++) { 4136 if (sfmmu_cb_table[id].key == key) 4137 break; 4138 } 4139 4140 if (id == sfmmu_max_cb_id) { 4141 id = sfmmu_cb_nextid++; 4142 if (id >= sfmmu_max_cb_id) 4143 panic("hat_register_callback: out of callback IDs"); 4144 } 4145 4146 ASSERT(prehandler != NULL || posthandler != NULL); 4147 4148 sfmmu_cb_table[id].key = key; 4149 sfmmu_cb_table[id].prehandler = prehandler; 4150 sfmmu_cb_table[id].posthandler = posthandler; 4151 sfmmu_cb_table[id].errhandler = errhandler; 4152 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4153 4154 return (id); 4155 } 4156 4157 #define HAC_COOKIE_NONE (void *)-1 4158 4159 /* 4160 * Add relocation callbacks to the specified addr/len which will be called 4161 * when relocating the associated page. See the description of pre and 4162 * posthandler above for more details. 4163 * 4164 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4165 * locked internally so the caller must be able to deal with the callback 4166 * running even before this function has returned. If HAC_PAGELOCK is not 4167 * set, it is assumed that the underlying memory pages are locked. 4168 * 4169 * Since the caller must track the individual page boundaries anyway, 4170 * we only allow a callback to be added to a single page (large 4171 * or small). Thus [addr, addr + len) MUST be contained within a single 4172 * page. 4173 * 4174 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4175 * _provided_that_ a unique parameter is specified for each callback. 4176 * If multiple callbacks are registered on the same range the callback will 4177 * be invoked with each unique parameter. Registering the same callback with 4178 * the same argument more than once will result in corrupted kernel state. 4179 * 4180 * Returns the pfn of the underlying kernel page in *rpfn 4181 * on success, or PFN_INVALID on failure. 4182 * 4183 * cookiep (if passed) provides storage space for an opaque cookie 4184 * to return later to hat_delete_callback(). This cookie makes the callback 4185 * deletion significantly quicker by avoiding a potentially lengthy hash 4186 * search. 
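 *
 * As an illustrative sketch only (the xx_* names are hypothetical and not
 * part of this file), a subsystem that needs a stable pfn for a seg_kmem
 * buffer might use the framework roughly like this:
 *
 *	xx_cb_id = hat_register_callback(xx_key,
 *	    xx_prehandler, xx_posthandler, xx_errhandler, 0);
 *
 *	as_pagelock();
 *	error = hat_add_callback(xx_cb_id, buf, len, HAC_SLEEP,
 *	    xx_pvt, &xx_pfn, &xx_cookie);
 *	as_pageunlock();
 *
 *	on success, program the hardware with xx_pfn; xx_posthandler()
 *	must pick up the new pfn if the page is later relocated, and
 *	xx_cookie is saved for hat_delete_callback() at teardown.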
4187 * 4188 * Returns values: 4189 * 0: success 4190 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4191 * EINVAL: callback ID is not valid 4192 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4193 * space 4194 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4195 */ 4196 int 4197 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4198 void *pvt, pfn_t *rpfn, void **cookiep) 4199 { 4200 struct hmehash_bucket *hmebp; 4201 hmeblk_tag hblktag; 4202 struct hme_blk *hmeblkp; 4203 int hmeshift, hashno; 4204 caddr_t saddr, eaddr, baseaddr; 4205 struct pa_hment *pahmep; 4206 struct sf_hment *sfhmep, *osfhmep; 4207 kmutex_t *pml; 4208 tte_t tte; 4209 page_t *pp; 4210 vnode_t *vp; 4211 u_offset_t off; 4212 pfn_t pfn; 4213 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4214 int locked = 0; 4215 4216 /* 4217 * For KPM mappings, just return the physical address since we 4218 * don't need to register any callbacks. 4219 */ 4220 if (IS_KPM_ADDR(vaddr)) { 4221 uint64_t paddr; 4222 SFMMU_KPM_VTOP(vaddr, paddr); 4223 *rpfn = btop(paddr); 4224 if (cookiep != NULL) 4225 *cookiep = HAC_COOKIE_NONE; 4226 return (0); 4227 } 4228 4229 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4230 *rpfn = PFN_INVALID; 4231 return (EINVAL); 4232 } 4233 4234 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4235 *rpfn = PFN_INVALID; 4236 return (ENOMEM); 4237 } 4238 4239 sfhmep = &pahmep->sfment; 4240 4241 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4242 eaddr = saddr + len; 4243 4244 rehash: 4245 /* Find the mapping(s) for this page */ 4246 for (hashno = TTE64K, hmeblkp = NULL; 4247 hmeblkp == NULL && hashno <= mmu_hashcnt; 4248 hashno++) { 4249 hmeshift = HME_HASH_SHIFT(hashno); 4250 hblktag.htag_id = ksfmmup; 4251 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4252 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4253 hblktag.htag_rehash = hashno; 4254 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4255 4256 SFMMU_HASH_LOCK(hmebp); 4257 4258 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4259 4260 if (hmeblkp == NULL) 4261 SFMMU_HASH_UNLOCK(hmebp); 4262 } 4263 4264 if (hmeblkp == NULL) { 4265 kmem_cache_free(pa_hment_cache, pahmep); 4266 *rpfn = PFN_INVALID; 4267 return (ENXIO); 4268 } 4269 4270 ASSERT(!hmeblkp->hblk_shared); 4271 4272 HBLKTOHME(osfhmep, hmeblkp, saddr); 4273 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4274 4275 if (!TTE_IS_VALID(&tte)) { 4276 SFMMU_HASH_UNLOCK(hmebp); 4277 kmem_cache_free(pa_hment_cache, pahmep); 4278 *rpfn = PFN_INVALID; 4279 return (ENXIO); 4280 } 4281 4282 /* 4283 * Make sure the boundaries for the callback fall within this 4284 * single mapping. 4285 */ 4286 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4287 ASSERT(saddr >= baseaddr); 4288 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4289 SFMMU_HASH_UNLOCK(hmebp); 4290 kmem_cache_free(pa_hment_cache, pahmep); 4291 *rpfn = PFN_INVALID; 4292 return (ERANGE); 4293 } 4294 4295 pfn = sfmmu_ttetopfn(&tte, vaddr); 4296 4297 /* 4298 * The pfn may not have a page_t underneath in which case we 4299 * just return it. This can happen if we are doing I/O to a 4300 * static portion of the kernel's address space, for instance. 
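 * No pa_hment is set up in that case; the pfn is returned and the cookie
 * is set to HAC_COOKIE_NONE, so a later hat_delete_callback() with that
 * cookie is a no-op.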
4301 */ 4302 pp = osfhmep->hme_page; 4303 if (pp == NULL) { 4304 SFMMU_HASH_UNLOCK(hmebp); 4305 kmem_cache_free(pa_hment_cache, pahmep); 4306 *rpfn = pfn; 4307 if (cookiep) 4308 *cookiep = HAC_COOKIE_NONE; 4309 return (0); 4310 } 4311 ASSERT(pp == PP_PAGEROOT(pp)); 4312 4313 vp = pp->p_vnode; 4314 off = pp->p_offset; 4315 4316 pml = sfmmu_mlist_enter(pp); 4317 4318 if (flags & HAC_PAGELOCK) { 4319 if (!page_trylock(pp, SE_SHARED)) { 4320 /* 4321 * Somebody is holding SE_EXCL lock. Might 4322 * even be hat_page_relocate(). Drop all 4323 * our locks, lookup the page in &kvp, and 4324 * retry. If it doesn't exist in &kvp and &zvp, 4325 * then we must be dealing with a kernel mapped 4326 * page which doesn't actually belong to 4327 * segkmem so we punt. 4328 */ 4329 sfmmu_mlist_exit(pml); 4330 SFMMU_HASH_UNLOCK(hmebp); 4331 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4332 4333 /* check zvp before giving up */ 4334 if (pp == NULL) 4335 pp = page_lookup(&zvp, (u_offset_t)saddr, 4336 SE_SHARED); 4337 4338 /* Okay, we didn't find it, give up */ 4339 if (pp == NULL) { 4340 kmem_cache_free(pa_hment_cache, pahmep); 4341 *rpfn = pfn; 4342 if (cookiep) 4343 *cookiep = HAC_COOKIE_NONE; 4344 return (0); 4345 } 4346 page_unlock(pp); 4347 goto rehash; 4348 } 4349 locked = 1; 4350 } 4351 4352 if (!PAGE_LOCKED(pp) && !panicstr) 4353 panic("hat_add_callback: page 0x%p not locked", (void *)pp); 4354 4355 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4356 pp->p_offset != off) { 4357 /* 4358 * The page moved before we got our hands on it. Drop 4359 * all the locks and try again. 4360 */ 4361 ASSERT((flags & HAC_PAGELOCK) != 0); 4362 sfmmu_mlist_exit(pml); 4363 SFMMU_HASH_UNLOCK(hmebp); 4364 page_unlock(pp); 4365 locked = 0; 4366 goto rehash; 4367 } 4368 4369 if (!VN_ISKAS(vp)) { 4370 /* 4371 * This is not a segkmem page but another page which 4372 * has been kernel mapped. It had better have at least 4373 * a share lock on it. Return the pfn. 4374 */ 4375 sfmmu_mlist_exit(pml); 4376 SFMMU_HASH_UNLOCK(hmebp); 4377 if (locked) 4378 page_unlock(pp); 4379 kmem_cache_free(pa_hment_cache, pahmep); 4380 ASSERT(PAGE_LOCKED(pp)); 4381 *rpfn = pfn; 4382 if (cookiep) 4383 *cookiep = HAC_COOKIE_NONE; 4384 return (0); 4385 } 4386 4387 /* 4388 * Setup this pa_hment and link its embedded dummy sf_hment into 4389 * the mapping list. 4390 */ 4391 pp->p_share++; 4392 pahmep->cb_id = callback_id; 4393 pahmep->addr = vaddr; 4394 pahmep->len = len; 4395 pahmep->refcnt = 1; 4396 pahmep->flags = 0; 4397 pahmep->pvt = pvt; 4398 4399 sfhmep->hme_tte.ll = 0; 4400 sfhmep->hme_data = pahmep; 4401 sfhmep->hme_prev = osfhmep; 4402 sfhmep->hme_next = osfhmep->hme_next; 4403 4404 if (osfhmep->hme_next) 4405 osfhmep->hme_next->hme_prev = sfhmep; 4406 4407 osfhmep->hme_next = sfhmep; 4408 4409 sfmmu_mlist_exit(pml); 4410 SFMMU_HASH_UNLOCK(hmebp); 4411 4412 if (locked) 4413 page_unlock(pp); 4414 4415 *rpfn = pfn; 4416 if (cookiep) 4417 *cookiep = (void *)pahmep; 4418 4419 return (0); 4420 } 4421 4422 /* 4423 * Remove the relocation callbacks from the specified addr/len. 
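 * Passing back the cookie returned by hat_add_callback() lets the
 * pa_hment be found directly; with a NULL cookie the page's mapping list
 * is searched for a pa_hment whose (vaddr, len, pvt) triple matches.
 * A minimal sketch (hypothetical names, mirroring the add example above):
 *
 *	hat_delete_callback(buf, len, xx_pvt, xx_flags, xx_cookie);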
4424 */ 4425 void 4426 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4427 void *cookie) 4428 { 4429 struct hmehash_bucket *hmebp; 4430 hmeblk_tag hblktag; 4431 struct hme_blk *hmeblkp; 4432 int hmeshift, hashno; 4433 caddr_t saddr; 4434 struct pa_hment *pahmep; 4435 struct sf_hment *sfhmep, *osfhmep; 4436 kmutex_t *pml; 4437 tte_t tte; 4438 page_t *pp; 4439 vnode_t *vp; 4440 u_offset_t off; 4441 int locked = 0; 4442 4443 /* 4444 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4445 * remove so just return. 4446 */ 4447 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4448 return; 4449 4450 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4451 4452 rehash: 4453 /* Find the mapping(s) for this page */ 4454 for (hashno = TTE64K, hmeblkp = NULL; 4455 hmeblkp == NULL && hashno <= mmu_hashcnt; 4456 hashno++) { 4457 hmeshift = HME_HASH_SHIFT(hashno); 4458 hblktag.htag_id = ksfmmup; 4459 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4460 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4461 hblktag.htag_rehash = hashno; 4462 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4463 4464 SFMMU_HASH_LOCK(hmebp); 4465 4466 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4467 4468 if (hmeblkp == NULL) 4469 SFMMU_HASH_UNLOCK(hmebp); 4470 } 4471 4472 if (hmeblkp == NULL) 4473 return; 4474 4475 ASSERT(!hmeblkp->hblk_shared); 4476 4477 HBLKTOHME(osfhmep, hmeblkp, saddr); 4478 4479 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4480 if (!TTE_IS_VALID(&tte)) { 4481 SFMMU_HASH_UNLOCK(hmebp); 4482 return; 4483 } 4484 4485 pp = osfhmep->hme_page; 4486 if (pp == NULL) { 4487 SFMMU_HASH_UNLOCK(hmebp); 4488 ASSERT(cookie == NULL); 4489 return; 4490 } 4491 4492 vp = pp->p_vnode; 4493 off = pp->p_offset; 4494 4495 pml = sfmmu_mlist_enter(pp); 4496 4497 if (flags & HAC_PAGELOCK) { 4498 if (!page_trylock(pp, SE_SHARED)) { 4499 /* 4500 * Somebody is holding SE_EXCL lock. Might 4501 * even be hat_page_relocate(). Drop all 4502 * our locks, lookup the page in &kvp, and 4503 * retry. If it doesn't exist in &kvp and &zvp, 4504 * then we must be dealing with a kernel mapped 4505 * page which doesn't actually belong to 4506 * segkmem so we punt. 4507 */ 4508 sfmmu_mlist_exit(pml); 4509 SFMMU_HASH_UNLOCK(hmebp); 4510 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4511 /* check zvp before giving up */ 4512 if (pp == NULL) 4513 pp = page_lookup(&zvp, (u_offset_t)saddr, 4514 SE_SHARED); 4515 4516 if (pp == NULL) { 4517 ASSERT(cookie == NULL); 4518 return; 4519 } 4520 page_unlock(pp); 4521 goto rehash; 4522 } 4523 locked = 1; 4524 } 4525 4526 ASSERT(PAGE_LOCKED(pp)); 4527 4528 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4529 pp->p_offset != off) { 4530 /* 4531 * The page moved before we got our hands on it. Drop 4532 * all the locks and try again. 4533 */ 4534 ASSERT((flags & HAC_PAGELOCK) != 0); 4535 sfmmu_mlist_exit(pml); 4536 SFMMU_HASH_UNLOCK(hmebp); 4537 page_unlock(pp); 4538 locked = 0; 4539 goto rehash; 4540 } 4541 4542 if (!VN_ISKAS(vp)) { 4543 /* 4544 * This is not a segkmem page but another page which 4545 * has been kernel mapped. 
4546 */ 4547 sfmmu_mlist_exit(pml); 4548 SFMMU_HASH_UNLOCK(hmebp); 4549 if (locked) 4550 page_unlock(pp); 4551 ASSERT(cookie == NULL); 4552 return; 4553 } 4554 4555 if (cookie != NULL) { 4556 pahmep = (struct pa_hment *)cookie; 4557 sfhmep = &pahmep->sfment; 4558 } else { 4559 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4560 sfhmep = sfhmep->hme_next) { 4561 4562 /* 4563 * skip va<->pa mappings 4564 */ 4565 if (!IS_PAHME(sfhmep)) 4566 continue; 4567 4568 pahmep = sfhmep->hme_data; 4569 ASSERT(pahmep != NULL); 4570 4571 /* 4572 * if pa_hment matches, remove it 4573 */ 4574 if ((pahmep->pvt == pvt) && 4575 (pahmep->addr == vaddr) && 4576 (pahmep->len == len)) { 4577 break; 4578 } 4579 } 4580 } 4581 4582 if (sfhmep == NULL) { 4583 if (!panicstr) { 4584 panic("hat_delete_callback: pa_hment not found, pp %p", 4585 (void *)pp); 4586 } 4587 return; 4588 } 4589 4590 /* 4591 * Note: at this point a valid kernel mapping must still be 4592 * present on this page. 4593 */ 4594 pp->p_share--; 4595 if (pp->p_share <= 0) 4596 panic("hat_delete_callback: zero p_share"); 4597 4598 if (--pahmep->refcnt == 0) { 4599 if (pahmep->flags != 0) 4600 panic("hat_delete_callback: pa_hment is busy"); 4601 4602 /* 4603 * Remove sfhmep from the mapping list for the page. 4604 */ 4605 if (sfhmep->hme_prev) { 4606 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4607 } else { 4608 pp->p_mapping = sfhmep->hme_next; 4609 } 4610 4611 if (sfhmep->hme_next) 4612 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4613 4614 sfmmu_mlist_exit(pml); 4615 SFMMU_HASH_UNLOCK(hmebp); 4616 4617 if (locked) 4618 page_unlock(pp); 4619 4620 kmem_cache_free(pa_hment_cache, pahmep); 4621 return; 4622 } 4623 4624 sfmmu_mlist_exit(pml); 4625 SFMMU_HASH_UNLOCK(hmebp); 4626 if (locked) 4627 page_unlock(pp); 4628 } 4629 4630 /* 4631 * hat_probe returns 1 if the translation for the address 'addr' is 4632 * loaded, zero otherwise. 4633 * 4634 * hat_probe should be used only for advisorary purposes because it may 4635 * occasionally return the wrong value. The implementation must guarantee that 4636 * returning the wrong value is a very rare event. hat_probe is used 4637 * to implement optimizations in the segment drivers. 
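 * For example (an illustrative sketch only, not code from this file), a
 * segment driver deciding whether a read-ahead fault is worth taking
 * might check:
 *
 *	if (hat_probe(as->a_hat, addr) == 0)
 *		the translation is not loaded; go ahead and fault it in;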
4638 * 4639 */ 4640 int 4641 hat_probe(struct hat *sfmmup, caddr_t addr) 4642 { 4643 pfn_t pfn; 4644 tte_t tte; 4645 4646 ASSERT(sfmmup != NULL); 4647 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4648 4649 ASSERT((sfmmup == ksfmmup) || 4650 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4651 4652 if (sfmmup == ksfmmup) { 4653 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4654 == PFN_SUSPENDED) { 4655 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4656 } 4657 } else { 4658 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4659 } 4660 4661 if (pfn != PFN_INVALID) 4662 return (1); 4663 else 4664 return (0); 4665 } 4666 4667 ssize_t 4668 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4669 { 4670 tte_t tte; 4671 4672 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4673 4674 if (sfmmup == ksfmmup) { 4675 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4676 return (-1); 4677 } 4678 } else { 4679 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4680 return (-1); 4681 } 4682 } 4683 4684 ASSERT(TTE_IS_VALID(&tte)); 4685 return (TTEBYTES(TTE_CSZ(&tte))); 4686 } 4687 4688 uint_t 4689 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4690 { 4691 tte_t tte; 4692 4693 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4694 4695 if (sfmmup == ksfmmup) { 4696 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4697 tte.ll = 0; 4698 } 4699 } else { 4700 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4701 tte.ll = 0; 4702 } 4703 } 4704 if (TTE_IS_VALID(&tte)) { 4705 *attr = sfmmu_ptov_attr(&tte); 4706 return (0); 4707 } 4708 *attr = 0; 4709 return ((uint_t)0xffffffff); 4710 } 4711 4712 /* 4713 * Enables more attributes on specified address range (ie. logical OR) 4714 */ 4715 void 4716 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4717 { 4718 if (hat->sfmmu_xhat_provider) { 4719 XHAT_SETATTR(hat, addr, len, attr); 4720 return; 4721 } else { 4722 /* 4723 * This must be a CPU HAT. If the address space has 4724 * XHATs attached, change attributes for all of them, 4725 * just in case 4726 */ 4727 ASSERT(hat->sfmmu_as != NULL); 4728 if (hat->sfmmu_as->a_xhat != NULL) 4729 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4730 } 4731 4732 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4733 } 4734 4735 /* 4736 * Assigns attributes to the specified address range. All the attributes 4737 * are specified. 4738 */ 4739 void 4740 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4741 { 4742 if (hat->sfmmu_xhat_provider) { 4743 XHAT_CHGATTR(hat, addr, len, attr); 4744 return; 4745 } else { 4746 /* 4747 * This must be a CPU HAT. If the address space has 4748 * XHATs attached, change attributes for all of them, 4749 * just in case 4750 */ 4751 ASSERT(hat->sfmmu_as != NULL); 4752 if (hat->sfmmu_as->a_xhat != NULL) 4753 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4754 } 4755 4756 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4757 } 4758 4759 /* 4760 * Remove attributes on the specified address range (ie. loginal NAND) 4761 */ 4762 void 4763 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4764 { 4765 if (hat->sfmmu_xhat_provider) { 4766 XHAT_CLRATTR(hat, addr, len, attr); 4767 return; 4768 } else { 4769 /* 4770 * This must be a CPU HAT. 
If the address space has 4771 * XHATs attached, change attributes for all of them, 4772 * just in case 4773 */ 4774 ASSERT(hat->sfmmu_as != NULL); 4775 if (hat->sfmmu_as->a_xhat != NULL) 4776 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4777 } 4778 4779 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4780 } 4781 4782 /* 4783 * Change attributes on an address range to that specified by attr and mode. 4784 */ 4785 static void 4786 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4787 int mode) 4788 { 4789 struct hmehash_bucket *hmebp; 4790 hmeblk_tag hblktag; 4791 int hmeshift, hashno = 1; 4792 struct hme_blk *hmeblkp, *list = NULL; 4793 caddr_t endaddr; 4794 cpuset_t cpuset; 4795 demap_range_t dmr; 4796 4797 CPUSET_ZERO(cpuset); 4798 4799 ASSERT((sfmmup == ksfmmup) || 4800 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4801 ASSERT((len & MMU_PAGEOFFSET) == 0); 4802 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4803 4804 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4805 ((addr + len) > (caddr_t)USERLIMIT)) { 4806 panic("user addr %p in kernel space", 4807 (void *)addr); 4808 } 4809 4810 endaddr = addr + len; 4811 hblktag.htag_id = sfmmup; 4812 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4813 DEMAP_RANGE_INIT(sfmmup, &dmr); 4814 4815 while (addr < endaddr) { 4816 hmeshift = HME_HASH_SHIFT(hashno); 4817 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4818 hblktag.htag_rehash = hashno; 4819 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4820 4821 SFMMU_HASH_LOCK(hmebp); 4822 4823 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4824 if (hmeblkp != NULL) { 4825 ASSERT(!hmeblkp->hblk_shared); 4826 /* 4827 * We've encountered a shadow hmeblk so skip the range 4828 * of the next smaller mapping size. 4829 */ 4830 if (hmeblkp->hblk_shw_bit) { 4831 ASSERT(sfmmup != ksfmmup); 4832 ASSERT(hashno > 1); 4833 addr = (caddr_t)P2END((uintptr_t)addr, 4834 TTEBYTES(hashno - 1)); 4835 } else { 4836 addr = sfmmu_hblk_chgattr(sfmmup, 4837 hmeblkp, addr, endaddr, &dmr, attr, mode); 4838 } 4839 SFMMU_HASH_UNLOCK(hmebp); 4840 hashno = 1; 4841 continue; 4842 } 4843 SFMMU_HASH_UNLOCK(hmebp); 4844 4845 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4846 /* 4847 * We have traversed the whole list and rehashed 4848 * if necessary without finding the address to chgattr. 4849 * This is ok, so we increment the address by the 4850 * smallest hmeblk range for kernel mappings or for 4851 * user mappings with no large pages, and the largest 4852 * hmeblk range, to account for shadow hmeblks, for 4853 * user mappings with large pages and continue. 4854 */ 4855 if (sfmmup == ksfmmup) 4856 addr = (caddr_t)P2END((uintptr_t)addr, 4857 TTEBYTES(1)); 4858 else 4859 addr = (caddr_t)P2END((uintptr_t)addr, 4860 TTEBYTES(hashno)); 4861 hashno = 1; 4862 } else { 4863 hashno++; 4864 } 4865 } 4866 4867 sfmmu_hblks_list_purge(&list, 0); 4868 DEMAP_RANGE_FLUSH(&dmr); 4869 cpuset = sfmmup->sfmmu_cpusran; 4870 xt_sync(cpuset); 4871 } 4872 4873 /* 4874 * This function chgattr on a range of addresses in an hmeblk. It returns the 4875 * next addres that needs to be chgattr. 4876 * It should be called with the hash lock held. 4877 * XXX It should be possible to optimize chgattr by not flushing every time but 4878 * on the other hand: 4879 * 1. do one flush crosscall. 4880 * 2. 
only flush if we are increasing permissions (make sure this will work) 4881 */ 4882 static caddr_t 4883 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4884 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4885 { 4886 tte_t tte, tteattr, tteflags, ttemod; 4887 struct sf_hment *sfhmep; 4888 int ttesz; 4889 struct page *pp = NULL; 4890 kmutex_t *pml, *pmtx; 4891 int ret; 4892 int use_demap_range; 4893 #if defined(SF_ERRATA_57) 4894 int check_exec; 4895 #endif 4896 4897 ASSERT(in_hblk_range(hmeblkp, addr)); 4898 ASSERT(hmeblkp->hblk_shw_bit == 0); 4899 ASSERT(!hmeblkp->hblk_shared); 4900 4901 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4902 ttesz = get_hblk_ttesz(hmeblkp); 4903 4904 /* 4905 * Flush the current demap region if addresses have been 4906 * skipped or the page size doesn't match. 4907 */ 4908 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4909 if (use_demap_range) { 4910 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4911 } else { 4912 DEMAP_RANGE_FLUSH(dmrp); 4913 } 4914 4915 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4916 #if defined(SF_ERRATA_57) 4917 check_exec = (sfmmup != ksfmmup) && 4918 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4919 TTE_IS_EXECUTABLE(&tteattr); 4920 #endif 4921 HBLKTOHME(sfhmep, hmeblkp, addr); 4922 while (addr < endaddr) { 4923 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4924 if (TTE_IS_VALID(&tte)) { 4925 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4926 /* 4927 * if the new attr is the same as old 4928 * continue 4929 */ 4930 goto next_addr; 4931 } 4932 if (!TTE_IS_WRITABLE(&tteattr)) { 4933 /* 4934 * make sure we clear hw modify bit if we 4935 * removing write protections 4936 */ 4937 tteflags.tte_intlo |= TTE_HWWR_INT; 4938 } 4939 4940 pml = NULL; 4941 pp = sfhmep->hme_page; 4942 if (pp) { 4943 pml = sfmmu_mlist_enter(pp); 4944 } 4945 4946 if (pp != sfhmep->hme_page) { 4947 /* 4948 * tte must have been unloaded. 4949 */ 4950 ASSERT(pml); 4951 sfmmu_mlist_exit(pml); 4952 continue; 4953 } 4954 4955 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4956 4957 ttemod = tte; 4958 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4959 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4960 4961 #if defined(SF_ERRATA_57) 4962 if (check_exec && addr < errata57_limit) 4963 ttemod.tte_exec_perm = 0; 4964 #endif 4965 ret = sfmmu_modifytte_try(&tte, &ttemod, 4966 &sfhmep->hme_tte); 4967 4968 if (ret < 0) { 4969 /* tte changed underneath us */ 4970 if (pml) { 4971 sfmmu_mlist_exit(pml); 4972 } 4973 continue; 4974 } 4975 4976 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4977 /* 4978 * need to sync if we are clearing modify bit. 4979 */ 4980 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4981 } 4982 4983 if (pp && PP_ISRO(pp)) { 4984 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4985 pmtx = sfmmu_page_enter(pp); 4986 PP_CLRRO(pp); 4987 sfmmu_page_exit(pmtx); 4988 } 4989 } 4990 4991 if (ret > 0 && use_demap_range) { 4992 DEMAP_RANGE_MARKPG(dmrp, addr); 4993 } else if (ret > 0) { 4994 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4995 } 4996 4997 if (pml) { 4998 sfmmu_mlist_exit(pml); 4999 } 5000 } 5001 next_addr: 5002 addr += TTEBYTES(ttesz); 5003 sfhmep++; 5004 DEMAP_RANGE_NEXTPG(dmrp); 5005 } 5006 return (addr); 5007 } 5008 5009 /* 5010 * This routine converts virtual attributes to physical ones. It will 5011 * update the tteflags field with the tte mask corresponding to the attributes 5012 * affected and it returns the new attributes. It will also clear the modify 5013 * bit if we are taking away write permission. 
This is necessary since the 5014 * modify bit is the hardware permission bit and we need to clear it in order 5015 * to detect write faults. 5016 */ 5017 static uint64_t 5018 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 5019 { 5020 tte_t ttevalue; 5021 5022 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5023 5024 switch (mode) { 5025 case SFMMU_CHGATTR: 5026 /* all attributes specified */ 5027 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5028 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5029 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5030 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5031 break; 5032 case SFMMU_SETATTR: 5033 ASSERT(!(attr & ~HAT_PROT_MASK)); 5034 ttemaskp->ll = 0; 5035 ttevalue.ll = 0; 5036 /* 5037 * a valid tte implies exec and read for sfmmu 5038 * so no need to do anything about them. 5039 * since priviledged access implies user access 5040 * PROT_USER doesn't make sense either. 5041 */ 5042 if (attr & PROT_WRITE) { 5043 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5044 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5045 } 5046 break; 5047 case SFMMU_CLRATTR: 5048 /* attributes will be nand with current ones */ 5049 if (attr & ~(PROT_WRITE | PROT_USER)) { 5050 panic("sfmmu: attr %x not supported", attr); 5051 } 5052 ttemaskp->ll = 0; 5053 ttevalue.ll = 0; 5054 if (attr & PROT_WRITE) { 5055 /* clear both writable and modify bit */ 5056 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5057 } 5058 if (attr & PROT_USER) { 5059 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5060 ttevalue.tte_intlo |= TTE_PRIV_INT; 5061 } 5062 break; 5063 default: 5064 panic("sfmmu_vtop_attr: bad mode %x", mode); 5065 } 5066 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5067 return (ttevalue.ll); 5068 } 5069 5070 static uint_t 5071 sfmmu_ptov_attr(tte_t *ttep) 5072 { 5073 uint_t attr; 5074 5075 ASSERT(TTE_IS_VALID(ttep)); 5076 5077 attr = PROT_READ; 5078 5079 if (TTE_IS_WRITABLE(ttep)) { 5080 attr |= PROT_WRITE; 5081 } 5082 if (TTE_IS_EXECUTABLE(ttep)) { 5083 attr |= PROT_EXEC; 5084 } 5085 if (!TTE_IS_PRIVILEGED(ttep)) { 5086 attr |= PROT_USER; 5087 } 5088 if (TTE_IS_NFO(ttep)) { 5089 attr |= HAT_NOFAULT; 5090 } 5091 if (TTE_IS_NOSYNC(ttep)) { 5092 attr |= HAT_NOSYNC; 5093 } 5094 if (TTE_IS_SIDEFFECT(ttep)) { 5095 attr |= SFMMU_SIDEFFECT; 5096 } 5097 if (!TTE_IS_VCACHEABLE(ttep)) { 5098 attr |= SFMMU_UNCACHEVTTE; 5099 } 5100 if (!TTE_IS_PCACHEABLE(ttep)) { 5101 attr |= SFMMU_UNCACHEPTTE; 5102 } 5103 return (attr); 5104 } 5105 5106 /* 5107 * hat_chgprot is a deprecated hat call. New segment drivers 5108 * should store all attributes and use hat_*attr calls. 5109 * 5110 * Change the protections in the virtual address range 5111 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5112 * then remove write permission, leaving the other 5113 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 5114 * 5115 */ 5116 void 5117 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5118 { 5119 struct hmehash_bucket *hmebp; 5120 hmeblk_tag hblktag; 5121 int hmeshift, hashno = 1; 5122 struct hme_blk *hmeblkp, *list = NULL; 5123 caddr_t endaddr; 5124 cpuset_t cpuset; 5125 demap_range_t dmr; 5126 5127 ASSERT((len & MMU_PAGEOFFSET) == 0); 5128 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5129 5130 if (sfmmup->sfmmu_xhat_provider) { 5131 XHAT_CHGPROT(sfmmup, addr, len, vprot); 5132 return; 5133 } else { 5134 /* 5135 * This must be a CPU HAT. 
If the address space has 5136 * XHATs attached, change attributes for all of them, 5137 * just in case 5138 */ 5139 ASSERT(sfmmup->sfmmu_as != NULL); 5140 if (sfmmup->sfmmu_as->a_xhat != NULL) 5141 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 5142 } 5143 5144 CPUSET_ZERO(cpuset); 5145 5146 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 5147 ((addr + len) > (caddr_t)USERLIMIT)) { 5148 panic("user addr %p vprot %x in kernel space", 5149 (void *)addr, vprot); 5150 } 5151 endaddr = addr + len; 5152 hblktag.htag_id = sfmmup; 5153 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5154 DEMAP_RANGE_INIT(sfmmup, &dmr); 5155 5156 while (addr < endaddr) { 5157 hmeshift = HME_HASH_SHIFT(hashno); 5158 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5159 hblktag.htag_rehash = hashno; 5160 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5161 5162 SFMMU_HASH_LOCK(hmebp); 5163 5164 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5165 if (hmeblkp != NULL) { 5166 ASSERT(!hmeblkp->hblk_shared); 5167 /* 5168 * We've encountered a shadow hmeblk so skip the range 5169 * of the next smaller mapping size. 5170 */ 5171 if (hmeblkp->hblk_shw_bit) { 5172 ASSERT(sfmmup != ksfmmup); 5173 ASSERT(hashno > 1); 5174 addr = (caddr_t)P2END((uintptr_t)addr, 5175 TTEBYTES(hashno - 1)); 5176 } else { 5177 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 5178 addr, endaddr, &dmr, vprot); 5179 } 5180 SFMMU_HASH_UNLOCK(hmebp); 5181 hashno = 1; 5182 continue; 5183 } 5184 SFMMU_HASH_UNLOCK(hmebp); 5185 5186 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5187 /* 5188 * We have traversed the whole list and rehashed 5189 * if necessary without finding the address to chgprot. 5190 * This is ok so we increment the address by the 5191 * smallest hmeblk range for kernel mappings and the 5192 * largest hmeblk range, to account for shadow hmeblks, 5193 * for user mappings and continue. 5194 */ 5195 if (sfmmup == ksfmmup) 5196 addr = (caddr_t)P2END((uintptr_t)addr, 5197 TTEBYTES(1)); 5198 else 5199 addr = (caddr_t)P2END((uintptr_t)addr, 5200 TTEBYTES(hashno)); 5201 hashno = 1; 5202 } else { 5203 hashno++; 5204 } 5205 } 5206 5207 sfmmu_hblks_list_purge(&list, 0); 5208 DEMAP_RANGE_FLUSH(&dmr); 5209 cpuset = sfmmup->sfmmu_cpusran; 5210 xt_sync(cpuset); 5211 } 5212 5213 /* 5214 * This function chgprots a range of addresses in an hmeblk. It returns the 5215 * next addres that needs to be chgprot. 5216 * It should be called with the hash lock held. 5217 * XXX It shold be possible to optimize chgprot by not flushing every time but 5218 * on the other hand: 5219 * 1. do one flush crosscall. 5220 * 2. 
only flush if we are increasing permissions (make sure this will work) 5221 */ 5222 static caddr_t 5223 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5224 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5225 { 5226 uint_t pprot; 5227 tte_t tte, ttemod; 5228 struct sf_hment *sfhmep; 5229 uint_t tteflags; 5230 int ttesz; 5231 struct page *pp = NULL; 5232 kmutex_t *pml, *pmtx; 5233 int ret; 5234 int use_demap_range; 5235 #if defined(SF_ERRATA_57) 5236 int check_exec; 5237 #endif 5238 5239 ASSERT(in_hblk_range(hmeblkp, addr)); 5240 ASSERT(hmeblkp->hblk_shw_bit == 0); 5241 ASSERT(!hmeblkp->hblk_shared); 5242 5243 #ifdef DEBUG 5244 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5245 (endaddr < get_hblk_endaddr(hmeblkp))) { 5246 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5247 } 5248 #endif /* DEBUG */ 5249 5250 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5251 ttesz = get_hblk_ttesz(hmeblkp); 5252 5253 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5254 #if defined(SF_ERRATA_57) 5255 check_exec = (sfmmup != ksfmmup) && 5256 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5257 ((vprot & PROT_EXEC) == PROT_EXEC); 5258 #endif 5259 HBLKTOHME(sfhmep, hmeblkp, addr); 5260 5261 /* 5262 * Flush the current demap region if addresses have been 5263 * skipped or the page size doesn't match. 5264 */ 5265 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5266 if (use_demap_range) { 5267 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5268 } else { 5269 DEMAP_RANGE_FLUSH(dmrp); 5270 } 5271 5272 while (addr < endaddr) { 5273 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5274 if (TTE_IS_VALID(&tte)) { 5275 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5276 /* 5277 * if the new protection is the same as old 5278 * continue 5279 */ 5280 goto next_addr; 5281 } 5282 pml = NULL; 5283 pp = sfhmep->hme_page; 5284 if (pp) { 5285 pml = sfmmu_mlist_enter(pp); 5286 } 5287 if (pp != sfhmep->hme_page) { 5288 /* 5289 * tte most have been unloaded 5290 * underneath us. Recheck 5291 */ 5292 ASSERT(pml); 5293 sfmmu_mlist_exit(pml); 5294 continue; 5295 } 5296 5297 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5298 5299 ttemod = tte; 5300 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5301 #if defined(SF_ERRATA_57) 5302 if (check_exec && addr < errata57_limit) 5303 ttemod.tte_exec_perm = 0; 5304 #endif 5305 ret = sfmmu_modifytte_try(&tte, &ttemod, 5306 &sfhmep->hme_tte); 5307 5308 if (ret < 0) { 5309 /* tte changed underneath us */ 5310 if (pml) { 5311 sfmmu_mlist_exit(pml); 5312 } 5313 continue; 5314 } 5315 5316 if (tteflags & TTE_HWWR_INT) { 5317 /* 5318 * need to sync if we are clearing modify bit. 5319 */ 5320 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5321 } 5322 5323 if (pp && PP_ISRO(pp)) { 5324 if (pprot & TTE_WRPRM_INT) { 5325 pmtx = sfmmu_page_enter(pp); 5326 PP_CLRRO(pp); 5327 sfmmu_page_exit(pmtx); 5328 } 5329 } 5330 5331 if (ret > 0 && use_demap_range) { 5332 DEMAP_RANGE_MARKPG(dmrp, addr); 5333 } else if (ret > 0) { 5334 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5335 } 5336 5337 if (pml) { 5338 sfmmu_mlist_exit(pml); 5339 } 5340 } 5341 next_addr: 5342 addr += TTEBYTES(ttesz); 5343 sfhmep++; 5344 DEMAP_RANGE_NEXTPG(dmrp); 5345 } 5346 return (addr); 5347 } 5348 5349 /* 5350 * This routine is deprecated and should only be used by hat_chgprot. 5351 * The correct routine is sfmmu_vtop_attr. 5352 * This routine converts virtual page protections to physical ones. It will 5353 * update the tteflags field with the tte mask corresponding to the protections 5354 * affected and it returns the new protections. 
It will also clear the modify 5355 * bit if we are taking away write permission. This is necessary since the 5356 * modify bit is the hardware permission bit and we need to clear it in order 5357 * to detect write faults. 5358 * It accepts the following special protections: 5359 * ~PROT_WRITE = remove write permissions. 5360 * ~PROT_USER = remove user permissions. 5361 */ 5362 static uint_t 5363 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5364 { 5365 if (vprot == (uint_t)~PROT_WRITE) { 5366 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5367 return (0); /* will cause wrprm to be cleared */ 5368 } 5369 if (vprot == (uint_t)~PROT_USER) { 5370 *tteflagsp = TTE_PRIV_INT; 5371 return (0); /* will cause privprm to be cleared */ 5372 } 5373 if ((vprot == 0) || (vprot == PROT_USER) || 5374 ((vprot & PROT_ALL) != vprot)) { 5375 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5376 } 5377 5378 switch (vprot) { 5379 case (PROT_READ): 5380 case (PROT_EXEC): 5381 case (PROT_EXEC | PROT_READ): 5382 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5383 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5384 case (PROT_WRITE): 5385 case (PROT_WRITE | PROT_READ): 5386 case (PROT_EXEC | PROT_WRITE): 5387 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5388 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5389 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5390 case (PROT_USER | PROT_READ): 5391 case (PROT_USER | PROT_EXEC): 5392 case (PROT_USER | PROT_EXEC | PROT_READ): 5393 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5394 return (0); /* clr prv and wrt */ 5395 case (PROT_USER | PROT_WRITE): 5396 case (PROT_USER | PROT_WRITE | PROT_READ): 5397 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5398 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5399 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5400 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5401 default: 5402 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5403 } 5404 return (0); 5405 } 5406 5407 /* 5408 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5409 * the normal algorithm would take too long for a very large VA range with 5410 * few real mappings. This routine just walks thru all HMEs in the global 5411 * hash table to find and remove mappings. 5412 */ 5413 static void 5414 hat_unload_large_virtual( 5415 struct hat *sfmmup, 5416 caddr_t startaddr, 5417 size_t len, 5418 uint_t flags, 5419 hat_callback_t *callback) 5420 { 5421 struct hmehash_bucket *hmebp; 5422 struct hme_blk *hmeblkp; 5423 struct hme_blk *pr_hblk = NULL; 5424 struct hme_blk *nx_hblk; 5425 struct hme_blk *list = NULL; 5426 int i; 5427 demap_range_t dmr, *dmrp; 5428 cpuset_t cpuset; 5429 caddr_t endaddr = startaddr + len; 5430 caddr_t sa; 5431 caddr_t ea; 5432 caddr_t cb_sa[MAX_CB_ADDR]; 5433 caddr_t cb_ea[MAX_CB_ADDR]; 5434 int addr_cnt = 0; 5435 int a = 0; 5436 5437 if (sfmmup->sfmmu_free) { 5438 dmrp = NULL; 5439 } else { 5440 dmrp = &dmr; 5441 DEMAP_RANGE_INIT(sfmmup, dmrp); 5442 } 5443 5444 /* 5445 * Loop through all the hash buckets of HME blocks looking for matches. 
5446 */ 5447 for (i = 0; i <= UHMEHASH_SZ; i++) { 5448 hmebp = &uhme_hash[i]; 5449 SFMMU_HASH_LOCK(hmebp); 5450 hmeblkp = hmebp->hmeblkp; 5451 pr_hblk = NULL; 5452 while (hmeblkp) { 5453 nx_hblk = hmeblkp->hblk_next; 5454 5455 /* 5456 * skip if not this context, if a shadow block or 5457 * if the mapping is not in the requested range 5458 */ 5459 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5460 hmeblkp->hblk_shw_bit || 5461 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5462 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5463 pr_hblk = hmeblkp; 5464 goto next_block; 5465 } 5466 5467 ASSERT(!hmeblkp->hblk_shared); 5468 /* 5469 * unload if there are any current valid mappings 5470 */ 5471 if (hmeblkp->hblk_vcnt != 0 || 5472 hmeblkp->hblk_hmecnt != 0) 5473 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5474 sa, ea, dmrp, flags); 5475 5476 /* 5477 * on unmap we also release the HME block itself, once 5478 * all mappings are gone. 5479 */ 5480 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5481 !hmeblkp->hblk_vcnt && 5482 !hmeblkp->hblk_hmecnt) { 5483 ASSERT(!hmeblkp->hblk_lckcnt); 5484 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5485 &list, 0); 5486 } else { 5487 pr_hblk = hmeblkp; 5488 } 5489 5490 if (callback == NULL) 5491 goto next_block; 5492 5493 /* 5494 * HME blocks may span more than one page, but we may be 5495 * unmapping only one page, so check for a smaller range 5496 * for the callback 5497 */ 5498 if (sa < startaddr) 5499 sa = startaddr; 5500 if (--ea > endaddr) 5501 ea = endaddr - 1; 5502 5503 cb_sa[addr_cnt] = sa; 5504 cb_ea[addr_cnt] = ea; 5505 if (++addr_cnt == MAX_CB_ADDR) { 5506 if (dmrp != NULL) { 5507 DEMAP_RANGE_FLUSH(dmrp); 5508 cpuset = sfmmup->sfmmu_cpusran; 5509 xt_sync(cpuset); 5510 } 5511 5512 for (a = 0; a < MAX_CB_ADDR; ++a) { 5513 callback->hcb_start_addr = cb_sa[a]; 5514 callback->hcb_end_addr = cb_ea[a]; 5515 callback->hcb_function(callback); 5516 } 5517 addr_cnt = 0; 5518 } 5519 5520 next_block: 5521 hmeblkp = nx_hblk; 5522 } 5523 SFMMU_HASH_UNLOCK(hmebp); 5524 } 5525 5526 sfmmu_hblks_list_purge(&list, 0); 5527 if (dmrp != NULL) { 5528 DEMAP_RANGE_FLUSH(dmrp); 5529 cpuset = sfmmup->sfmmu_cpusran; 5530 xt_sync(cpuset); 5531 } 5532 5533 for (a = 0; a < addr_cnt; ++a) { 5534 callback->hcb_start_addr = cb_sa[a]; 5535 callback->hcb_end_addr = cb_ea[a]; 5536 callback->hcb_function(callback); 5537 } 5538 5539 /* 5540 * Check TSB and TLB page sizes if the process isn't exiting. 5541 */ 5542 if (!sfmmup->sfmmu_free) 5543 sfmmu_check_page_sizes(sfmmup, 0); 5544 } 5545 5546 /* 5547 * Unload all the mappings in the range [addr..addr+len). addr and len must 5548 * be MMU_PAGESIZE aligned. 
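 *
 * When a callback is supplied, hcb_function() is invoked with
 * hcb_start_addr and hcb_end_addr describing each contiguous chunk that
 * was actually unloaded. A minimal sketch (hypothetical caller; it is
 * assumed here that no other hat_callback_t fields need to be set):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = xx_unload_done;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);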
5549 */ 5550 5551 extern struct seg *segkmap; 5552 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5553 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5554 5555 5556 void 5557 hat_unload_callback( 5558 struct hat *sfmmup, 5559 caddr_t addr, 5560 size_t len, 5561 uint_t flags, 5562 hat_callback_t *callback) 5563 { 5564 struct hmehash_bucket *hmebp; 5565 hmeblk_tag hblktag; 5566 int hmeshift, hashno, iskernel; 5567 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5568 caddr_t endaddr; 5569 cpuset_t cpuset; 5570 int addr_count = 0; 5571 int a; 5572 caddr_t cb_start_addr[MAX_CB_ADDR]; 5573 caddr_t cb_end_addr[MAX_CB_ADDR]; 5574 int issegkmap = ISSEGKMAP(sfmmup, addr); 5575 demap_range_t dmr, *dmrp; 5576 5577 if (sfmmup->sfmmu_xhat_provider) { 5578 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 5579 return; 5580 } else { 5581 /* 5582 * This must be a CPU HAT. If the address space has 5583 * XHATs attached, unload the mappings for all of them, 5584 * just in case 5585 */ 5586 ASSERT(sfmmup->sfmmu_as != NULL); 5587 if (sfmmup->sfmmu_as->a_xhat != NULL) 5588 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 5589 len, flags, callback); 5590 } 5591 5592 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5593 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5594 5595 ASSERT(sfmmup != NULL); 5596 ASSERT((len & MMU_PAGEOFFSET) == 0); 5597 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5598 5599 /* 5600 * Probing through a large VA range (say 63 bits) will be slow, even 5601 * at 4 Meg steps between the probes. So, when the virtual address range 5602 * is very large, search the HME entries for what to unload. 5603 * 5604 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5605 * 5606 * UHMEHASH_SZ is number of hash buckets to examine 5607 * 5608 */ 5609 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5610 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5611 return; 5612 } 5613 5614 CPUSET_ZERO(cpuset); 5615 5616 /* 5617 * If the process is exiting, we can save a lot of fuss since 5618 * we'll flush the TLB when we free the ctx anyway. 5619 */ 5620 if (sfmmup->sfmmu_free) 5621 dmrp = NULL; 5622 else 5623 dmrp = &dmr; 5624 5625 DEMAP_RANGE_INIT(sfmmup, dmrp); 5626 endaddr = addr + len; 5627 hblktag.htag_id = sfmmup; 5628 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5629 5630 /* 5631 * It is likely for the vm to call unload over a wide range of 5632 * addresses that are actually very sparsely populated by 5633 * translations. In order to speed this up the sfmmu hat supports 5634 * the concept of shadow hmeblks. Dummy large page hmeblks that 5635 * correspond to actual small translations are allocated at tteload 5636 * time and are referred to as shadow hmeblks. Now, during unload 5637 * time, we first check if we have a shadow hmeblk for that 5638 * translation. The absence of one means the corresponding address 5639 * range is empty and can be skipped. 5640 * 5641 * The kernel is an exception to above statement and that is why 5642 * we don't use shadow hmeblks and hash starting from the smallest 5643 * page size. 
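 *
 * For example, with all page sizes enabled a user unload first probes at
 * the 256M hash level; if nothing is found there, the address range
 * covered by that hash level is skipped outright, while finding a shadow
 * hmeblk simply drops the hash size by one and reprobes the same address,
 * until the hmeblk that actually holds the translations is reached.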
5644 */ 5645 if (sfmmup == KHATID) { 5646 iskernel = 1; 5647 hashno = TTE64K; 5648 } else { 5649 iskernel = 0; 5650 if (mmu_page_sizes == max_mmu_page_sizes) { 5651 hashno = TTE256M; 5652 } else { 5653 hashno = TTE4M; 5654 } 5655 } 5656 while (addr < endaddr) { 5657 hmeshift = HME_HASH_SHIFT(hashno); 5658 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5659 hblktag.htag_rehash = hashno; 5660 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5661 5662 SFMMU_HASH_LOCK(hmebp); 5663 5664 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 5665 if (hmeblkp == NULL) { 5666 /* 5667 * didn't find an hmeblk. skip the appropiate 5668 * address range. 5669 */ 5670 SFMMU_HASH_UNLOCK(hmebp); 5671 if (iskernel) { 5672 if (hashno < mmu_hashcnt) { 5673 hashno++; 5674 continue; 5675 } else { 5676 hashno = TTE64K; 5677 addr = (caddr_t)roundup((uintptr_t)addr 5678 + 1, MMU_PAGESIZE64K); 5679 continue; 5680 } 5681 } 5682 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5683 (1 << hmeshift)); 5684 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5685 ASSERT(hashno == TTE64K); 5686 continue; 5687 } 5688 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5689 hashno = TTE512K; 5690 continue; 5691 } 5692 if (mmu_page_sizes == max_mmu_page_sizes) { 5693 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5694 hashno = TTE4M; 5695 continue; 5696 } 5697 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5698 hashno = TTE32M; 5699 continue; 5700 } 5701 hashno = TTE256M; 5702 continue; 5703 } else { 5704 hashno = TTE4M; 5705 continue; 5706 } 5707 } 5708 ASSERT(hmeblkp); 5709 ASSERT(!hmeblkp->hblk_shared); 5710 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5711 /* 5712 * If the valid count is zero we can skip the range 5713 * mapped by this hmeblk. 5714 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5715 * is used by segment drivers as a hint 5716 * that the mapping resource won't be used any longer. 5717 * The best example of this is during exit(). 5718 */ 5719 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5720 get_hblk_span(hmeblkp)); 5721 if ((flags & HAT_UNLOAD_UNMAP) || 5722 (iskernel && !issegkmap)) { 5723 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5724 &list, 0); 5725 } 5726 SFMMU_HASH_UNLOCK(hmebp); 5727 5728 if (iskernel) { 5729 hashno = TTE64K; 5730 continue; 5731 } 5732 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5733 ASSERT(hashno == TTE64K); 5734 continue; 5735 } 5736 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5737 hashno = TTE512K; 5738 continue; 5739 } 5740 if (mmu_page_sizes == max_mmu_page_sizes) { 5741 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5742 hashno = TTE4M; 5743 continue; 5744 } 5745 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5746 hashno = TTE32M; 5747 continue; 5748 } 5749 hashno = TTE256M; 5750 continue; 5751 } else { 5752 hashno = TTE4M; 5753 continue; 5754 } 5755 } 5756 if (hmeblkp->hblk_shw_bit) { 5757 /* 5758 * If we encounter a shadow hmeblk we know there is 5759 * smaller sized hmeblks mapping the same address space. 5760 * Decrement the hash size and rehash. 5761 */ 5762 ASSERT(sfmmup != KHATID); 5763 hashno--; 5764 SFMMU_HASH_UNLOCK(hmebp); 5765 continue; 5766 } 5767 5768 /* 5769 * track callback address ranges. 
5770 * only start a new range when it's not contiguous 5771 */ 5772 if (callback != NULL) { 5773 if (addr_count > 0 && 5774 addr == cb_end_addr[addr_count - 1]) 5775 --addr_count; 5776 else 5777 cb_start_addr[addr_count] = addr; 5778 } 5779 5780 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5781 dmrp, flags); 5782 5783 if (callback != NULL) 5784 cb_end_addr[addr_count++] = addr; 5785 5786 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5787 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5788 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0); 5789 } 5790 SFMMU_HASH_UNLOCK(hmebp); 5791 5792 /* 5793 * Notify our caller as to exactly which pages 5794 * have been unloaded. We do these in clumps, 5795 * to minimize the number of xt_sync()s that need to occur. 5796 */ 5797 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5798 DEMAP_RANGE_FLUSH(dmrp); 5799 if (dmrp != NULL) { 5800 cpuset = sfmmup->sfmmu_cpusran; 5801 xt_sync(cpuset); 5802 } 5803 5804 for (a = 0; a < MAX_CB_ADDR; ++a) { 5805 callback->hcb_start_addr = cb_start_addr[a]; 5806 callback->hcb_end_addr = cb_end_addr[a]; 5807 callback->hcb_function(callback); 5808 } 5809 addr_count = 0; 5810 } 5811 if (iskernel) { 5812 hashno = TTE64K; 5813 continue; 5814 } 5815 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5816 ASSERT(hashno == TTE64K); 5817 continue; 5818 } 5819 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5820 hashno = TTE512K; 5821 continue; 5822 } 5823 if (mmu_page_sizes == max_mmu_page_sizes) { 5824 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5825 hashno = TTE4M; 5826 continue; 5827 } 5828 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5829 hashno = TTE32M; 5830 continue; 5831 } 5832 hashno = TTE256M; 5833 } else { 5834 hashno = TTE4M; 5835 } 5836 } 5837 5838 sfmmu_hblks_list_purge(&list, 0); 5839 DEMAP_RANGE_FLUSH(dmrp); 5840 if (dmrp != NULL) { 5841 cpuset = sfmmup->sfmmu_cpusran; 5842 xt_sync(cpuset); 5843 } 5844 if (callback && addr_count != 0) { 5845 for (a = 0; a < addr_count; ++a) { 5846 callback->hcb_start_addr = cb_start_addr[a]; 5847 callback->hcb_end_addr = cb_end_addr[a]; 5848 callback->hcb_function(callback); 5849 } 5850 } 5851 5852 /* 5853 * Check TSB and TLB page sizes if the process isn't exiting. 5854 */ 5855 if (!sfmmup->sfmmu_free) 5856 sfmmu_check_page_sizes(sfmmup, 0); 5857 } 5858 5859 /* 5860 * Unload all the mappings in the range [addr..addr+len). addr and len must 5861 * be MMU_PAGESIZE aligned. 5862 */ 5863 void 5864 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5865 { 5866 if (sfmmup->sfmmu_xhat_provider) { 5867 XHAT_UNLOAD(sfmmup, addr, len, flags); 5868 return; 5869 } 5870 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5871 } 5872 5873 5874 /* 5875 * Find the largest mapping size for this page. 5876 */ 5877 int 5878 fnd_mapping_sz(page_t *pp) 5879 { 5880 int sz; 5881 int p_index; 5882 5883 p_index = PP_MAPINDEX(pp); 5884 5885 sz = 0; 5886 p_index >>= 1; /* don't care about 8K bit */ 5887 for (; p_index; p_index >>= 1) { 5888 sz++; 5889 } 5890 5891 return (sz); 5892 } 5893 5894 /* 5895 * This function unloads a range of addresses for an hmeblk. 5896 * It returns the next address to be unloaded. 5897 * It should be called with the hash lock held. 
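 * The caller is also responsible for flushing the accumulated demap
 * range (DEMAP_RANGE_FLUSH) and issuing the final xt_sync().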
5898 */ 5899 static caddr_t 5900 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5901 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5902 { 5903 tte_t tte, ttemod; 5904 struct sf_hment *sfhmep; 5905 int ttesz; 5906 long ttecnt; 5907 page_t *pp; 5908 kmutex_t *pml; 5909 int ret; 5910 int use_demap_range; 5911 5912 ASSERT(in_hblk_range(hmeblkp, addr)); 5913 ASSERT(!hmeblkp->hblk_shw_bit); 5914 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 5915 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 5916 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 5917 5918 #ifdef DEBUG 5919 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5920 (endaddr < get_hblk_endaddr(hmeblkp))) { 5921 panic("sfmmu_hblk_unload: partial unload of large page"); 5922 } 5923 #endif /* DEBUG */ 5924 5925 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5926 ttesz = get_hblk_ttesz(hmeblkp); 5927 5928 use_demap_range = ((dmrp == NULL) || 5929 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5930 5931 if (use_demap_range) { 5932 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5933 } else { 5934 DEMAP_RANGE_FLUSH(dmrp); 5935 } 5936 ttecnt = 0; 5937 HBLKTOHME(sfhmep, hmeblkp, addr); 5938 5939 while (addr < endaddr) { 5940 pml = NULL; 5941 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5942 if (TTE_IS_VALID(&tte)) { 5943 pp = sfhmep->hme_page; 5944 if (pp != NULL) { 5945 pml = sfmmu_mlist_enter(pp); 5946 } 5947 5948 /* 5949 * Verify if hme still points to 'pp' now that 5950 * we have p_mapping lock. 5951 */ 5952 if (sfhmep->hme_page != pp) { 5953 if (pp != NULL && sfhmep->hme_page != NULL) { 5954 ASSERT(pml != NULL); 5955 sfmmu_mlist_exit(pml); 5956 /* Re-start this iteration. */ 5957 continue; 5958 } 5959 ASSERT((pp != NULL) && 5960 (sfhmep->hme_page == NULL)); 5961 goto tte_unloaded; 5962 } 5963 5964 /* 5965 * This point on we have both HASH and p_mapping 5966 * lock. 5967 */ 5968 ASSERT(pp == sfhmep->hme_page); 5969 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5970 5971 /* 5972 * We need to loop on modify tte because it is 5973 * possible for pagesync to come along and 5974 * change the software bits beneath us. 5975 * 5976 * Page_unload can also invalidate the tte after 5977 * we read tte outside of p_mapping lock. 5978 */ 5979 again: 5980 ttemod = tte; 5981 5982 TTE_SET_INVALID(&ttemod); 5983 ret = sfmmu_modifytte_try(&tte, &ttemod, 5984 &sfhmep->hme_tte); 5985 5986 if (ret <= 0) { 5987 if (TTE_IS_VALID(&tte)) { 5988 ASSERT(ret < 0); 5989 goto again; 5990 } 5991 if (pp != NULL) { 5992 panic("sfmmu_hblk_unload: pp = 0x%p " 5993 "tte became invalid under mlist" 5994 " lock = 0x%p", (void *)pp, 5995 (void *)pml); 5996 } 5997 continue; 5998 } 5999 6000 if (!(flags & HAT_UNLOAD_NOSYNC)) { 6001 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6002 } 6003 6004 /* 6005 * Ok- we invalidated the tte. Do the rest of the job. 6006 */ 6007 ttecnt++; 6008 6009 if (flags & HAT_UNLOAD_UNLOCK) { 6010 ASSERT(hmeblkp->hblk_lckcnt > 0); 6011 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 6012 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 6013 } 6014 6015 /* 6016 * Normally we would need to flush the page 6017 * from the virtual cache at this point in 6018 * order to prevent a potential cache alias 6019 * inconsistency. 6020 * The particular scenario we need to worry 6021 * about is: 6022 * Given: va1 and va2 are two virtual address 6023 * that alias and map the same physical 6024 * address. 6025 * 1. mapping exists from va1 to pa and data 6026 * has been read into the cache. 6027 * 2. unload va1. 6028 * 3. load va2 and modify data using va2. 
6029 * 4 unload va2. 6030 * 5. load va1 and reference data. Unless we 6031 * flush the data cache when we unload we will 6032 * get stale data. 6033 * Fortunately, page coloring eliminates the 6034 * above scenario by remembering the color a 6035 * physical page was last or is currently 6036 * mapped to. Now, we delay the flush until 6037 * the loading of translations. Only when the 6038 * new translation is of a different color 6039 * are we forced to flush. 6040 */ 6041 if (use_demap_range) { 6042 /* 6043 * Mark this page as needing a demap. 6044 */ 6045 DEMAP_RANGE_MARKPG(dmrp, addr); 6046 } else { 6047 ASSERT(sfmmup != NULL); 6048 ASSERT(!hmeblkp->hblk_shared); 6049 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6050 sfmmup->sfmmu_free, 0); 6051 } 6052 6053 if (pp) { 6054 /* 6055 * Remove the hment from the mapping list 6056 */ 6057 ASSERT(hmeblkp->hblk_hmecnt > 0); 6058 6059 /* 6060 * Again, we cannot 6061 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6062 */ 6063 HME_SUB(sfhmep, pp); 6064 membar_stst(); 6065 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6066 } 6067 6068 ASSERT(hmeblkp->hblk_vcnt > 0); 6069 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6070 6071 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6072 !hmeblkp->hblk_lckcnt); 6073 6074 #ifdef VAC 6075 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6076 if (PP_ISTNC(pp)) { 6077 /* 6078 * If page was temporary 6079 * uncached, try to recache 6080 * it. Note that HME_SUB() was 6081 * called above so p_index and 6082 * mlist had been updated. 6083 */ 6084 conv_tnc(pp, ttesz); 6085 } else if (pp->p_mapping == NULL) { 6086 ASSERT(kpm_enable); 6087 /* 6088 * Page is marked to be in VAC conflict 6089 * to an existing kpm mapping and/or is 6090 * kpm mapped using only the regular 6091 * pagesize. 6092 */ 6093 sfmmu_kpm_hme_unload(pp); 6094 } 6095 } 6096 #endif /* VAC */ 6097 } else if ((pp = sfhmep->hme_page) != NULL) { 6098 /* 6099 * TTE is invalid but the hme 6100 * still exists. let pageunload 6101 * complete its job. 6102 */ 6103 ASSERT(pml == NULL); 6104 pml = sfmmu_mlist_enter(pp); 6105 if (sfhmep->hme_page != NULL) { 6106 sfmmu_mlist_exit(pml); 6107 continue; 6108 } 6109 ASSERT(sfhmep->hme_page == NULL); 6110 } else if (hmeblkp->hblk_hmecnt != 0) { 6111 /* 6112 * pageunload may have not finished decrementing 6113 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6114 * wait for pageunload to finish. Rely on pageunload 6115 * to decrement hblk_hmecnt after hblk_vcnt. 6116 */ 6117 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6118 ASSERT(pml == NULL); 6119 if (pf_is_memory(pfn)) { 6120 pp = page_numtopp_nolock(pfn); 6121 if (pp != NULL) { 6122 pml = sfmmu_mlist_enter(pp); 6123 sfmmu_mlist_exit(pml); 6124 pml = NULL; 6125 } 6126 } 6127 } 6128 6129 tte_unloaded: 6130 /* 6131 * At this point, the tte we are looking at 6132 * should be unloaded, and hme has been unlinked 6133 * from page too. This is important because in 6134 * pageunload, it does ttesync() then HME_SUB. 6135 * We need to make sure HME_SUB has been completed 6136 * so we know ttesync() has been completed. Otherwise, 6137 * at exit time, after return from hat layer, VM will 6138 * release as structure which hat_setstat() (called 6139 * by ttesync()) needs. 
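 * The DEBUG block below re-reads the tte to verify that it is indeed
 * no longer valid before we advance to the next hment.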
6140 */ 6141 #ifdef DEBUG 6142 { 6143 tte_t dtte; 6144 6145 ASSERT(sfhmep->hme_page == NULL); 6146 6147 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6148 ASSERT(!TTE_IS_VALID(&dtte)); 6149 } 6150 #endif 6151 6152 if (pml) { 6153 sfmmu_mlist_exit(pml); 6154 } 6155 6156 addr += TTEBYTES(ttesz); 6157 sfhmep++; 6158 DEMAP_RANGE_NEXTPG(dmrp); 6159 } 6160 /* 6161 * For shared hmeblks this routine is only called when the region is 6162 * freed and no longer referenced. So no need to decrement ttecnt 6163 * in the region structure here. 6164 */ 6165 if (ttecnt > 0 && sfmmup != NULL) { 6166 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6167 } 6168 return (addr); 6169 } 6170 6171 /* 6172 * Invalidate a virtual address range for the local CPU. 6173 * For best performance ensure that the va range is completely 6174 * mapped, otherwise the entire TLB will be flushed. 6175 */ 6176 void 6177 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size) 6178 { 6179 ssize_t sz; 6180 caddr_t endva = va + size; 6181 6182 while (va < endva) { 6183 sz = hat_getpagesize(sfmmup, va); 6184 if (sz < 0) { 6185 vtag_flushall(); 6186 break; 6187 } 6188 vtag_flushpage(va, (uint64_t)sfmmup); 6189 va += sz; 6190 } 6191 } 6192 6193 /* 6194 * Synchronize all the mappings in the range [addr..addr+len). 6195 * Can be called with clearflag having two states: 6196 * HAT_SYNC_DONTZERO means just return the rm stats 6197 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6198 */ 6199 void 6200 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6201 { 6202 struct hmehash_bucket *hmebp; 6203 hmeblk_tag hblktag; 6204 int hmeshift, hashno = 1; 6205 struct hme_blk *hmeblkp, *list = NULL; 6206 caddr_t endaddr; 6207 cpuset_t cpuset; 6208 6209 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 6210 ASSERT((sfmmup == ksfmmup) || 6211 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 6212 ASSERT((len & MMU_PAGEOFFSET) == 0); 6213 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6214 (clearflag == HAT_SYNC_ZERORM)); 6215 6216 CPUSET_ZERO(cpuset); 6217 6218 endaddr = addr + len; 6219 hblktag.htag_id = sfmmup; 6220 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6221 6222 /* 6223 * Spitfire supports 4 page sizes. 6224 * Most pages are expected to be of the smallest page 6225 * size (8K) and these will not need to be rehashed. 64K 6226 * pages also don't need to be rehashed because an hmeblk 6227 * spans 64K of address space. 512K pages might need 1 rehash 6228 * and 4M pages 2 rehashes. 6229 */ 6230 while (addr < endaddr) { 6231 hmeshift = HME_HASH_SHIFT(hashno); 6232 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6233 hblktag.htag_rehash = hashno; 6234 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6235 6236 SFMMU_HASH_LOCK(hmebp); 6237 6238 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6239 if (hmeblkp != NULL) { 6240 ASSERT(!hmeblkp->hblk_shared); 6241 /* 6242 * We've encountered a shadow hmeblk so skip the range 6243 * of the next smaller mapping size.
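 * P2END() advances addr past one naturally aligned region of that
 * smaller size; the search then restarts at the smallest hash level
 * for the new address.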
6244 */ 6245 if (hmeblkp->hblk_shw_bit) { 6246 ASSERT(sfmmup != ksfmmup); 6247 ASSERT(hashno > 1); 6248 addr = (caddr_t)P2END((uintptr_t)addr, 6249 TTEBYTES(hashno - 1)); 6250 } else { 6251 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6252 addr, endaddr, clearflag); 6253 } 6254 SFMMU_HASH_UNLOCK(hmebp); 6255 hashno = 1; 6256 continue; 6257 } 6258 SFMMU_HASH_UNLOCK(hmebp); 6259 6260 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6261 /* 6262 * We have traversed the whole list and rehashed 6263 * if necessary without finding the address to sync. 6264 * This is ok so we increment the address by the 6265 * smallest hmeblk range for kernel mappings and the 6266 * largest hmeblk range, to account for shadow hmeblks, 6267 * for user mappings and continue. 6268 */ 6269 if (sfmmup == ksfmmup) 6270 addr = (caddr_t)P2END((uintptr_t)addr, 6271 TTEBYTES(1)); 6272 else 6273 addr = (caddr_t)P2END((uintptr_t)addr, 6274 TTEBYTES(hashno)); 6275 hashno = 1; 6276 } else { 6277 hashno++; 6278 } 6279 } 6280 sfmmu_hblks_list_purge(&list, 0); 6281 cpuset = sfmmup->sfmmu_cpusran; 6282 xt_sync(cpuset); 6283 } 6284 6285 static caddr_t 6286 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6287 caddr_t endaddr, int clearflag) 6288 { 6289 tte_t tte, ttemod; 6290 struct sf_hment *sfhmep; 6291 int ttesz; 6292 struct page *pp; 6293 kmutex_t *pml; 6294 int ret; 6295 6296 ASSERT(hmeblkp->hblk_shw_bit == 0); 6297 ASSERT(!hmeblkp->hblk_shared); 6298 6299 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6300 6301 ttesz = get_hblk_ttesz(hmeblkp); 6302 HBLKTOHME(sfhmep, hmeblkp, addr); 6303 6304 while (addr < endaddr) { 6305 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6306 if (TTE_IS_VALID(&tte)) { 6307 pml = NULL; 6308 pp = sfhmep->hme_page; 6309 if (pp) { 6310 pml = sfmmu_mlist_enter(pp); 6311 } 6312 if (pp != sfhmep->hme_page) { 6313 /* 6314 * tte must have been unloaded 6315 * underneath us. Recheck. 6316 */ 6317 ASSERT(pml); 6318 sfmmu_mlist_exit(pml); 6319 continue; 6320 } 6321 6322 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6323 6324 if (clearflag == HAT_SYNC_ZERORM) { 6325 ttemod = tte; 6326 TTE_CLR_RM(&ttemod); 6327 ret = sfmmu_modifytte_try(&tte, &ttemod, 6328 &sfhmep->hme_tte); 6329 if (ret < 0) { 6330 if (pml) { 6331 sfmmu_mlist_exit(pml); 6332 } 6333 continue; 6334 } 6335 6336 if (ret > 0) { 6337 sfmmu_tlb_demap(addr, sfmmup, 6338 hmeblkp, 0, 0); 6339 } 6340 } 6341 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6342 if (pml) { 6343 sfmmu_mlist_exit(pml); 6344 } 6345 } 6346 addr += TTEBYTES(ttesz); 6347 sfhmep++; 6348 } 6349 return (addr); 6350 } 6351 6352 /* 6353 * This function will sync a tte to the page struct and it will 6354 * update the hat stats. Currently it allows us to pass a NULL pp 6355 * and we will simply update the stats. We may want to change this 6356 * so we only keep stats for pages backed by pp's.
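 * For large mappings the harvested ref/mod bits are propagated to
 * every constituent page of the mapping.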
6357 */ 6358 static void 6359 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6360 { 6361 uint_t rm = 0; 6362 int sz; 6363 pgcnt_t npgs; 6364 6365 ASSERT(TTE_IS_VALID(ttep)); 6366 6367 if (TTE_IS_NOSYNC(ttep)) { 6368 return; 6369 } 6370 6371 if (TTE_IS_REF(ttep)) { 6372 rm = P_REF; 6373 } 6374 if (TTE_IS_MOD(ttep)) { 6375 rm |= P_MOD; 6376 } 6377 6378 if (rm == 0) { 6379 return; 6380 } 6381 6382 sz = TTE_CSZ(ttep); 6383 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6384 int i; 6385 caddr_t vaddr = addr; 6386 6387 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 6388 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 6389 } 6390 6391 } 6392 6393 /* 6394 * XXX I want to use cas to update nrm bits but they 6395 * currently belong in common/vm and not in hat where 6396 * they should be. 6397 * The nrm bits are protected by the same mutex as 6398 * the one that protects the page's mapping list. 6399 */ 6400 if (!pp) 6401 return; 6402 ASSERT(sfmmu_mlist_held(pp)); 6403 /* 6404 * If the tte is for a large page, we need to sync all the 6405 * pages covered by the tte. 6406 */ 6407 if (sz != TTE8K) { 6408 ASSERT(pp->p_szc != 0); 6409 pp = PP_GROUPLEADER(pp, sz); 6410 ASSERT(sfmmu_mlist_held(pp)); 6411 } 6412 6413 /* Get number of pages from tte size. */ 6414 npgs = TTEPAGES(sz); 6415 6416 do { 6417 ASSERT(pp); 6418 ASSERT(sfmmu_mlist_held(pp)); 6419 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6420 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 6421 hat_page_setattr(pp, rm); 6422 6423 /* 6424 * Are we done? If not, we must have a large mapping. 6425 * For large mappings we need to sync the rest of the pages 6426 * covered by this tte; goto the next page. 6427 */ 6428 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6429 } 6430 6431 /* 6432 * Execute pre-callback handler of each pa_hment linked to pp 6433 * 6434 * Inputs: 6435 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6436 * capture_cpus: pointer to return value (below) 6437 * 6438 * Returns: 6439 * Propagates the subsystem callback return values back to the caller; 6440 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6441 * is zero if all of the pa_hments are of a type that do not require 6442 * capturing CPUs prior to suspending the mapping, else it is 1. 
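 * The mapping list lock is dropped around each pre-handler call to
 * avoid lock ordering problems; when we took that lock ourselves the
 * p_mapping walk is restarted from the top afterwards.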
6443 */ 6444 static int 6445 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6446 { 6447 struct sf_hment *sfhmep; 6448 struct pa_hment *pahmep; 6449 int (*f)(caddr_t, uint_t, uint_t, void *); 6450 int ret; 6451 id_t id; 6452 int locked = 0; 6453 kmutex_t *pml; 6454 6455 ASSERT(PAGE_EXCL(pp)); 6456 if (!sfmmu_mlist_held(pp)) { 6457 pml = sfmmu_mlist_enter(pp); 6458 locked = 1; 6459 } 6460 6461 if (capture_cpus) 6462 *capture_cpus = 0; 6463 6464 top: 6465 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6466 /* 6467 * skip sf_hments corresponding to VA<->PA mappings; 6468 * for pa_hment's, hme_tte.ll is zero 6469 */ 6470 if (!IS_PAHME(sfhmep)) 6471 continue; 6472 6473 pahmep = sfhmep->hme_data; 6474 ASSERT(pahmep != NULL); 6475 6476 /* 6477 * skip if pre-handler has been called earlier in this loop 6478 */ 6479 if (pahmep->flags & flag) 6480 continue; 6481 6482 id = pahmep->cb_id; 6483 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6484 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6485 *capture_cpus = 1; 6486 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6487 pahmep->flags |= flag; 6488 continue; 6489 } 6490 6491 /* 6492 * Drop the mapping list lock to avoid locking order issues. 6493 */ 6494 if (locked) 6495 sfmmu_mlist_exit(pml); 6496 6497 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6498 if (ret != 0) 6499 return (ret); /* caller must do the cleanup */ 6500 6501 if (locked) { 6502 pml = sfmmu_mlist_enter(pp); 6503 pahmep->flags |= flag; 6504 goto top; 6505 } 6506 6507 pahmep->flags |= flag; 6508 } 6509 6510 if (locked) 6511 sfmmu_mlist_exit(pml); 6512 6513 return (0); 6514 } 6515 6516 /* 6517 * Execute post-callback handler of each pa_hment linked to pp 6518 * 6519 * Same overall assumptions and restrictions apply as for 6520 * hat_pageprocess_precallbacks(). 6521 */ 6522 static void 6523 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6524 { 6525 pfn_t pgpfn = pp->p_pagenum; 6526 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6527 pfn_t newpfn; 6528 struct sf_hment *sfhmep; 6529 struct pa_hment *pahmep; 6530 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6531 id_t id; 6532 int locked = 0; 6533 kmutex_t *pml; 6534 6535 ASSERT(PAGE_EXCL(pp)); 6536 if (!sfmmu_mlist_held(pp)) { 6537 pml = sfmmu_mlist_enter(pp); 6538 locked = 1; 6539 } 6540 6541 top: 6542 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6543 /* 6544 * skip sf_hments corresponding to VA<->PA mappings; 6545 * for pa_hment's, hme_tte.ll is zero 6546 */ 6547 if (!IS_PAHME(sfhmep)) 6548 continue; 6549 6550 pahmep = sfhmep->hme_data; 6551 ASSERT(pahmep != NULL); 6552 6553 if ((pahmep->flags & flag) == 0) 6554 continue; 6555 6556 pahmep->flags &= ~flag; 6557 6558 id = pahmep->cb_id; 6559 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6560 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6561 continue; 6562 6563 /* 6564 * Convert the base page PFN into the constituent PFN 6565 * which is needed by the callback handler. 6566 */ 6567 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6568 6569 /* 6570 * Drop the mapping list lock to avoid locking order issues. 
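 * As with the pre-handlers, the p_mapping walk restarts from the top
 * once the lock is re-taken.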
6571 */ 6572 if (locked) 6573 sfmmu_mlist_exit(pml); 6574 6575 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6576 != 0) 6577 panic("sfmmu: posthandler failed"); 6578 6579 if (locked) { 6580 pml = sfmmu_mlist_enter(pp); 6581 goto top; 6582 } 6583 } 6584 6585 if (locked) 6586 sfmmu_mlist_exit(pml); 6587 } 6588 6589 /* 6590 * Suspend locked kernel mapping 6591 */ 6592 void 6593 hat_pagesuspend(struct page *pp) 6594 { 6595 struct sf_hment *sfhmep; 6596 sfmmu_t *sfmmup; 6597 tte_t tte, ttemod; 6598 struct hme_blk *hmeblkp; 6599 caddr_t addr; 6600 int index, cons; 6601 cpuset_t cpuset; 6602 6603 ASSERT(PAGE_EXCL(pp)); 6604 ASSERT(sfmmu_mlist_held(pp)); 6605 6606 mutex_enter(&kpr_suspendlock); 6607 6608 /* 6609 * We're about to suspend a kernel mapping so mark this thread as 6610 * non-traceable by DTrace. This prevents us from running into issues 6611 * with probe context trying to touch a suspended page 6612 * in the relocation codepath itself. 6613 */ 6614 curthread->t_flag |= T_DONTDTRACE; 6615 6616 index = PP_MAPINDEX(pp); 6617 cons = TTE8K; 6618 6619 retry: 6620 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6621 6622 if (IS_PAHME(sfhmep)) 6623 continue; 6624 6625 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6626 continue; 6627 6628 /* 6629 * Loop until we successfully set the suspend bit in 6630 * the TTE. 6631 */ 6632 again: 6633 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6634 ASSERT(TTE_IS_VALID(&tte)); 6635 6636 ttemod = tte; 6637 TTE_SET_SUSPEND(&ttemod); 6638 if (sfmmu_modifytte_try(&tte, &ttemod, 6639 &sfhmep->hme_tte) < 0) 6640 goto again; 6641 6642 /* 6643 * Invalidate TSB entry 6644 */ 6645 hmeblkp = sfmmu_hmetohblk(sfhmep); 6646 6647 sfmmup = hblktosfmmu(hmeblkp); 6648 ASSERT(sfmmup == ksfmmup); 6649 ASSERT(!hmeblkp->hblk_shared); 6650 6651 addr = tte_to_vaddr(hmeblkp, tte); 6652 6653 /* 6654 * No need to make sure that the TSB for this sfmmu is 6655 * not being relocated since it is ksfmmup and thus it 6656 * will never be relocated. 6657 */ 6658 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6659 6660 /* 6661 * Update xcall stats 6662 */ 6663 cpuset = cpu_ready_set; 6664 CPUSET_DEL(cpuset, CPU->cpu_id); 6665 6666 /* LINTED: constant in conditional context */ 6667 SFMMU_XCALL_STATS(ksfmmup); 6668 6669 /* 6670 * Flush TLB entry on remote CPU's 6671 */ 6672 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6673 (uint64_t)ksfmmup); 6674 xt_sync(cpuset); 6675 6676 /* 6677 * Flush TLB entry on local CPU 6678 */ 6679 vtag_flushpage(addr, (uint64_t)ksfmmup); 6680 } 6681 6682 while (index != 0) { 6683 index = index >> 1; 6684 if (index != 0) 6685 cons++; 6686 if (index & 0x1) { 6687 pp = PP_GROUPLEADER(pp, cons); 6688 goto retry; 6689 } 6690 } 6691 } 6692 6693 #ifdef DEBUG 6694 6695 #define N_PRLE 1024 6696 struct prle { 6697 page_t *targ; 6698 page_t *repl; 6699 int status; 6700 int pausecpus; 6701 hrtime_t whence; 6702 }; 6703 6704 static struct prle page_relocate_log[N_PRLE]; 6705 static int prl_entry; 6706 static kmutex_t prl_mutex; 6707 6708 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6709 mutex_enter(&prl_mutex); \ 6710 page_relocate_log[prl_entry].targ = *(t); \ 6711 page_relocate_log[prl_entry].repl = *(r); \ 6712 page_relocate_log[prl_entry].status = (s); \ 6713 page_relocate_log[prl_entry].pausecpus = (p); \ 6714 page_relocate_log[prl_entry].whence = gethrtime(); \ 6715 prl_entry = (prl_entry == (N_PRLE - 1))? 
0 : prl_entry + 1; \ 6716 mutex_exit(&prl_mutex); 6717 6718 #else /* !DEBUG */ 6719 #define PAGE_RELOCATE_LOG(t, r, s, p) 6720 #endif 6721 6722 /* 6723 * Core Kernel Page Relocation Algorithm 6724 * 6725 * Input: 6726 * 6727 * target : constituent pages are SE_EXCL locked. 6728 * replacement: constituent pages are SE_EXCL locked. 6729 * 6730 * Output: 6731 * 6732 * nrelocp: number of pages relocated 6733 */ 6734 int 6735 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6736 { 6737 page_t *targ, *repl; 6738 page_t *tpp, *rpp; 6739 kmutex_t *low, *high; 6740 spgcnt_t npages, i; 6741 page_t *pl = NULL; 6742 int old_pil; 6743 cpuset_t cpuset; 6744 int cap_cpus; 6745 int ret; 6746 #ifdef VAC 6747 int cflags = 0; 6748 #endif 6749 6750 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6751 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6752 return (EAGAIN); 6753 } 6754 6755 mutex_enter(&kpr_mutex); 6756 kreloc_thread = curthread; 6757 6758 targ = *target; 6759 repl = *replacement; 6760 ASSERT(repl != NULL); 6761 ASSERT(targ->p_szc == repl->p_szc); 6762 6763 npages = page_get_pagecnt(targ->p_szc); 6764 6765 /* 6766 * unload VA<->PA mappings that are not locked 6767 */ 6768 tpp = targ; 6769 for (i = 0; i < npages; i++) { 6770 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6771 tpp++; 6772 } 6773 6774 /* 6775 * Do "presuspend" callbacks, in a context from which we can still 6776 * block as needed. Note that we don't hold the mapping list lock 6777 * of "targ" at this point due to potential locking order issues; 6778 * we assume that between the hat_pageunload() above and holding 6779 * the SE_EXCL lock that the mapping list *cannot* change at this 6780 * point. 6781 */ 6782 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6783 if (ret != 0) { 6784 /* 6785 * EIO translates to fatal error, for all others cleanup 6786 * and return EAGAIN. 6787 */ 6788 ASSERT(ret != EIO); 6789 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6790 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6791 kreloc_thread = NULL; 6792 mutex_exit(&kpr_mutex); 6793 return (EAGAIN); 6794 } 6795 6796 /* 6797 * acquire p_mapping list lock for both the target and replacement 6798 * root pages. 6799 * 6800 * low and high refer to the need to grab the mlist locks in a 6801 * specific order in order to prevent race conditions. Thus the 6802 * lower lock must be grabbed before the higher lock. 6803 * 6804 * This will block hat_unload's accessing p_mapping list. Since 6805 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6806 * blocked. Thus, no one else will be accessing the p_mapping list 6807 * while we suspend and reload the locked mapping below. 6808 */ 6809 tpp = targ; 6810 rpp = repl; 6811 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6812 6813 kpreempt_disable(); 6814 6815 /* 6816 * We raise our PIL to 13 so that we don't get captured by 6817 * another CPU or pinned by an interrupt thread. We can't go to 6818 * PIL 14 since the nexus driver(s) may need to interrupt at 6819 * that level in the case of IOMMU pseudo mappings. 6820 */ 6821 cpuset = cpu_ready_set; 6822 CPUSET_DEL(cpuset, CPU->cpu_id); 6823 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6824 old_pil = splr(XCALL_PIL); 6825 } else { 6826 old_pil = -1; 6827 xc_attention(cpuset); 6828 } 6829 ASSERT(getpil() == XCALL_PIL); 6830 6831 /* 6832 * Now do suspend callbacks. In the case of an IOMMU mapping 6833 * this will suspend all DMA activity to the page while it is 6834 * being relocated. 
Since we are well above LOCK_LEVEL and CPUs 6835 * may be captured at this point we should have acquired any needed 6836 * locks in the presuspend callback. 6837 */ 6838 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6839 if (ret != 0) { 6840 repl = targ; 6841 goto suspend_fail; 6842 } 6843 6844 /* 6845 * Raise the PIL yet again, this time to block all high-level 6846 * interrupts on this CPU. This is necessary to prevent an 6847 * interrupt routine from pinning the thread which holds the 6848 * mapping suspended and then touching the suspended page. 6849 * 6850 * Once the page is suspended we also need to be careful to 6851 * avoid calling any functions which touch any seg_kmem memory 6852 * since that memory may be backed by the very page we are 6853 * relocating in here! 6854 */ 6855 hat_pagesuspend(targ); 6856 6857 /* 6858 * Now that we are confident everybody has stopped using this page, 6859 * copy the page contents. Note we use a physical copy to prevent 6860 * locking issues and to avoid fpRAS because we can't handle it in 6861 * this context. 6862 */ 6863 for (i = 0; i < npages; i++, tpp++, rpp++) { 6864 #ifdef VAC 6865 /* 6866 * If the replacement has a different vcolor than 6867 * the one being replacd, we need to handle VAC 6868 * consistency for it just as we were setting up 6869 * a new mapping to it. 6870 */ 6871 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) && 6872 (tpp->p_vcolor != rpp->p_vcolor) && 6873 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) { 6874 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp)); 6875 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6876 rpp->p_pagenum); 6877 } 6878 #endif 6879 /* 6880 * Copy the contents of the page. 6881 */ 6882 ppcopy_kernel(tpp, rpp); 6883 } 6884 6885 tpp = targ; 6886 rpp = repl; 6887 for (i = 0; i < npages; i++, tpp++, rpp++) { 6888 /* 6889 * Copy attributes. VAC consistency was handled above, 6890 * if required. 6891 */ 6892 rpp->p_nrm = tpp->p_nrm; 6893 tpp->p_nrm = 0; 6894 rpp->p_index = tpp->p_index; 6895 tpp->p_index = 0; 6896 #ifdef VAC 6897 rpp->p_vcolor = tpp->p_vcolor; 6898 #endif 6899 } 6900 6901 /* 6902 * First, unsuspend the page, if we set the suspend bit, and transfer 6903 * the mapping list from the target page to the replacement page. 6904 * Next process postcallbacks; since pa_hment's are linked only to the 6905 * p_mapping list of root page, we don't iterate over the constituent 6906 * pages. 6907 */ 6908 hat_pagereload(targ, repl); 6909 6910 suspend_fail: 6911 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6912 6913 /* 6914 * Now lower our PIL and release any captured CPUs since we 6915 * are out of the "danger zone". After this it will again be 6916 * safe to acquire adaptive mutex locks, or to drop them... 6917 */ 6918 if (old_pil != -1) { 6919 splx(old_pil); 6920 } else { 6921 xc_dismissed(cpuset); 6922 } 6923 6924 kpreempt_enable(); 6925 6926 sfmmu_mlist_reloc_exit(low, high); 6927 6928 /* 6929 * Postsuspend callbacks should drop any locks held across 6930 * the suspend callbacks. As before, we don't hold the mapping 6931 * list lock at this point.. our assumption is that the mapping 6932 * list still can't change due to our holding SE_EXCL lock and 6933 * there being no unlocked mappings left. Hence the restriction 6934 * on calling context to hat_delete_callback() 6935 */ 6936 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6937 if (ret != 0) { 6938 /* 6939 * The second presuspend call failed: we got here through 6940 * the suspend_fail label above. 
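 * The mapping was never actually suspended in that case, so we just
 * undo the callbacks and return EAGAIN.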
6941 */ 6942 ASSERT(ret != EIO); 6943 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6944 kreloc_thread = NULL; 6945 mutex_exit(&kpr_mutex); 6946 return (EAGAIN); 6947 } 6948 6949 /* 6950 * Now that we're out of the performance critical section we can 6951 * take care of updating the hash table, since we still 6952 * hold all the pages locked SE_EXCL at this point we 6953 * needn't worry about things changing out from under us. 6954 */ 6955 tpp = targ; 6956 rpp = repl; 6957 for (i = 0; i < npages; i++, tpp++, rpp++) { 6958 6959 /* 6960 * replace targ with replacement in page_hash table 6961 */ 6962 targ = tpp; 6963 page_relocate_hash(rpp, targ); 6964 6965 /* 6966 * concatenate target; caller of platform_page_relocate() 6967 * expects target to be concatenated after returning. 6968 */ 6969 ASSERT(targ->p_next == targ); 6970 ASSERT(targ->p_prev == targ); 6971 page_list_concat(&pl, &targ); 6972 } 6973 6974 ASSERT(*target == pl); 6975 *nrelocp = npages; 6976 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6977 kreloc_thread = NULL; 6978 mutex_exit(&kpr_mutex); 6979 return (0); 6980 } 6981 6982 /* 6983 * Called when stray pa_hments are found attached to a page which is 6984 * being freed. Notify the subsystem which attached the pa_hment of 6985 * the error if it registered a suitable handler, else panic. 6986 */ 6987 static void 6988 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6989 { 6990 id_t cb_id = pahmep->cb_id; 6991 6992 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6993 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6994 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6995 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6996 return; /* non-fatal */ 6997 } 6998 panic("pa_hment leaked: 0x%p", (void *)pahmep); 6999 } 7000 7001 /* 7002 * Remove all mappings to page 'pp'. 7003 */ 7004 int 7005 hat_pageunload(struct page *pp, uint_t forceflag) 7006 { 7007 struct page *origpp = pp; 7008 struct sf_hment *sfhme, *tmphme; 7009 struct hme_blk *hmeblkp; 7010 kmutex_t *pml; 7011 #ifdef VAC 7012 kmutex_t *pmtx; 7013 #endif 7014 cpuset_t cpuset, tset; 7015 int index, cons; 7016 int xhme_blks; 7017 int pa_hments; 7018 7019 ASSERT(PAGE_EXCL(pp)); 7020 7021 retry_xhat: 7022 tmphme = NULL; 7023 xhme_blks = 0; 7024 pa_hments = 0; 7025 CPUSET_ZERO(cpuset); 7026 7027 pml = sfmmu_mlist_enter(pp); 7028 7029 #ifdef VAC 7030 if (pp->p_kpmref) 7031 sfmmu_kpm_pageunload(pp); 7032 ASSERT(!PP_ISMAPPED_KPM(pp)); 7033 #endif 7034 /* 7035 * Clear vpm reference. Since the page is exclusively locked 7036 * vpm cannot be referencing it. 7037 */ 7038 if (vpm_enable) { 7039 pp->p_vpmref = 0; 7040 } 7041 7042 index = PP_MAPINDEX(pp); 7043 cons = TTE8K; 7044 retry: 7045 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7046 tmphme = sfhme->hme_next; 7047 7048 if (IS_PAHME(sfhme)) { 7049 ASSERT(sfhme->hme_data != NULL); 7050 pa_hments++; 7051 continue; 7052 } 7053 7054 hmeblkp = sfmmu_hmetohblk(sfhme); 7055 if (hmeblkp->hblk_xhat_bit) { 7056 struct xhat_hme_blk *xblk = 7057 (struct xhat_hme_blk *)hmeblkp; 7058 7059 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 7060 pp, forceflag, XBLK2PROVBLK(xblk)); 7061 7062 xhme_blks = 1; 7063 continue; 7064 } 7065 7066 /* 7067 * If there are kernel mappings don't unload them, they will 7068 * be suspended. 
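 * (This only matters for SFMMU_KERNEL_RELOC callers, i.e.
 * hat_page_relocate(), which will suspend the locked kernel mappings
 * via hat_pagesuspend() instead.)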
7069 */ 7070 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7071 hmeblkp->hblk_tag.htag_id == ksfmmup) 7072 continue; 7073 7074 tset = sfmmu_pageunload(pp, sfhme, cons); 7075 CPUSET_OR(cpuset, tset); 7076 } 7077 7078 while (index != 0) { 7079 index = index >> 1; 7080 if (index != 0) 7081 cons++; 7082 if (index & 0x1) { 7083 /* Go to leading page */ 7084 pp = PP_GROUPLEADER(pp, cons); 7085 ASSERT(sfmmu_mlist_held(pp)); 7086 goto retry; 7087 } 7088 } 7089 7090 /* 7091 * cpuset may be empty if the page was only mapped by segkpm, 7092 * in which case we won't actually cross-trap. 7093 */ 7094 xt_sync(cpuset); 7095 7096 /* 7097 * The page should have no mappings at this point, unless 7098 * we were called from hat_page_relocate() in which case we 7099 * leave the locked mappings which will be suspended later. 7100 */ 7101 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 7102 (forceflag == SFMMU_KERNEL_RELOC)); 7103 7104 #ifdef VAC 7105 if (PP_ISTNC(pp)) { 7106 if (cons == TTE8K) { 7107 pmtx = sfmmu_page_enter(pp); 7108 PP_CLRTNC(pp); 7109 sfmmu_page_exit(pmtx); 7110 } else { 7111 conv_tnc(pp, cons); 7112 } 7113 } 7114 #endif /* VAC */ 7115 7116 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7117 /* 7118 * Unlink any pa_hments and free them, calling back 7119 * the responsible subsystem to notify it of the error. 7120 * This can occur in situations such as drivers leaking 7121 * DMA handles: naughty, but common enough that we'd like 7122 * to keep the system running rather than bringing it 7123 * down with an obscure error like "pa_hment leaked" 7124 * which doesn't aid the user in debugging their driver. 7125 */ 7126 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7127 tmphme = sfhme->hme_next; 7128 if (IS_PAHME(sfhme)) { 7129 struct pa_hment *pahmep = sfhme->hme_data; 7130 sfmmu_pahment_leaked(pahmep); 7131 HME_SUB(sfhme, pp); 7132 kmem_cache_free(pa_hment_cache, pahmep); 7133 } 7134 } 7135 7136 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 7137 } 7138 7139 sfmmu_mlist_exit(pml); 7140 7141 /* 7142 * XHAT may not have finished unloading pages 7143 * because some other thread was waiting for 7144 * mlist lock and XHAT_PAGEUNLOAD let it do 7145 * the job. 7146 */ 7147 if (xhme_blks) { 7148 pp = origpp; 7149 goto retry_xhat; 7150 } 7151 7152 return (0); 7153 } 7154 7155 cpuset_t 7156 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7157 { 7158 struct hme_blk *hmeblkp; 7159 sfmmu_t *sfmmup; 7160 tte_t tte, ttemod; 7161 #ifdef DEBUG 7162 tte_t orig_old; 7163 #endif /* DEBUG */ 7164 caddr_t addr; 7165 int ttesz; 7166 int ret; 7167 cpuset_t cpuset; 7168 7169 ASSERT(pp != NULL); 7170 ASSERT(sfmmu_mlist_held(pp)); 7171 ASSERT(!PP_ISKAS(pp)); 7172 7173 CPUSET_ZERO(cpuset); 7174 7175 hmeblkp = sfmmu_hmetohblk(sfhme); 7176 7177 readtte: 7178 sfmmu_copytte(&sfhme->hme_tte, &tte); 7179 if (TTE_IS_VALID(&tte)) { 7180 sfmmup = hblktosfmmu(hmeblkp); 7181 ttesz = get_hblk_ttesz(hmeblkp); 7182 /* 7183 * Only unload mappings of 'cons' size. 7184 */ 7185 if (ttesz != cons) 7186 return (cpuset); 7187 7188 /* 7189 * Note that we have p_mapping lock, but no hash lock here. 7190 * hblk_unload() has to have both hash lock AND p_mapping 7191 * lock before it tries to modify tte. So, the tte could 7192 * not become invalid in the sfmmu_modifytte_try() below. 
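 * Only the ref/mod bits can change underneath us, so a failed cas
 * below simply retries from readtte.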
7193 */ 7194 ttemod = tte; 7195 #ifdef DEBUG 7196 orig_old = tte; 7197 #endif /* DEBUG */ 7198 7199 TTE_SET_INVALID(&ttemod); 7200 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7201 if (ret < 0) { 7202 #ifdef DEBUG 7203 /* only R/M bits can change. */ 7204 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7205 #endif /* DEBUG */ 7206 goto readtte; 7207 } 7208 7209 if (ret == 0) { 7210 panic("pageunload: cas failed?"); 7211 } 7212 7213 addr = tte_to_vaddr(hmeblkp, tte); 7214 7215 if (hmeblkp->hblk_shared) { 7216 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7217 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7218 sf_region_t *rgnp; 7219 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7220 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7221 ASSERT(srdp != NULL); 7222 rgnp = srdp->srd_hmergnp[rid]; 7223 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7224 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7225 sfmmu_ttesync(NULL, addr, &tte, pp); 7226 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7227 atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1); 7228 } else { 7229 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7230 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 7231 7232 /* 7233 * We need to flush the page from the virtual cache 7234 * in order to prevent a virtual cache alias 7235 * inconsistency. The particular scenario we need 7236 * to worry about is: 7237 * Given: va1 and va2 are two virtual addresses that 7238 * alias and will map the same physical address. 7239 * 1. mapping exists from va1 to pa and data has 7240 * been read into the cache. 7241 * 2. unload va1. 7242 * 3. load va2 and modify data using va2. 7243 * 4. unload va2. 7244 * 5. load va1 and reference data. Unless we flush 7245 * the data cache when we unload we will get 7246 * stale data. 7247 * This scenario is taken care of by using virtual 7248 * page coloring. 7249 */ 7250 if (sfmmup->sfmmu_ismhat) { 7251 /* 7252 * Flush TSBs, TLBs and caches 7253 * of every process 7254 * sharing this ism segment. 7255 */ 7256 sfmmu_hat_lock_all(); 7257 mutex_enter(&ism_mlist_lock); 7258 kpreempt_disable(); 7259 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7260 pp->p_pagenum, CACHE_NO_FLUSH); 7261 kpreempt_enable(); 7262 mutex_exit(&ism_mlist_lock); 7263 sfmmu_hat_unlock_all(); 7264 cpuset = cpu_ready_set; 7265 } else { 7266 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7267 cpuset = sfmmup->sfmmu_cpusran; 7268 } 7269 } 7270 7271 /* 7272 * Hme_sub has to run after ttesync() and a_rss update. 7273 * See hblk_unload(). 7274 */ 7275 HME_SUB(sfhme, pp); 7276 membar_stst(); 7277 7278 /* 7279 * We cannot make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7280 * since pteload may have done a HME_ADD() right after 7281 * we did the HME_SUB() above. Hmecnt is now maintained 7282 * by cas only; no lock guarantees its value. The only 7283 * guarantee we have is that the hmecnt should not be less than 7284 * what it should be, so the hblk will not be taken away. 7285 * It's also important that we decrement the hmecnt only after 7286 * we are done with hmeblkp so that this hmeblk won't be 7287 * stolen. 7288 */ 7289 ASSERT(hmeblkp->hblk_hmecnt > 0); 7290 ASSERT(hmeblkp->hblk_vcnt > 0); 7291 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 7292 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 7293 /* 7294 * This is bug 4063182. 7295 * XXX: fixme 7296 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7297 * !hmeblkp->hblk_lckcnt); 7298 */ 7299 } else { 7300 panic("invalid tte?
pp %p &tte %p", 7301 (void *)pp, (void *)&tte); 7302 } 7303 7304 return (cpuset); 7305 } 7306 7307 /* 7308 * While relocating a kernel page, this function will move the mappings 7309 * from tpp to dpp and modify any associated data with these mappings. 7310 * It also unsuspends the suspended kernel mapping. 7311 */ 7312 static void 7313 hat_pagereload(struct page *tpp, struct page *dpp) 7314 { 7315 struct sf_hment *sfhme; 7316 tte_t tte, ttemod; 7317 int index, cons; 7318 7319 ASSERT(getpil() == PIL_MAX); 7320 ASSERT(sfmmu_mlist_held(tpp)); 7321 ASSERT(sfmmu_mlist_held(dpp)); 7322 7323 index = PP_MAPINDEX(tpp); 7324 cons = TTE8K; 7325 7326 /* Update real mappings to the page */ 7327 retry: 7328 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7329 if (IS_PAHME(sfhme)) 7330 continue; 7331 sfmmu_copytte(&sfhme->hme_tte, &tte); 7332 ttemod = tte; 7333 7334 /* 7335 * replace old pfn with new pfn in TTE 7336 */ 7337 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7338 7339 /* 7340 * clear suspend bit 7341 */ 7342 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7343 TTE_CLR_SUSPEND(&ttemod); 7344 7345 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7346 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7347 7348 /* 7349 * set hme_page point to new page 7350 */ 7351 sfhme->hme_page = dpp; 7352 } 7353 7354 /* 7355 * move p_mapping list from old page to new page 7356 */ 7357 dpp->p_mapping = tpp->p_mapping; 7358 tpp->p_mapping = NULL; 7359 dpp->p_share = tpp->p_share; 7360 tpp->p_share = 0; 7361 7362 while (index != 0) { 7363 index = index >> 1; 7364 if (index != 0) 7365 cons++; 7366 if (index & 0x1) { 7367 tpp = PP_GROUPLEADER(tpp, cons); 7368 dpp = PP_GROUPLEADER(dpp, cons); 7369 goto retry; 7370 } 7371 } 7372 7373 curthread->t_flag &= ~T_DONTDTRACE; 7374 mutex_exit(&kpr_suspendlock); 7375 } 7376 7377 uint_t 7378 hat_pagesync(struct page *pp, uint_t clearflag) 7379 { 7380 struct sf_hment *sfhme, *tmphme = NULL; 7381 struct hme_blk *hmeblkp; 7382 kmutex_t *pml; 7383 cpuset_t cpuset, tset; 7384 int index, cons; 7385 extern ulong_t po_share; 7386 page_t *save_pp = pp; 7387 int stop_on_sh = 0; 7388 uint_t shcnt; 7389 7390 CPUSET_ZERO(cpuset); 7391 7392 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7393 return (PP_GENERIC_ATTR(pp)); 7394 } 7395 7396 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7397 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7398 return (PP_GENERIC_ATTR(pp)); 7399 } 7400 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7401 return (PP_GENERIC_ATTR(pp)); 7402 } 7403 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7404 if (pp->p_share > po_share) { 7405 hat_page_setattr(pp, P_REF); 7406 return (PP_GENERIC_ATTR(pp)); 7407 } 7408 stop_on_sh = 1; 7409 shcnt = 0; 7410 } 7411 } 7412 7413 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7414 pml = sfmmu_mlist_enter(pp); 7415 index = PP_MAPINDEX(pp); 7416 cons = TTE8K; 7417 retry: 7418 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7419 /* 7420 * We need to save the next hment on the list since 7421 * it is possible for pagesync to remove an invalid hment 7422 * from the list. 7423 */ 7424 tmphme = sfhme->hme_next; 7425 if (IS_PAHME(sfhme)) 7426 continue; 7427 /* 7428 * If we are looking for large mappings and this hme doesn't 7429 * reach the range we are seeking, just ignore it. 
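 * (hme_size() is compared against cons, the constituent page size
 * currently being processed.)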
7430 */ 7431 hmeblkp = sfmmu_hmetohblk(sfhme); 7432 if (hmeblkp->hblk_xhat_bit) 7433 continue; 7434 7435 if (hme_size(sfhme) < cons) 7436 continue; 7437 7438 if (stop_on_sh) { 7439 if (hmeblkp->hblk_shared) { 7440 sf_srd_t *srdp = hblktosrd(hmeblkp); 7441 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7442 sf_region_t *rgnp; 7443 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7444 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7445 ASSERT(srdp != NULL); 7446 rgnp = srdp->srd_hmergnp[rid]; 7447 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7448 rgnp, rid); 7449 shcnt += rgnp->rgn_refcnt; 7450 } else { 7451 shcnt++; 7452 } 7453 if (shcnt > po_share) { 7454 /* 7455 * tell the pager to spare the page this time 7456 * around. 7457 */ 7458 hat_page_setattr(save_pp, P_REF); 7459 index = 0; 7460 break; 7461 } 7462 } 7463 tset = sfmmu_pagesync(pp, sfhme, 7464 clearflag & ~HAT_SYNC_STOPON_RM); 7465 CPUSET_OR(cpuset, tset); 7466 7467 /* 7468 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7469 * as the "ref" or "mod" is set or share cnt exceeds po_share. 7470 */ 7471 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7472 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7473 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7474 index = 0; 7475 break; 7476 } 7477 } 7478 7479 while (index) { 7480 index = index >> 1; 7481 cons++; 7482 if (index & 0x1) { 7483 /* Go to leading page */ 7484 pp = PP_GROUPLEADER(pp, cons); 7485 goto retry; 7486 } 7487 } 7488 7489 xt_sync(cpuset); 7490 sfmmu_mlist_exit(pml); 7491 return (PP_GENERIC_ATTR(save_pp)); 7492 } 7493 7494 /* 7495 * Get all the hardware dependent attributes for a page struct 7496 */ 7497 static cpuset_t 7498 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7499 uint_t clearflag) 7500 { 7501 caddr_t addr; 7502 tte_t tte, ttemod; 7503 struct hme_blk *hmeblkp; 7504 int ret; 7505 sfmmu_t *sfmmup; 7506 cpuset_t cpuset; 7507 7508 ASSERT(pp != NULL); 7509 ASSERT(sfmmu_mlist_held(pp)); 7510 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7511 (clearflag == HAT_SYNC_ZERORM)); 7512 7513 SFMMU_STAT(sf_pagesync); 7514 7515 CPUSET_ZERO(cpuset); 7516 7517 sfmmu_pagesync_retry: 7518 7519 sfmmu_copytte(&sfhme->hme_tte, &tte); 7520 if (TTE_IS_VALID(&tte)) { 7521 hmeblkp = sfmmu_hmetohblk(sfhme); 7522 sfmmup = hblktosfmmu(hmeblkp); 7523 addr = tte_to_vaddr(hmeblkp, tte); 7524 if (clearflag == HAT_SYNC_ZERORM) { 7525 ttemod = tte; 7526 TTE_CLR_RM(&ttemod); 7527 ret = sfmmu_modifytte_try(&tte, &ttemod, 7528 &sfhme->hme_tte); 7529 if (ret < 0) { 7530 /* 7531 * cas failed and the new value is not what 7532 * we want. 7533 */ 7534 goto sfmmu_pagesync_retry; 7535 } 7536 7537 if (ret > 0) { 7538 /* we win the cas */ 7539 if (hmeblkp->hblk_shared) { 7540 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7541 uint_t rid = 7542 hmeblkp->hblk_tag.htag_rid; 7543 sf_region_t *rgnp; 7544 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7545 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7546 ASSERT(srdp != NULL); 7547 rgnp = srdp->srd_hmergnp[rid]; 7548 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7549 srdp, rgnp, rid); 7550 cpuset = sfmmu_rgntlb_demap(addr, 7551 rgnp, hmeblkp, 1); 7552 } else { 7553 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7554 0, 0); 7555 cpuset = sfmmup->sfmmu_cpusran; 7556 } 7557 } 7558 } 7559 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7560 &tte, pp); 7561 } 7562 return (cpuset); 7563 } 7564 7565 /* 7566 * Remove write permission from a mappings to a page, so that 7567 * we can detect the next modification of it. 
This requires modifying 7568 * the TTE then invalidating (demap) any TLB entry using that TTE. 7569 * This code is similar to sfmmu_pagesync(). 7570 */ 7571 static cpuset_t 7572 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7573 { 7574 caddr_t addr; 7575 tte_t tte; 7576 tte_t ttemod; 7577 struct hme_blk *hmeblkp; 7578 int ret; 7579 sfmmu_t *sfmmup; 7580 cpuset_t cpuset; 7581 7582 ASSERT(pp != NULL); 7583 ASSERT(sfmmu_mlist_held(pp)); 7584 7585 CPUSET_ZERO(cpuset); 7586 SFMMU_STAT(sf_clrwrt); 7587 7588 retry: 7589 7590 sfmmu_copytte(&sfhme->hme_tte, &tte); 7591 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7592 hmeblkp = sfmmu_hmetohblk(sfhme); 7593 7594 /* 7595 * xhat mappings should never be to a VMODSORT page. 7596 */ 7597 ASSERT(hmeblkp->hblk_xhat_bit == 0); 7598 7599 sfmmup = hblktosfmmu(hmeblkp); 7600 addr = tte_to_vaddr(hmeblkp, tte); 7601 7602 ttemod = tte; 7603 TTE_CLR_WRT(&ttemod); 7604 TTE_CLR_MOD(&ttemod); 7605 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7606 7607 /* 7608 * if cas failed and the new value is not what 7609 * we want retry 7610 */ 7611 if (ret < 0) 7612 goto retry; 7613 7614 /* we win the cas */ 7615 if (ret > 0) { 7616 if (hmeblkp->hblk_shared) { 7617 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7618 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7619 sf_region_t *rgnp; 7620 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7621 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7622 ASSERT(srdp != NULL); 7623 rgnp = srdp->srd_hmergnp[rid]; 7624 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7625 srdp, rgnp, rid); 7626 cpuset = sfmmu_rgntlb_demap(addr, 7627 rgnp, hmeblkp, 1); 7628 } else { 7629 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7630 cpuset = sfmmup->sfmmu_cpusran; 7631 } 7632 } 7633 } 7634 7635 return (cpuset); 7636 } 7637 7638 /* 7639 * Walk all mappings of a page, removing write permission and clearing the 7640 * ref/mod bits. This code is similar to hat_pagesync() 7641 */ 7642 static void 7643 hat_page_clrwrt(page_t *pp) 7644 { 7645 struct sf_hment *sfhme; 7646 struct sf_hment *tmphme = NULL; 7647 kmutex_t *pml; 7648 cpuset_t cpuset; 7649 cpuset_t tset; 7650 int index; 7651 int cons; 7652 7653 CPUSET_ZERO(cpuset); 7654 7655 pml = sfmmu_mlist_enter(pp); 7656 index = PP_MAPINDEX(pp); 7657 cons = TTE8K; 7658 retry: 7659 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7660 tmphme = sfhme->hme_next; 7661 7662 /* 7663 * If we are looking for large mappings and this hme doesn't 7664 * reach the range we are seeking, just ignore its. 7665 */ 7666 7667 if (hme_size(sfhme) < cons) 7668 continue; 7669 7670 tset = sfmmu_pageclrwrt(pp, sfhme); 7671 CPUSET_OR(cpuset, tset); 7672 } 7673 7674 while (index) { 7675 index = index >> 1; 7676 cons++; 7677 if (index & 0x1) { 7678 /* Go to leading page */ 7679 pp = PP_GROUPLEADER(pp, cons); 7680 goto retry; 7681 } 7682 } 7683 7684 xt_sync(cpuset); 7685 sfmmu_mlist_exit(pml); 7686 } 7687 7688 /* 7689 * Set the given REF/MOD/RO bits for the given page. 7690 * For a vnode with a sorted v_pages list, we need to change 7691 * the attributes and the v_pages list together under page_vnode_mutex. 
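 * Callers may pass P_NSH to request that the page not be reshuffled
 * on the v_pages list.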
7692 */ 7693 void 7694 hat_page_setattr(page_t *pp, uint_t flag) 7695 { 7696 vnode_t *vp = pp->p_vnode; 7697 page_t **listp; 7698 kmutex_t *pmtx; 7699 kmutex_t *vphm = NULL; 7700 int noshuffle; 7701 7702 noshuffle = flag & P_NSH; 7703 flag &= ~P_NSH; 7704 7705 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7706 7707 /* 7708 * nothing to do if attribute already set 7709 */ 7710 if ((pp->p_nrm & flag) == flag) 7711 return; 7712 7713 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7714 !noshuffle) { 7715 vphm = page_vnode_mutex(vp); 7716 mutex_enter(vphm); 7717 } 7718 7719 pmtx = sfmmu_page_enter(pp); 7720 pp->p_nrm |= flag; 7721 sfmmu_page_exit(pmtx); 7722 7723 if (vphm != NULL) { 7724 /* 7725 * Some File Systems examine v_pages for NULL w/o 7726 * grabbing the vphm mutex. Must not let it become NULL when 7727 * pp is the only page on the list. 7728 */ 7729 if (pp->p_vpnext != pp) { 7730 page_vpsub(&vp->v_pages, pp); 7731 if (vp->v_pages != NULL) 7732 listp = &vp->v_pages->p_vpprev->p_vpnext; 7733 else 7734 listp = &vp->v_pages; 7735 page_vpadd(listp, pp); 7736 } 7737 mutex_exit(vphm); 7738 } 7739 } 7740 7741 void 7742 hat_page_clrattr(page_t *pp, uint_t flag) 7743 { 7744 vnode_t *vp = pp->p_vnode; 7745 kmutex_t *pmtx; 7746 7747 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7748 7749 pmtx = sfmmu_page_enter(pp); 7750 7751 /* 7752 * Caller is expected to hold page's io lock for VMODSORT to work 7753 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7754 * bit is cleared. 7755 * We don't have assert to avoid tripping some existing third party 7756 * code. The dirty page is moved back to top of the v_page list 7757 * after IO is done in pvn_write_done(). 7758 */ 7759 pp->p_nrm &= ~flag; 7760 sfmmu_page_exit(pmtx); 7761 7762 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7763 7764 /* 7765 * VMODSORT works by removing write permissions and getting 7766 * a fault when a page is made dirty. At this point 7767 * we need to remove write permission from all mappings 7768 * to this page. 7769 */ 7770 hat_page_clrwrt(pp); 7771 } 7772 } 7773 7774 uint_t 7775 hat_page_getattr(page_t *pp, uint_t flag) 7776 { 7777 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7778 return ((uint_t)(pp->p_nrm & flag)); 7779 } 7780 7781 /* 7782 * DEBUG kernels: verify that a kernel va<->pa translation 7783 * is safe by checking the underlying page_t is in a page 7784 * relocation-safe state. 7785 */ 7786 #ifdef DEBUG 7787 void 7788 sfmmu_check_kpfn(pfn_t pfn) 7789 { 7790 page_t *pp; 7791 int index, cons; 7792 7793 if (hat_check_vtop == 0) 7794 return; 7795 7796 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7797 return; 7798 7799 pp = page_numtopp_nolock(pfn); 7800 if (!pp) 7801 return; 7802 7803 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7804 return; 7805 7806 /* 7807 * Handed a large kernel page, we dig up the root page since we 7808 * know the root page might have the lock also. 7809 */ 7810 if (pp->p_szc != 0) { 7811 index = PP_MAPINDEX(pp); 7812 cons = TTE8K; 7813 again: 7814 while (index != 0) { 7815 index >>= 1; 7816 if (index != 0) 7817 cons++; 7818 if (index & 0x1) { 7819 pp = PP_GROUPLEADER(pp, cons); 7820 goto again; 7821 } 7822 } 7823 } 7824 7825 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7826 return; 7827 7828 /* 7829 * Pages need to be locked or allocated "permanent" (either from 7830 * static_arena arena or explicitly setting PG_NORELOC when calling 7831 * page_create_va()) for VA->PA translations to be valid. 
7832 */ 7833 if (!PP_ISNORELOC(pp)) 7834 panic("Illegal VA->PA translation, pp 0x%p not permanent", 7835 (void *)pp); 7836 else 7837 panic("Illegal VA->PA translation, pp 0x%p not locked", 7838 (void *)pp); 7839 } 7840 #endif /* DEBUG */ 7841 7842 /* 7843 * Returns a page frame number for a given virtual address. 7844 * Returns PFN_INVALID to indicate an invalid mapping 7845 */ 7846 pfn_t 7847 hat_getpfnum(struct hat *hat, caddr_t addr) 7848 { 7849 pfn_t pfn; 7850 tte_t tte; 7851 7852 /* 7853 * We would like to 7854 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7855 * but we can't because the iommu driver will call this 7856 * routine at interrupt time and it can't grab the as lock 7857 * or it will deadlock: A thread could have the as lock 7858 * and be waiting for io. The io can't complete 7859 * because the interrupt thread is blocked trying to grab 7860 * the as lock. 7861 */ 7862 7863 ASSERT(hat->sfmmu_xhat_provider == NULL); 7864 7865 if (hat == ksfmmup) { 7866 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7867 ASSERT(segkmem_lpszc > 0); 7868 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7869 if (pfn != PFN_INVALID) { 7870 sfmmu_check_kpfn(pfn); 7871 return (pfn); 7872 } 7873 } else if (segkpm && IS_KPM_ADDR(addr)) { 7874 return (sfmmu_kpm_vatopfn(addr)); 7875 } 7876 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7877 == PFN_SUSPENDED) { 7878 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7879 } 7880 sfmmu_check_kpfn(pfn); 7881 return (pfn); 7882 } else { 7883 return (sfmmu_uvatopfn(addr, hat, NULL)); 7884 } 7885 } 7886 7887 /* 7888 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7889 * Use hat_getpfnum(kas.a_hat, ...) instead. 7890 * 7891 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7892 * but can't right now due to the fact that some software has grown to use 7893 * this interface incorrectly. So for now when the interface is misused, 7894 * return a warning to the user that in the future it won't work in the 7895 * way they're abusing it, and carry on (after disabling page relocation). 7896 */ 7897 pfn_t 7898 hat_getkpfnum(caddr_t addr) 7899 { 7900 pfn_t pfn; 7901 tte_t tte; 7902 int badcaller = 0; 7903 extern int segkmem_reloc; 7904 7905 if (segkpm && IS_KPM_ADDR(addr)) { 7906 badcaller = 1; 7907 pfn = sfmmu_kpm_vatopfn(addr); 7908 } else { 7909 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7910 == PFN_SUSPENDED) { 7911 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7912 } 7913 badcaller = pf_is_memory(pfn); 7914 } 7915 7916 if (badcaller) { 7917 /* 7918 * We can't return PFN_INVALID or the caller may panic 7919 * or corrupt the system. The only alternative is to 7920 * disable page relocation at this point for all kernel 7921 * memory. This will impact any callers of page_relocate() 7922 * such as FMA or DR. 7923 * 7924 * RFE: Add junk here to spit out an ereport so the sysadmin 7925 * can be advised that he should upgrade his device driver 7926 * so that this doesn't happen. 7927 */ 7928 hat_getkpfnum_badcall(caller()); 7929 if (hat_kpr_enabled && segkmem_reloc) { 7930 hat_kpr_enabled = 0; 7931 segkmem_reloc = 0; 7932 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7933 } 7934 } 7935 return (pfn); 7936 } 7937 7938 /* 7939 * This routine will return both pfn and tte for the vaddr. 
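 * If vaddr lies within an ISM segment the lookup is redirected to the
 * underlying ISM hat, and for SRD-based shared regions the shared
 * hash is searched as well.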
7940 */ 7941 static pfn_t 7942 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 7943 { 7944 struct hmehash_bucket *hmebp; 7945 hmeblk_tag hblktag; 7946 int hmeshift, hashno = 1; 7947 struct hme_blk *hmeblkp = NULL; 7948 tte_t tte; 7949 7950 struct sf_hment *sfhmep; 7951 pfn_t pfn; 7952 7953 /* support for ISM */ 7954 ism_map_t *ism_map; 7955 ism_blk_t *ism_blkp; 7956 int i; 7957 sfmmu_t *ism_hatid = NULL; 7958 sfmmu_t *locked_hatid = NULL; 7959 sfmmu_t *sv_sfmmup = sfmmup; 7960 caddr_t sv_vaddr = vaddr; 7961 sf_srd_t *srdp; 7962 7963 if (ttep == NULL) { 7964 ttep = &tte; 7965 } else { 7966 ttep->ll = 0; 7967 } 7968 7969 ASSERT(sfmmup != ksfmmup); 7970 SFMMU_STAT(sf_user_vtop); 7971 /* 7972 * Set ism_hatid if vaddr falls in a ISM segment. 7973 */ 7974 ism_blkp = sfmmup->sfmmu_iblk; 7975 if (ism_blkp != NULL) { 7976 sfmmu_ismhat_enter(sfmmup, 0); 7977 locked_hatid = sfmmup; 7978 } 7979 while (ism_blkp != NULL && ism_hatid == NULL) { 7980 ism_map = ism_blkp->iblk_maps; 7981 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7982 if (vaddr >= ism_start(ism_map[i]) && 7983 vaddr < ism_end(ism_map[i])) { 7984 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7985 vaddr = (caddr_t)(vaddr - 7986 ism_start(ism_map[i])); 7987 break; 7988 } 7989 } 7990 ism_blkp = ism_blkp->iblk_next; 7991 } 7992 if (locked_hatid) { 7993 sfmmu_ismhat_exit(locked_hatid, 0); 7994 } 7995 7996 hblktag.htag_id = sfmmup; 7997 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 7998 do { 7999 hmeshift = HME_HASH_SHIFT(hashno); 8000 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 8001 hblktag.htag_rehash = hashno; 8002 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 8003 8004 SFMMU_HASH_LOCK(hmebp); 8005 8006 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 8007 if (hmeblkp != NULL) { 8008 ASSERT(!hmeblkp->hblk_shared); 8009 HBLKTOHME(sfhmep, hmeblkp, vaddr); 8010 sfmmu_copytte(&sfhmep->hme_tte, ttep); 8011 SFMMU_HASH_UNLOCK(hmebp); 8012 if (TTE_IS_VALID(ttep)) { 8013 pfn = TTE_TO_PFN(vaddr, ttep); 8014 return (pfn); 8015 } 8016 break; 8017 } 8018 SFMMU_HASH_UNLOCK(hmebp); 8019 hashno++; 8020 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 8021 8022 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 8023 return (PFN_INVALID); 8024 } 8025 srdp = sv_sfmmup->sfmmu_srdp; 8026 ASSERT(srdp != NULL); 8027 ASSERT(srdp->srd_refcnt != 0); 8028 hblktag.htag_id = srdp; 8029 hashno = 1; 8030 do { 8031 hmeshift = HME_HASH_SHIFT(hashno); 8032 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 8033 hblktag.htag_rehash = hashno; 8034 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 8035 8036 SFMMU_HASH_LOCK(hmebp); 8037 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 8038 hmeblkp = hmeblkp->hblk_next) { 8039 uint_t rid; 8040 sf_region_t *rgnp; 8041 caddr_t rsaddr; 8042 caddr_t readdr; 8043 8044 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 8045 sv_sfmmup->sfmmu_hmeregion_map)) { 8046 continue; 8047 } 8048 ASSERT(hmeblkp->hblk_shared); 8049 rid = hmeblkp->hblk_tag.htag_rid; 8050 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8051 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8052 rgnp = srdp->srd_hmergnp[rid]; 8053 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 8054 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 8055 sfmmu_copytte(&sfhmep->hme_tte, ttep); 8056 rsaddr = rgnp->rgn_saddr; 8057 readdr = rsaddr + rgnp->rgn_size; 8058 #ifdef DEBUG 8059 if (TTE_IS_VALID(ttep) || 8060 get_hblk_ttesz(hmeblkp) > TTE8K) { 8061 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 8062 ASSERT(eva > sv_vaddr); 8063 ASSERT(sv_vaddr >= rsaddr); 8064 
ASSERT(sv_vaddr < readdr); 8065 ASSERT(eva <= readdr); 8066 } 8067 #endif /* DEBUG */ 8068 /* 8069 * Continue the search if we 8070 * found an invalid 8K tte outside of the area 8071 * covered by this hmeblk's region. 8072 */ 8073 if (TTE_IS_VALID(ttep)) { 8074 SFMMU_HASH_UNLOCK(hmebp); 8075 pfn = TTE_TO_PFN(sv_vaddr, ttep); 8076 return (pfn); 8077 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8078 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8079 SFMMU_HASH_UNLOCK(hmebp); 8080 pfn = PFN_INVALID; 8081 return (pfn); 8082 } 8083 } 8084 SFMMU_HASH_UNLOCK(hmebp); 8085 hashno++; 8086 } while (hashno <= mmu_hashcnt); 8087 return (PFN_INVALID); 8088 } 8089 8090 8091 /* 8092 * For compatibility with AT&T and later optimizations 8093 */ 8094 /* ARGSUSED */ 8095 void 8096 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8097 { 8098 ASSERT(hat != NULL); 8099 ASSERT(hat->sfmmu_xhat_provider == NULL); 8100 } 8101 8102 /* 8103 * Return the number of mappings to a particular page. This number is an 8104 * approximation of the number of people sharing the page. 8105 * 8106 * Shared hmeblks or ism hmeblks are counted as 1 mapping here. 8107 * hat_page_checkshare() can be used to compare a threshold against a share 8108 * count that reflects the number of region sharers, albeit at higher cost. 8109 */ 8110 ulong_t 8111 hat_page_getshare(page_t *pp) 8112 { 8113 page_t *spp = pp; /* start page */ 8114 kmutex_t *pml; 8115 ulong_t cnt; 8116 int index, sz = TTE64K; 8117 8118 /* 8119 * We need to grab the mlist lock to make sure any outstanding 8120 * load/unloads complete. Otherwise we could return zero 8121 * even though the unload(s) haven't finished yet. 8122 */ 8123 pml = sfmmu_mlist_enter(spp); 8124 cnt = spp->p_share; 8125 8126 #ifdef VAC 8127 if (kpm_enable) 8128 cnt += spp->p_kpmref; 8129 #endif 8130 if (vpm_enable && pp->p_vpmref) { 8131 cnt += 1; 8132 } 8133 8134 /* 8135 * If we have any large mappings, we count the number of 8136 * mappings that this large page is part of. 8137 */ 8138 index = PP_MAPINDEX(spp); 8139 index >>= 1; 8140 while (index) { 8141 pp = PP_GROUPLEADER(spp, sz); 8142 if ((index & 0x1) && pp != spp) { 8143 cnt += pp->p_share; 8144 spp = pp; 8145 } 8146 index >>= 1; 8147 sz++; 8148 } 8149 sfmmu_mlist_exit(pml); 8150 return (cnt); 8151 } 8152 8153 /* 8154 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8155 * otherwise. Count shared hmeblks by region's refcnt.
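 *
 * Shared hmeblks contribute their region's rgn_refcnt to the count, so a
 * region mapped by N processes counts as N sharers here. The walk is
 * repeated for every larger page size recorded in PP_MAPINDEX().
 *
 * A minimal (hypothetical) caller sketch, assuming a threshold
 * "share_thresh" chosen by the caller:
 *
 *	if (hat_page_checkshare(pp, share_thresh))
 *		return;		/* page is too widely shared; skip it */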
8156 */ 8157 int 8158 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 8159 { 8160 kmutex_t *pml; 8161 ulong_t cnt = 0; 8162 int index, sz = TTE8K; 8163 struct sf_hment *sfhme, *tmphme = NULL; 8164 struct hme_blk *hmeblkp; 8165 8166 pml = sfmmu_mlist_enter(pp); 8167 8168 #ifdef VAC 8169 if (kpm_enable) 8170 cnt = pp->p_kpmref; 8171 #endif 8172 8173 if (vpm_enable && pp->p_vpmref) { 8174 cnt += 1; 8175 } 8176 8177 if (pp->p_share + cnt > sh_thresh) { 8178 sfmmu_mlist_exit(pml); 8179 return (1); 8180 } 8181 8182 index = PP_MAPINDEX(pp); 8183 8184 again: 8185 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 8186 tmphme = sfhme->hme_next; 8187 if (IS_PAHME(sfhme)) { 8188 continue; 8189 } 8190 8191 hmeblkp = sfmmu_hmetohblk(sfhme); 8192 if (hmeblkp->hblk_xhat_bit) { 8193 cnt++; 8194 if (cnt > sh_thresh) { 8195 sfmmu_mlist_exit(pml); 8196 return (1); 8197 } 8198 continue; 8199 } 8200 if (hme_size(sfhme) != sz) { 8201 continue; 8202 } 8203 8204 if (hmeblkp->hblk_shared) { 8205 sf_srd_t *srdp = hblktosrd(hmeblkp); 8206 uint_t rid = hmeblkp->hblk_tag.htag_rid; 8207 sf_region_t *rgnp; 8208 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8209 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8210 ASSERT(srdp != NULL); 8211 rgnp = srdp->srd_hmergnp[rid]; 8212 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 8213 rgnp, rid); 8214 cnt += rgnp->rgn_refcnt; 8215 } else { 8216 cnt++; 8217 } 8218 if (cnt > sh_thresh) { 8219 sfmmu_mlist_exit(pml); 8220 return (1); 8221 } 8222 } 8223 8224 index >>= 1; 8225 sz++; 8226 while (index) { 8227 pp = PP_GROUPLEADER(pp, sz); 8228 ASSERT(sfmmu_mlist_held(pp)); 8229 if (index & 0x1) { 8230 goto again; 8231 } 8232 index >>= 1; 8233 sz++; 8234 } 8235 sfmmu_mlist_exit(pml); 8236 return (0); 8237 } 8238 8239 /* 8240 * Unload all large mappings to the pp and reset the p_szc field of every 8241 * constituent page according to the remaining mappings. 8242 * 8243 * pp must be locked SE_EXCL. Even though no other constituent pages are 8244 * locked it's legal to unload the large mappings to the pp because all 8245 * constituent pages of large locked mappings have to be locked SE_SHARED. 8246 * This means if we have SE_EXCL lock on one of the constituent pages none of the 8247 * large mappings to pp are locked. 8248 * 8249 * Decrease p_szc field starting from the last constituent page and ending 8250 * with the root page. This method is used because other threads rely on the 8251 * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc 8252 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 8253 * ensures that p_szc changes of the constituent pages appear atomic for all 8254 * threads that use sfmmu_mlspl_enter() to examine the p_szc field. 8255 * 8256 * This mechanism is only used for file system pages where it's not always 8257 * possible to get SE_EXCL locks on all constituent pages to demote the size 8258 * code (as is done for anonymous or kernel large pages). 8259 * 8260 * See more comments in front of sfmmu_mlspl_enter().
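 *
 * For example, when a large page is demoted the constituent pages'
 * p_szc fields are rewritten starting at the last constituent and
 * walking back toward the root; membar_producer() makes those stores
 * visible before the root's p_szc is finally lowered, so a thread in
 * sfmmu_mlspl_enter() never sees a root size code newer than the
 * constituent values it covers.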
8261 */ 8262 void 8263 hat_page_demote(page_t *pp) 8264 { 8265 int index; 8266 int sz; 8267 cpuset_t cpuset; 8268 int sync = 0; 8269 page_t *rootpp; 8270 struct sf_hment *sfhme; 8271 struct sf_hment *tmphme = NULL; 8272 struct hme_blk *hmeblkp; 8273 uint_t pszc; 8274 page_t *lastpp; 8275 cpuset_t tset; 8276 pgcnt_t npgs; 8277 kmutex_t *pml; 8278 kmutex_t *pmtx = NULL; 8279 8280 ASSERT(PAGE_EXCL(pp)); 8281 ASSERT(!PP_ISFREE(pp)); 8282 ASSERT(!PP_ISKAS(pp)); 8283 ASSERT(page_szc_lock_assert(pp)); 8284 pml = sfmmu_mlist_enter(pp); 8285 8286 pszc = pp->p_szc; 8287 if (pszc == 0) { 8288 goto out; 8289 } 8290 8291 index = PP_MAPINDEX(pp) >> 1; 8292 8293 if (index) { 8294 CPUSET_ZERO(cpuset); 8295 sz = TTE64K; 8296 sync = 1; 8297 } 8298 8299 while (index) { 8300 if (!(index & 0x1)) { 8301 index >>= 1; 8302 sz++; 8303 continue; 8304 } 8305 ASSERT(sz <= pszc); 8306 rootpp = PP_GROUPLEADER(pp, sz); 8307 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8308 tmphme = sfhme->hme_next; 8309 ASSERT(!IS_PAHME(sfhme)); 8310 hmeblkp = sfmmu_hmetohblk(sfhme); 8311 if (hme_size(sfhme) != sz) { 8312 continue; 8313 } 8314 if (hmeblkp->hblk_xhat_bit) { 8315 cmn_err(CE_PANIC, 8316 "hat_page_demote: xhat hmeblk"); 8317 } 8318 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8319 CPUSET_OR(cpuset, tset); 8320 } 8321 if (index >>= 1) { 8322 sz++; 8323 } 8324 } 8325 8326 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8327 8328 if (sync) { 8329 xt_sync(cpuset); 8330 #ifdef VAC 8331 if (PP_ISTNC(pp)) { 8332 conv_tnc(rootpp, sz); 8333 } 8334 #endif /* VAC */ 8335 } 8336 8337 pmtx = sfmmu_page_enter(pp); 8338 8339 ASSERT(pp->p_szc == pszc); 8340 rootpp = PP_PAGEROOT(pp); 8341 ASSERT(rootpp->p_szc == pszc); 8342 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8343 8344 while (lastpp != rootpp) { 8345 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8346 ASSERT(sz < pszc); 8347 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8348 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8349 while (--npgs > 0) { 8350 lastpp->p_szc = (uchar_t)sz; 8351 lastpp = PP_PAGEPREV(lastpp); 8352 } 8353 if (sz) { 8354 /* 8355 * make sure before current root's pszc 8356 * is updated all updates to constituent pages pszc 8357 * fields are globally visible. 8358 */ 8359 membar_producer(); 8360 } 8361 lastpp->p_szc = sz; 8362 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8363 if (lastpp != rootpp) { 8364 lastpp = PP_PAGEPREV(lastpp); 8365 } 8366 } 8367 if (sz == 0) { 8368 /* the loop above doesn't cover this case */ 8369 rootpp->p_szc = 0; 8370 } 8371 out: 8372 ASSERT(pp->p_szc == 0); 8373 if (pmtx != NULL) { 8374 sfmmu_page_exit(pmtx); 8375 } 8376 sfmmu_mlist_exit(pml); 8377 } 8378 8379 /* 8380 * Refresh the HAT ismttecnt[] element for size szc. 8381 * Caller must have set ISM busy flag to prevent mapping 8382 * lists from changing while we're traversing them. 
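 *
 * Both sfmmu_ismttecnt[szc] (ISM segments private to this hat) and
 * sfmmu_scdismttecnt[szc] (ISM segments that belong to the hat's SCD)
 * are refreshed; only the private count is returned to the caller.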
8383 */ 8384 pgcnt_t 8385 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8386 { 8387 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8388 ism_map_t *ism_map; 8389 pgcnt_t npgs = 0; 8390 pgcnt_t npgs_scd = 0; 8391 int j; 8392 sf_scd_t *scdp; 8393 uchar_t rid; 8394 8395 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8396 scdp = sfmmup->sfmmu_scdp; 8397 8398 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8399 ism_map = ism_blkp->iblk_maps; 8400 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8401 rid = ism_map[j].imap_rid; 8402 ASSERT(rid == SFMMU_INVALID_ISMRID || 8403 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8404 8405 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8406 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8407 /* ISM is in sfmmup's SCD */ 8408 npgs_scd += 8409 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8410 } else { 8411 /* ISM is not in SCD */ 8412 npgs += 8413 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8414 } 8415 } 8416 } 8417 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8418 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8419 return (npgs); 8420 } 8421 8422 /* 8423 * Yield the memory claim requirement for an address space. 8424 * 8425 * This is currently implemented as the number of bytes that have active 8426 * hardware translations that have page structures. Therefore, it can 8427 * underestimate the traditional resident set size, e.g., if the 8428 * physical page is present and the hardware translation is missing; 8429 * and it can overestimate the rss, e.g., if there are active 8430 * translations to a frame buffer with page structs. 8431 * Also, it does not take sharing into account. 8432 * 8433 * Note that we don't acquire locks here since this function is most often 8434 * called from the clock thread. 8435 */ 8436 size_t 8437 hat_get_mapped_size(struct hat *hat) 8438 { 8439 size_t assize = 0; 8440 int i; 8441 8442 if (hat == NULL) 8443 return (0); 8444 8445 ASSERT(hat->sfmmu_xhat_provider == NULL); 8446 8447 for (i = 0; i < mmu_page_sizes; i++) 8448 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8449 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8450 8451 if (hat->sfmmu_iblk == NULL) 8452 return (assize); 8453 8454 for (i = 0; i < mmu_page_sizes; i++) 8455 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8456 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8457 8458 return (assize); 8459 } 8460 8461 int 8462 hat_stats_enable(struct hat *hat) 8463 { 8464 hatlock_t *hatlockp; 8465 8466 ASSERT(hat->sfmmu_xhat_provider == NULL); 8467 8468 hatlockp = sfmmu_hat_enter(hat); 8469 hat->sfmmu_rmstat++; 8470 sfmmu_hat_exit(hatlockp); 8471 return (1); 8472 } 8473 8474 void 8475 hat_stats_disable(struct hat *hat) 8476 { 8477 hatlock_t *hatlockp; 8478 8479 ASSERT(hat->sfmmu_xhat_provider == NULL); 8480 8481 hatlockp = sfmmu_hat_enter(hat); 8482 hat->sfmmu_rmstat--; 8483 sfmmu_hat_exit(hatlockp); 8484 } 8485 8486 /* 8487 * Routines for entering or removing ourselves from the 8488 * ism_hat's mapping list. This is used for both private and 8489 * SCD hats.
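 *
 * The list is a doubly linked list headed at ism_hat->sfmmu_iment and
 * is protected by ism_mlist_lock, which the caller must hold across
 * both iment_add() and iment_sub().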
8490 */ 8491 static void 8492 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8493 { 8494 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8495 8496 iment->iment_prev = NULL; 8497 iment->iment_next = ism_hat->sfmmu_iment; 8498 if (ism_hat->sfmmu_iment) { 8499 ism_hat->sfmmu_iment->iment_prev = iment; 8500 } 8501 ism_hat->sfmmu_iment = iment; 8502 } 8503 8504 static void 8505 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8506 { 8507 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8508 8509 if (ism_hat->sfmmu_iment == NULL) { 8510 panic("ism map entry remove - no entries"); 8511 } 8512 8513 if (iment->iment_prev) { 8514 ASSERT(ism_hat->sfmmu_iment != iment); 8515 iment->iment_prev->iment_next = iment->iment_next; 8516 } else { 8517 ASSERT(ism_hat->sfmmu_iment == iment); 8518 ism_hat->sfmmu_iment = iment->iment_next; 8519 } 8520 8521 if (iment->iment_next) { 8522 iment->iment_next->iment_prev = iment->iment_prev; 8523 } 8524 8525 /* 8526 * zero out the entry 8527 */ 8528 iment->iment_next = NULL; 8529 iment->iment_prev = NULL; 8530 iment->iment_hat = NULL; 8531 iment->iment_base_va = 0; 8532 } 8533 8534 /* 8535 * Hat_share()/unshare() return an (non-zero) error 8536 * when saddr and daddr are not properly aligned. 8537 * 8538 * The top level mapping element determines the alignment 8539 * requirement for saddr and daddr, depending on different 8540 * architectures. 8541 * 8542 * When hat_share()/unshare() are not supported, 8543 * HATOP_SHARE()/UNSHARE() return 0 8544 */ 8545 int 8546 hat_share(struct hat *sfmmup, caddr_t addr, 8547 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 8548 { 8549 ism_blk_t *ism_blkp; 8550 ism_blk_t *new_iblk; 8551 ism_map_t *ism_map; 8552 ism_ment_t *ism_ment; 8553 int i, added; 8554 hatlock_t *hatlockp; 8555 int reload_mmu = 0; 8556 uint_t ismshift = page_get_shift(ismszc); 8557 size_t ismpgsz = page_get_pagesize(ismszc); 8558 uint_t ismmask = (uint_t)ismpgsz - 1; 8559 size_t sh_size = ISM_SHIFT(ismshift, len); 8560 ushort_t ismhatflag; 8561 hat_region_cookie_t rcookie; 8562 sf_scd_t *old_scdp; 8563 8564 #ifdef DEBUG 8565 caddr_t eaddr = addr + len; 8566 #endif /* DEBUG */ 8567 8568 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8569 ASSERT(sptaddr == ISMID_STARTADDR); 8570 /* 8571 * Check the alignment. 8572 */ 8573 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8574 return (EINVAL); 8575 8576 /* 8577 * Check size alignment. 8578 */ 8579 if (!ISM_ALIGNED(ismshift, len)) 8580 return (EINVAL); 8581 8582 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 8583 8584 /* 8585 * Allocate ism_ment for the ism_hat's mapping list, and an 8586 * ism map blk in case we need one. We must do our 8587 * allocations before acquiring locks to prevent a deadlock 8588 * in the kmem allocator on the mapping list lock. 8589 */ 8590 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8591 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8592 8593 /* 8594 * Serialize ISM mappings with the ISM busy flag, and also the 8595 * trap handlers. 8596 */ 8597 sfmmu_ismhat_enter(sfmmup, 0); 8598 8599 /* 8600 * Allocate an ism map blk if necessary. 8601 */ 8602 if (sfmmup->sfmmu_iblk == NULL) { 8603 sfmmup->sfmmu_iblk = new_iblk; 8604 bzero(new_iblk, sizeof (*new_iblk)); 8605 new_iblk->iblk_nextpa = (uint64_t)-1; 8606 membar_stst(); /* make sure next ptr visible to all CPUs */ 8607 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8608 reload_mmu = 1; 8609 new_iblk = NULL; 8610 } 8611 8612 #ifdef DEBUG 8613 /* 8614 * Make sure mapping does not already exist. 
8615 */ 8616 ism_blkp = sfmmup->sfmmu_iblk; 8617 while (ism_blkp != NULL) { 8618 ism_map = ism_blkp->iblk_maps; 8619 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8620 if ((addr >= ism_start(ism_map[i]) && 8621 addr < ism_end(ism_map[i])) || 8622 eaddr > ism_start(ism_map[i]) && 8623 eaddr <= ism_end(ism_map[i])) { 8624 panic("sfmmu_share: Already mapped!"); 8625 } 8626 } 8627 ism_blkp = ism_blkp->iblk_next; 8628 } 8629 #endif /* DEBUG */ 8630 8631 ASSERT(ismszc >= TTE4M); 8632 if (ismszc == TTE4M) { 8633 ismhatflag = HAT_4M_FLAG; 8634 } else if (ismszc == TTE32M) { 8635 ismhatflag = HAT_32M_FLAG; 8636 } else if (ismszc == TTE256M) { 8637 ismhatflag = HAT_256M_FLAG; 8638 } 8639 /* 8640 * Add mapping to first available mapping slot. 8641 */ 8642 ism_blkp = sfmmup->sfmmu_iblk; 8643 added = 0; 8644 while (!added) { 8645 ism_map = ism_blkp->iblk_maps; 8646 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8647 if (ism_map[i].imap_ismhat == NULL) { 8648 8649 ism_map[i].imap_ismhat = ism_hatid; 8650 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8651 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8652 ism_map[i].imap_hatflags = ismhatflag; 8653 ism_map[i].imap_sz_mask = ismmask; 8654 /* 8655 * imap_seg is checked in ISM_CHECK to see if 8656 * it is non-NULL; if so, the other info is assumed valid. 8657 */ 8658 membar_stst(); 8659 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8660 ism_map[i].imap_ment = ism_ment; 8661 8662 /* 8663 * Now add ourselves to the ism_hat's 8664 * mapping list. 8665 */ 8666 ism_ment->iment_hat = sfmmup; 8667 ism_ment->iment_base_va = addr; 8668 ism_hatid->sfmmu_ismhat = 1; 8669 mutex_enter(&ism_mlist_lock); 8670 iment_add(ism_ment, ism_hatid); 8671 mutex_exit(&ism_mlist_lock); 8672 added = 1; 8673 break; 8674 } 8675 } 8676 if (!added && ism_blkp->iblk_next == NULL) { 8677 ism_blkp->iblk_next = new_iblk; 8678 new_iblk = NULL; 8679 bzero(ism_blkp->iblk_next, 8680 sizeof (*ism_blkp->iblk_next)); 8681 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8682 membar_stst(); 8683 ism_blkp->iblk_nextpa = 8684 va_to_pa((caddr_t)ism_blkp->iblk_next); 8685 } 8686 ism_blkp = ism_blkp->iblk_next; 8687 } 8688 8689 /* 8690 * After calling hat_join_region, sfmmup may join a new SCD or 8691 * move from the old scd to a new scd, in which case, we want to 8692 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8693 * sfmmu_check_page_sizes at the end of this routine. 8694 */ 8695 old_scdp = sfmmup->sfmmu_scdp; 8696 8697 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8698 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8699 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8700 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8701 } 8702 /* 8703 * Update our counters for this sfmmup's ism mappings. 8704 */ 8705 for (i = 0; i <= ismszc; i++) { 8706 if (!(disable_ism_large_pages & (1 << i))) 8707 (void) ism_tsb_entries(sfmmup, i); 8708 } 8709 8710 /* 8711 * For ISM and DISM we do not support 512K pages, so we only 8712 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8713 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8714 * 8715 * Need to set 32M/256M ISM flags to make sure 8716 * sfmmu_check_page_sizes() enables them on Panther.
8717 */ 8718 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8719 8720 switch (ismszc) { 8721 case TTE256M: 8722 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8723 hatlockp = sfmmu_hat_enter(sfmmup); 8724 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8725 sfmmu_hat_exit(hatlockp); 8726 } 8727 break; 8728 case TTE32M: 8729 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8730 hatlockp = sfmmu_hat_enter(sfmmup); 8731 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8732 sfmmu_hat_exit(hatlockp); 8733 } 8734 break; 8735 default: 8736 break; 8737 } 8738 8739 /* 8740 * If we updated the ismblkpa for this HAT we must make 8741 * sure all CPUs running this process reload their tsbmiss area. 8742 * Otherwise they will fail to load the mappings in the tsbmiss 8743 * handler and will loop calling pagefault(). 8744 */ 8745 if (reload_mmu) { 8746 hatlockp = sfmmu_hat_enter(sfmmup); 8747 sfmmu_sync_mmustate(sfmmup); 8748 sfmmu_hat_exit(hatlockp); 8749 } 8750 8751 sfmmu_ismhat_exit(sfmmup, 0); 8752 8753 /* 8754 * Free up ismblk if we didn't use it. 8755 */ 8756 if (new_iblk != NULL) 8757 kmem_cache_free(ism_blk_cache, new_iblk); 8758 8759 /* 8760 * Check TSB and TLB page sizes. 8761 */ 8762 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8763 sfmmu_check_page_sizes(sfmmup, 0); 8764 } else { 8765 sfmmu_check_page_sizes(sfmmup, 1); 8766 } 8767 return (0); 8768 } 8769 8770 /* 8771 * hat_unshare removes exactly one ism_map from 8772 * this process's as. It expects multiple calls 8773 * to hat_unshare for multiple shm segments. 8774 */ 8775 void 8776 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8777 { 8778 ism_map_t *ism_map; 8779 ism_ment_t *free_ment = NULL; 8780 ism_blk_t *ism_blkp; 8781 struct hat *ism_hatid; 8782 int found, i; 8783 hatlock_t *hatlockp; 8784 struct tsb_info *tsbinfo; 8785 uint_t ismshift = page_get_shift(ismszc); 8786 size_t sh_size = ISM_SHIFT(ismshift, len); 8787 uchar_t ism_rid; 8788 sf_scd_t *old_scdp; 8789 8790 ASSERT(ISM_ALIGNED(ismshift, addr)); 8791 ASSERT(ISM_ALIGNED(ismshift, len)); 8792 ASSERT(sfmmup != NULL); 8793 ASSERT(sfmmup != ksfmmup); 8794 8795 if (sfmmup->sfmmu_xhat_provider) { 8796 XHAT_UNSHARE(sfmmup, addr, len); 8797 return; 8798 } else { 8799 /* 8800 * This must be a CPU HAT. If the address space has 8801 * XHATs attached, inform all XHATs that ISM segment 8802 * is going away 8803 */ 8804 ASSERT(sfmmup->sfmmu_as != NULL); 8805 if (sfmmup->sfmmu_as->a_xhat != NULL) 8806 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 8807 } 8808 8809 /* 8810 * Make sure that during the entire time ISM mappings are removed, 8811 * the trap handlers serialize behind us, and that no one else 8812 * can be mucking with ISM mappings. This also lets us get away 8813 * with not doing expensive cross calls to flush the TLB -- we 8814 * just discard the context, flush the entire TSB, and call it 8815 * a day. 8816 */ 8817 sfmmu_ismhat_enter(sfmmup, 0); 8818 8819 /* 8820 * Remove the mapping. 8821 * 8822 * We can't have any holes in the ism map. 8823 * The tsb miss code while searching the ism map will 8824 * stop on an empty map slot. So we must move 8825 * everyone past the hole up 1 if any. 8826 * 8827 * Also empty ism map blks are not freed until the 8828 * process exits. This is to prevent a MT race condition 8829 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 
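 *
 * For example, deleting slot i simply copies slot i+1 down over slot i
 * (continuing into the next ism_blk if necessary) and clears the final
 * slot, so the array the tsb miss handler walks stays dense and is
 * still terminated by an empty imap_ismhat entry.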
8830 */ 8831 found = 0; 8832 ism_blkp = sfmmup->sfmmu_iblk; 8833 while (!found && ism_blkp != NULL) { 8834 ism_map = ism_blkp->iblk_maps; 8835 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8836 if (addr == ism_start(ism_map[i]) && 8837 sh_size == (size_t)(ism_size(ism_map[i]))) { 8838 found = 1; 8839 break; 8840 } 8841 } 8842 if (!found) 8843 ism_blkp = ism_blkp->iblk_next; 8844 } 8845 8846 if (found) { 8847 ism_hatid = ism_map[i].imap_ismhat; 8848 ism_rid = ism_map[i].imap_rid; 8849 ASSERT(ism_hatid != NULL); 8850 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8851 8852 /* 8853 * After hat_leave_region, the sfmmup may leave SCD, 8854 * in which case, we want to grow the private tsb size when 8855 * calling sfmmu_check_page_sizes at the end of the routine. 8856 */ 8857 old_scdp = sfmmup->sfmmu_scdp; 8858 /* 8859 * Then remove ourselves from the region. 8860 */ 8861 if (ism_rid != SFMMU_INVALID_ISMRID) { 8862 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8863 HAT_REGION_ISM); 8864 } 8865 8866 /* 8867 * And now guarantee that any other cpu 8868 * that tries to process an ISM miss 8869 * will go to tl=0. 8870 */ 8871 hatlockp = sfmmu_hat_enter(sfmmup); 8872 sfmmu_invalidate_ctx(sfmmup); 8873 sfmmu_hat_exit(hatlockp); 8874 8875 /* 8876 * Remove ourselves from the ism mapping list. 8877 */ 8878 mutex_enter(&ism_mlist_lock); 8879 iment_sub(ism_map[i].imap_ment, ism_hatid); 8880 mutex_exit(&ism_mlist_lock); 8881 free_ment = ism_map[i].imap_ment; 8882 8883 /* 8884 * We delete the ism map by copying 8885 * the next map over the current one. 8886 * We will take the next one in the maps 8887 * array or from the next ism_blk. 8888 */ 8889 while (ism_blkp != NULL) { 8890 ism_map = ism_blkp->iblk_maps; 8891 while (i < (ISM_MAP_SLOTS - 1)) { 8892 ism_map[i] = ism_map[i + 1]; 8893 i++; 8894 } 8895 /* i == (ISM_MAP_SLOTS - 1) */ 8896 ism_blkp = ism_blkp->iblk_next; 8897 if (ism_blkp != NULL) { 8898 ism_map[i] = ism_blkp->iblk_maps[0]; 8899 i = 0; 8900 } else { 8901 ism_map[i].imap_seg = 0; 8902 ism_map[i].imap_vb_shift = 0; 8903 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8904 ism_map[i].imap_hatflags = 0; 8905 ism_map[i].imap_sz_mask = 0; 8906 ism_map[i].imap_ismhat = NULL; 8907 ism_map[i].imap_ment = NULL; 8908 } 8909 } 8910 8911 /* 8912 * Now flush entire TSB for the process, since 8913 * demapping page by page can be too expensive. 8914 * We don't have to flush the TLB here anymore 8915 * since we switch to a new TLB ctx instead. 8916 * Also, there is no need to flush if the process 8917 * is exiting since the TSB will be freed later. 8918 */ 8919 if (!sfmmup->sfmmu_free) { 8920 hatlockp = sfmmu_hat_enter(sfmmup); 8921 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8922 tsbinfo = tsbinfo->tsb_next) { 8923 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8924 continue; 8925 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 8926 tsbinfo->tsb_flags |= 8927 TSB_FLUSH_NEEDED; 8928 continue; 8929 } 8930 8931 sfmmu_inv_tsb(tsbinfo->tsb_va, 8932 TSB_BYTES(tsbinfo->tsb_szc)); 8933 } 8934 sfmmu_hat_exit(hatlockp); 8935 } 8936 } 8937 8938 /* 8939 * Update our counters for this sfmmup's ism mappings. 8940 */ 8941 for (i = 0; i <= ismszc; i++) { 8942 if (!(disable_ism_large_pages & (1 << i))) 8943 (void) ism_tsb_entries(sfmmup, i); 8944 } 8945 8946 sfmmu_ismhat_exit(sfmmup, 0); 8947 8948 /* 8949 * We must do our freeing here after dropping locks 8950 * to prevent a deadlock in the kmem allocator on the 8951 * mapping list lock. 
8952 */ 8953 if (free_ment != NULL) 8954 kmem_cache_free(ism_ment_cache, free_ment); 8955 8956 /* 8957 * Check TSB and TLB page sizes if the process isn't exiting. 8958 */ 8959 if (!sfmmup->sfmmu_free) { 8960 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 8961 sfmmu_check_page_sizes(sfmmup, 1); 8962 } else { 8963 sfmmu_check_page_sizes(sfmmup, 0); 8964 } 8965 } 8966 } 8967 8968 /* ARGSUSED */ 8969 static int 8970 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8971 { 8972 /* void *buf is sfmmu_t pointer */ 8973 bzero(buf, sizeof (sfmmu_t)); 8974 8975 return (0); 8976 } 8977 8978 /* ARGSUSED */ 8979 static void 8980 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8981 { 8982 /* void *buf is sfmmu_t pointer */ 8983 } 8984 8985 /* 8986 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8987 * field to be the pa of this hmeblk 8988 */ 8989 /* ARGSUSED */ 8990 static int 8991 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8992 { 8993 struct hme_blk *hmeblkp; 8994 8995 bzero(buf, (size_t)cdrarg); 8996 hmeblkp = (struct hme_blk *)buf; 8997 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8998 8999 #ifdef HBLK_TRACE 9000 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 9001 #endif /* HBLK_TRACE */ 9002 9003 return (0); 9004 } 9005 9006 /* ARGSUSED */ 9007 static void 9008 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 9009 { 9010 9011 #ifdef HBLK_TRACE 9012 9013 struct hme_blk *hmeblkp; 9014 9015 hmeblkp = (struct hme_blk *)buf; 9016 mutex_destroy(&hmeblkp->hblk_audit_lock); 9017 9018 #endif /* HBLK_TRACE */ 9019 } 9020 9021 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 9022 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 9023 /* 9024 * The kmem allocator will callback into our reclaim routine when the system 9025 * is running low in memory. We traverse the hash and free up all unused but 9026 * still cached hme_blks. We also traverse the free list and free them up 9027 * as well. 
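 *
 * Each invocation first drains the per-cpu pending lists (after a pair
 * of xt_sync()s so no CPU can still reference the hmeblks), then scans
 * roughly 1/sfmmu_cache_reclaim_scan_ratio (by default 1/8) of the user
 * and kernel hme hash tables, advancing a per-hash reclaim hand so that
 * successive calls cover different buckets.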
9028 */ 9029 /*ARGSUSED*/ 9030 static void 9031 sfmmu_hblkcache_reclaim(void *cdrarg) 9032 { 9033 int i; 9034 struct hmehash_bucket *hmebp; 9035 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 9036 static struct hmehash_bucket *uhmehash_reclaim_hand; 9037 static struct hmehash_bucket *khmehash_reclaim_hand; 9038 struct hme_blk *list = NULL, *last_hmeblkp; 9039 cpuset_t cpuset = cpu_ready_set; 9040 cpu_hme_pend_t *cpuhp; 9041 9042 /* Free up hmeblks on the cpu pending lists */ 9043 for (i = 0; i < NCPU; i++) { 9044 cpuhp = &cpu_hme_pend[i]; 9045 if (cpuhp->chp_listp != NULL) { 9046 mutex_enter(&cpuhp->chp_mutex); 9047 if (cpuhp->chp_listp == NULL) { 9048 mutex_exit(&cpuhp->chp_mutex); 9049 continue; 9050 } 9051 for (last_hmeblkp = cpuhp->chp_listp; 9052 last_hmeblkp->hblk_next != NULL; 9053 last_hmeblkp = last_hmeblkp->hblk_next) 9054 ; 9055 last_hmeblkp->hblk_next = list; 9056 list = cpuhp->chp_listp; 9057 cpuhp->chp_listp = NULL; 9058 cpuhp->chp_count = 0; 9059 mutex_exit(&cpuhp->chp_mutex); 9060 } 9061 9062 } 9063 9064 if (list != NULL) { 9065 kpreempt_disable(); 9066 CPUSET_DEL(cpuset, CPU->cpu_id); 9067 xt_sync(cpuset); 9068 xt_sync(cpuset); 9069 kpreempt_enable(); 9070 sfmmu_hblk_free(&list); 9071 list = NULL; 9072 } 9073 9074 hmebp = uhmehash_reclaim_hand; 9075 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 9076 uhmehash_reclaim_hand = hmebp = uhme_hash; 9077 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9078 9079 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9080 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9081 hmeblkp = hmebp->hmeblkp; 9082 pr_hblk = NULL; 9083 while (hmeblkp) { 9084 nx_hblk = hmeblkp->hblk_next; 9085 if (!hmeblkp->hblk_vcnt && 9086 !hmeblkp->hblk_hmecnt) { 9087 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9088 pr_hblk, &list, 0); 9089 } else { 9090 pr_hblk = hmeblkp; 9091 } 9092 hmeblkp = nx_hblk; 9093 } 9094 SFMMU_HASH_UNLOCK(hmebp); 9095 } 9096 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 9097 hmebp = uhme_hash; 9098 } 9099 9100 hmebp = khmehash_reclaim_hand; 9101 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 9102 khmehash_reclaim_hand = hmebp = khme_hash; 9103 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9104 9105 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9106 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9107 hmeblkp = hmebp->hmeblkp; 9108 pr_hblk = NULL; 9109 while (hmeblkp) { 9110 nx_hblk = hmeblkp->hblk_next; 9111 if (!hmeblkp->hblk_vcnt && 9112 !hmeblkp->hblk_hmecnt) { 9113 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9114 pr_hblk, &list, 0); 9115 } else { 9116 pr_hblk = hmeblkp; 9117 } 9118 hmeblkp = nx_hblk; 9119 } 9120 SFMMU_HASH_UNLOCK(hmebp); 9121 } 9122 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9123 hmebp = khme_hash; 9124 } 9125 sfmmu_hblks_list_purge(&list, 0); 9126 } 9127 9128 /* 9129 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9130 * same goes for sfmmu_get_addrvcolor(). 9131 * 9132 * This function will return the virtual color for the specified page. The 9133 * virtual color corresponds to this page current mapping or its last mapping. 9134 * It is used by memory allocators to choose addresses with the correct 9135 * alignment so vac consistency is automatically maintained. If the page 9136 * has no color it returns -1. 
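 *
 * A minimal (hypothetical) allocator sketch, assuming "base" is an
 * shm_alignment-aligned kernel VA range owned by the caller:
 *
 *	int c = sfmmu_get_ppvcolor(pp);
 *	if (c != -1)
 *		va = base + ptob(c);	/* VA with a matching vac color */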
9137 */ 9138 /*ARGSUSED*/ 9139 int 9140 sfmmu_get_ppvcolor(struct page *pp) 9141 { 9142 #ifdef VAC 9143 int color; 9144 9145 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9146 return (-1); 9147 } 9148 color = PP_GET_VCOLOR(pp); 9149 ASSERT(color < mmu_btop(shm_alignment)); 9150 return (color); 9151 #else 9152 return (-1); 9153 #endif /* VAC */ 9154 } 9155 9156 /* 9157 * This function will return the desired alignment for vac consistency 9158 * (vac color) given a virtual address. If no vac is present it returns -1. 9159 */ 9160 /*ARGSUSED*/ 9161 int 9162 sfmmu_get_addrvcolor(caddr_t vaddr) 9163 { 9164 #ifdef VAC 9165 if (cache & CACHE_VAC) { 9166 return (addr_to_vcolor(vaddr)); 9167 } else { 9168 return (-1); 9169 } 9170 #else 9171 return (-1); 9172 #endif /* VAC */ 9173 } 9174 9175 #ifdef VAC 9176 /* 9177 * Check for conflicts. 9178 * A conflict exists if the new and existent mappings do not match in 9179 * their "shm_alignment" fields. If conflicts exist, the existent mappings 9180 * are flushed unless one of them is locked. If one of them is locked, then 9181 * the mappings are flushed and converted to non-cacheable mappings. 9182 */ 9183 static void 9184 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 9185 { 9186 struct hat *tmphat; 9187 struct sf_hment *sfhmep, *tmphme = NULL; 9188 struct hme_blk *hmeblkp; 9189 int vcolor; 9190 tte_t tte; 9191 9192 ASSERT(sfmmu_mlist_held(pp)); 9193 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 9194 9195 vcolor = addr_to_vcolor(addr); 9196 if (PP_NEWPAGE(pp)) { 9197 PP_SET_VCOLOR(pp, vcolor); 9198 return; 9199 } 9200 9201 if (PP_GET_VCOLOR(pp) == vcolor) { 9202 return; 9203 } 9204 9205 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 9206 /* 9207 * Previous user of page had a different color 9208 * but since there are no current users 9209 * we just flush the cache and change the color. 9210 */ 9211 SFMMU_STAT(sf_pgcolor_conflict); 9212 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9213 PP_SET_VCOLOR(pp, vcolor); 9214 return; 9215 } 9216 9217 /* 9218 * If we get here we have a vac conflict with a current 9219 * mapping. VAC conflict policy is as follows. 9220 * - The default is to unload the other mappings unless: 9221 * - If we have a large mapping we uncache the page. 9222 * We need to uncache the rest of the large page too. 9223 * - If any of the mappings are locked we uncache the page. 9224 * - If the requested mapping is inconsistent 9225 * with another mapping and that mapping 9226 * is in the same address space we have to 9227 * make it non-cached. The default thing 9228 * to do is unload the inconsistent mapping 9229 * but if they are in the same address space 9230 * we run the risk of unmapping the pc or the 9231 * stack which we will use as we return to the user, 9232 * in which case we can then fault on the thing 9233 * we just unloaded and get into an infinite loop. 9234 */ 9235 if (PP_ISMAPPED_LARGE(pp)) { 9236 int sz; 9237 9238 /* 9239 * Existing mapping is for big pages. We don't unload 9240 * existing big mappings to satisfy new mappings. 9241 * Always convert all mappings to TNC. 9242 */ 9243 sz = fnd_mapping_sz(pp); 9244 pp = PP_GROUPLEADER(pp, sz); 9245 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 9246 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 9247 TTEPAGES(sz)); 9248 9249 return; 9250 } 9251 9252 /* 9253 * Check if any mapping is in the same address space or is locked, 9254 * since in either case we need to uncache.
9255 */ 9256 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9257 tmphme = sfhmep->hme_next; 9258 if (IS_PAHME(sfhmep)) 9259 continue; 9260 hmeblkp = sfmmu_hmetohblk(sfhmep); 9261 if (hmeblkp->hblk_xhat_bit) 9262 continue; 9263 tmphat = hblktosfmmu(hmeblkp); 9264 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9265 ASSERT(TTE_IS_VALID(&tte)); 9266 if (hmeblkp->hblk_shared || tmphat == hat || 9267 hmeblkp->hblk_lckcnt) { 9268 /* 9269 * We have an uncache conflict 9270 */ 9271 SFMMU_STAT(sf_uncache_conflict); 9272 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9273 return; 9274 } 9275 } 9276 9277 /* 9278 * We have an unload conflict 9279 * We have already checked for LARGE mappings, therefore 9280 * the remaining mapping(s) must be TTE8K. 9281 */ 9282 SFMMU_STAT(sf_unload_conflict); 9283 9284 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9285 tmphme = sfhmep->hme_next; 9286 if (IS_PAHME(sfhmep)) 9287 continue; 9288 hmeblkp = sfmmu_hmetohblk(sfhmep); 9289 if (hmeblkp->hblk_xhat_bit) 9290 continue; 9291 ASSERT(!hmeblkp->hblk_shared); 9292 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9293 } 9294 9295 if (PP_ISMAPPED_KPM(pp)) 9296 sfmmu_kpm_vac_unload(pp, addr); 9297 9298 /* 9299 * Unloads only do TLB flushes so we need to flush the 9300 * cache here. 9301 */ 9302 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9303 PP_SET_VCOLOR(pp, vcolor); 9304 } 9305 9306 /* 9307 * Whenever a mapping is unloaded and the page is in TNC state, 9308 * we see if the page can be made cacheable again. 'pp' is 9309 * the page that we just unloaded a mapping from, the size 9310 * of mapping that was unloaded is 'ottesz'. 9311 * Remark: 9312 * The recache policy for mpss pages can leave a performance problem 9313 * under the following circumstances: 9314 * . A large page in uncached mode has just been unmapped. 9315 * . All constituent pages are TNC due to a conflicting small mapping. 9316 * . There are many other, non conflicting, small mappings around for 9317 * a lot of the constituent pages. 9318 * . We're called w/ the "old" groupleader page and the old ottesz, 9319 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9320 * we end up w/ TTE8K or npages == 1. 9321 * . We call tst_tnc w/ the old groupleader only, and if there is no 9322 * conflict, we re-cache only this page. 9323 * . All other small mappings are not checked and will be left in TNC mode. 9324 * The problem is not very serious because: 9325 * . mpss is actually only defined for heap and stack, so the probability 9326 * is not very high that a large page mapping exists in parallel to a small 9327 * one (this is possible, but seems to be bad programming style in the 9328 * appl). 9329 * . The problem gets a little bit more serious, when those TNC pages 9330 * have to be mapped into kernel space, e.g. for networking. 9331 * . When VAC alias conflicts occur in applications, this is regarded 9332 * as an application bug. So if kstat's show them, the appl should 9333 * be changed anyway. 9334 */ 9335 void 9336 conv_tnc(page_t *pp, int ottesz) 9337 { 9338 int cursz, dosz; 9339 pgcnt_t curnpgs, dopgs; 9340 pgcnt_t pg64k; 9341 page_t *pp2; 9342 9343 /* 9344 * Determine how big a range we check for TNC and find 9345 * leader page. cursz is the size of the biggest 9346 * mapping that still exist on 'pp'. 
9347 */ 9348 if (PP_ISMAPPED_LARGE(pp)) { 9349 cursz = fnd_mapping_sz(pp); 9350 } else { 9351 cursz = TTE8K; 9352 } 9353 9354 if (ottesz >= cursz) { 9355 dosz = ottesz; 9356 pp2 = pp; 9357 } else { 9358 dosz = cursz; 9359 pp2 = PP_GROUPLEADER(pp, dosz); 9360 } 9361 9362 pg64k = TTEPAGES(TTE64K); 9363 dopgs = TTEPAGES(dosz); 9364 9365 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9366 9367 while (dopgs != 0) { 9368 curnpgs = TTEPAGES(cursz); 9369 if (tst_tnc(pp2, curnpgs)) { 9370 SFMMU_STAT_ADD(sf_recache, curnpgs); 9371 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9372 curnpgs); 9373 } 9374 9375 ASSERT(dopgs >= curnpgs); 9376 dopgs -= curnpgs; 9377 9378 if (dopgs == 0) { 9379 break; 9380 } 9381 9382 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9383 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9384 cursz = fnd_mapping_sz(pp2); 9385 } else { 9386 cursz = TTE8K; 9387 } 9388 } 9389 } 9390 9391 /* 9392 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9393 * returns 0 otherwise. Note that oaddr argument is valid for only 9394 * 8k pages. 9395 */ 9396 int 9397 tst_tnc(page_t *pp, pgcnt_t npages) 9398 { 9399 struct sf_hment *sfhme; 9400 struct hme_blk *hmeblkp; 9401 tte_t tte; 9402 caddr_t vaddr; 9403 int clr_valid = 0; 9404 int color, color1, bcolor; 9405 int i, ncolors; 9406 9407 ASSERT(pp != NULL); 9408 ASSERT(!(cache & CACHE_WRITEBACK)); 9409 9410 if (npages > 1) { 9411 ncolors = CACHE_NUM_COLOR; 9412 } 9413 9414 for (i = 0; i < npages; i++) { 9415 ASSERT(sfmmu_mlist_held(pp)); 9416 ASSERT(PP_ISTNC(pp)); 9417 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9418 9419 if (PP_ISPNC(pp)) { 9420 return (0); 9421 } 9422 9423 clr_valid = 0; 9424 if (PP_ISMAPPED_KPM(pp)) { 9425 caddr_t kpmvaddr; 9426 9427 ASSERT(kpm_enable); 9428 kpmvaddr = hat_kpm_page2va(pp, 1); 9429 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9430 color1 = addr_to_vcolor(kpmvaddr); 9431 clr_valid = 1; 9432 } 9433 9434 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9435 if (IS_PAHME(sfhme)) 9436 continue; 9437 hmeblkp = sfmmu_hmetohblk(sfhme); 9438 if (hmeblkp->hblk_xhat_bit) 9439 continue; 9440 9441 sfmmu_copytte(&sfhme->hme_tte, &tte); 9442 ASSERT(TTE_IS_VALID(&tte)); 9443 9444 vaddr = tte_to_vaddr(hmeblkp, tte); 9445 color = addr_to_vcolor(vaddr); 9446 9447 if (npages > 1) { 9448 /* 9449 * If there is a big mapping, make sure 9450 * 8K mapping is consistent with the big 9451 * mapping. 
9452 */ 9453 bcolor = i % ncolors; 9454 if (color != bcolor) { 9455 return (0); 9456 } 9457 } 9458 if (!clr_valid) { 9459 clr_valid = 1; 9460 color1 = color; 9461 } 9462 9463 if (color1 != color) { 9464 return (0); 9465 } 9466 } 9467 9468 pp = PP_PAGENEXT(pp); 9469 } 9470 9471 return (1); 9472 } 9473 9474 void 9475 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 9476 pgcnt_t npages) 9477 { 9478 kmutex_t *pmtx; 9479 int i, ncolors, bcolor; 9480 kpm_hlk_t *kpmp; 9481 cpuset_t cpuset; 9482 9483 ASSERT(pp != NULL); 9484 ASSERT(!(cache & CACHE_WRITEBACK)); 9485 9486 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 9487 pmtx = sfmmu_page_enter(pp); 9488 9489 /* 9490 * Fast path caching single unmapped page 9491 */ 9492 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 9493 flags == HAT_CACHE) { 9494 PP_CLRTNC(pp); 9495 PP_CLRPNC(pp); 9496 sfmmu_page_exit(pmtx); 9497 sfmmu_kpm_kpmp_exit(kpmp); 9498 return; 9499 } 9500 9501 /* 9502 * We need to capture all cpus in order to change cacheability 9503 * because we can't allow one cpu to access the same physical 9504 * page using a cacheable and a non-cacheable mapping at the same 9505 * time. Since we may end up walking the ism mapping list we 9506 * have to grab its lock now, since we can't after all the 9507 * cpus have been captured. 9508 */ 9509 sfmmu_hat_lock_all(); 9510 mutex_enter(&ism_mlist_lock); 9511 kpreempt_disable(); 9512 cpuset = cpu_ready_set; 9513 xc_attention(cpuset); 9514 9515 if (npages > 1) { 9516 /* 9517 * Make sure all colors are flushed since the 9518 * sfmmu_page_cache() only flushes one color; 9519 * it does not know about big pages. 9520 */ 9521 ncolors = CACHE_NUM_COLOR; 9522 if (flags & HAT_TMPNC) { 9523 for (i = 0; i < ncolors; i++) { 9524 sfmmu_cache_flushcolor(i, pp->p_pagenum); 9525 } 9526 cache_flush_flag = CACHE_NO_FLUSH; 9527 } 9528 } 9529 9530 for (i = 0; i < npages; i++) { 9531 9532 ASSERT(sfmmu_mlist_held(pp)); 9533 9534 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 9535 9536 if (npages > 1) { 9537 bcolor = i % ncolors; 9538 } else { 9539 bcolor = NO_VCOLOR; 9540 } 9541 9542 sfmmu_page_cache(pp, flags, cache_flush_flag, 9543 bcolor); 9544 } 9545 9546 pp = PP_PAGENEXT(pp); 9547 } 9548 9549 xt_sync(cpuset); 9550 xc_dismissed(cpuset); 9551 mutex_exit(&ism_mlist_lock); 9552 sfmmu_hat_unlock_all(); 9553 sfmmu_page_exit(pmtx); 9554 sfmmu_kpm_kpmp_exit(kpmp); 9555 kpreempt_enable(); 9556 } 9557 9558 /* 9559 * This function changes the virtual cacheability of all mappings to a 9560 * particular page. When changing from uncache to cacheable the mappings will 9561 * only be changed if all of them have the same virtual color. 9562 * We need to flush the cache on all cpus. It is possible that 9563 * a process referenced a page as cacheable but has since exited 9564 * and cleared the mapping list. We still need to flush it but have no 9565 * state, so flushing on all cpus is the only alternative.
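 *
 * This routine is invoked from sfmmu_page_cache_array() (above) with
 * all CPUs captured and the hat locks held, which is why
 * sfmmu_modifytte_try() is not expected to fail here.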
9566 */ 9567 static void 9568 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9569 { 9570 struct sf_hment *sfhme; 9571 struct hme_blk *hmeblkp; 9572 sfmmu_t *sfmmup; 9573 tte_t tte, ttemod; 9574 caddr_t vaddr; 9575 int ret, color; 9576 pfn_t pfn; 9577 9578 color = bcolor; 9579 pfn = pp->p_pagenum; 9580 9581 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9582 9583 if (IS_PAHME(sfhme)) 9584 continue; 9585 hmeblkp = sfmmu_hmetohblk(sfhme); 9586 9587 if (hmeblkp->hblk_xhat_bit) 9588 continue; 9589 9590 sfmmu_copytte(&sfhme->hme_tte, &tte); 9591 ASSERT(TTE_IS_VALID(&tte)); 9592 vaddr = tte_to_vaddr(hmeblkp, tte); 9593 color = addr_to_vcolor(vaddr); 9594 9595 #ifdef DEBUG 9596 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9597 ASSERT(color == bcolor); 9598 } 9599 #endif 9600 9601 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9602 9603 ttemod = tte; 9604 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9605 TTE_CLR_VCACHEABLE(&ttemod); 9606 } else { /* flags & HAT_CACHE */ 9607 TTE_SET_VCACHEABLE(&ttemod); 9608 } 9609 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9610 if (ret < 0) { 9611 /* 9612 * Since all cpus are captured modifytte should not 9613 * fail. 9614 */ 9615 panic("sfmmu_page_cache: write to tte failed"); 9616 } 9617 9618 sfmmup = hblktosfmmu(hmeblkp); 9619 if (cache_flush_flag == CACHE_FLUSH) { 9620 /* 9621 * Flush TSBs, TLBs and caches 9622 */ 9623 if (hmeblkp->hblk_shared) { 9624 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9625 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9626 sf_region_t *rgnp; 9627 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9628 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9629 ASSERT(srdp != NULL); 9630 rgnp = srdp->srd_hmergnp[rid]; 9631 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9632 srdp, rgnp, rid); 9633 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9634 hmeblkp, 0); 9635 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9636 } else if (sfmmup->sfmmu_ismhat) { 9637 if (flags & HAT_CACHE) { 9638 SFMMU_STAT(sf_ism_recache); 9639 } else { 9640 SFMMU_STAT(sf_ism_uncache); 9641 } 9642 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9643 pfn, CACHE_FLUSH); 9644 } else { 9645 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9646 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9647 } 9648 9649 /* 9650 * all cache entries belonging to this pfn are 9651 * now flushed. 9652 */ 9653 cache_flush_flag = CACHE_NO_FLUSH; 9654 } else { 9655 /* 9656 * Flush only TSBs and TLBs. 
9657 */ 9658 if (hmeblkp->hblk_shared) { 9659 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9660 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9661 sf_region_t *rgnp; 9662 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9663 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9664 ASSERT(srdp != NULL); 9665 rgnp = srdp->srd_hmergnp[rid]; 9666 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9667 srdp, rgnp, rid); 9668 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9669 hmeblkp, 0); 9670 } else if (sfmmup->sfmmu_ismhat) { 9671 if (flags & HAT_CACHE) { 9672 SFMMU_STAT(sf_ism_recache); 9673 } else { 9674 SFMMU_STAT(sf_ism_uncache); 9675 } 9676 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9677 pfn, CACHE_NO_FLUSH); 9678 } else { 9679 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9680 } 9681 } 9682 } 9683 9684 if (PP_ISMAPPED_KPM(pp)) 9685 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9686 9687 switch (flags) { 9688 9689 default: 9690 panic("sfmmu_pagecache: unknown flags"); 9691 break; 9692 9693 case HAT_CACHE: 9694 PP_CLRTNC(pp); 9695 PP_CLRPNC(pp); 9696 PP_SET_VCOLOR(pp, color); 9697 break; 9698 9699 case HAT_TMPNC: 9700 PP_SETTNC(pp); 9701 PP_SET_VCOLOR(pp, NO_VCOLOR); 9702 break; 9703 9704 case HAT_UNCACHE: 9705 PP_SETPNC(pp); 9706 PP_CLRTNC(pp); 9707 PP_SET_VCOLOR(pp, NO_VCOLOR); 9708 break; 9709 } 9710 } 9711 #endif /* VAC */ 9712 9713 9714 /* 9715 * Wrapper routine used to return a context. 9716 * 9717 * It's the responsibility of the caller to guarantee that the 9718 * process serializes on calls here by taking the HAT lock for 9719 * the hat. 9720 * 9721 */ 9722 static void 9723 sfmmu_get_ctx(sfmmu_t *sfmmup) 9724 { 9725 mmu_ctx_t *mmu_ctxp; 9726 uint_t pstate_save; 9727 int ret; 9728 9729 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9730 ASSERT(sfmmup != ksfmmup); 9731 9732 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9733 sfmmu_setup_tsbinfo(sfmmup); 9734 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9735 } 9736 9737 kpreempt_disable(); 9738 9739 mmu_ctxp = CPU_MMU_CTXP(CPU); 9740 ASSERT(mmu_ctxp); 9741 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9742 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9743 9744 /* 9745 * Do a wrap-around if cnum reaches the max # of cnums supported by an MMU. 9746 */ 9747 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9748 sfmmu_ctx_wrap_around(mmu_ctxp); 9749 9750 /* 9751 * Let the MMU set up the page sizes to use for 9752 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9753 */ 9754 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9755 mmu_set_ctx_page_sizes(sfmmup); 9756 } 9757 9758 /* 9759 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9760 * interrupts disabled to prevent a race condition with wrap-around 9761 * ctx invalidation. In sun4v, ctx invalidation also involves 9762 * a HV call to set the number of TSBs to 0. If interrupts are not 9763 * disabled until after sfmmu_load_mmustate is complete TSBs may 9764 * become assigned to INVALID_CONTEXT. This is not allowed.
9765 */ 9766 pstate_save = sfmmu_disable_intrs(); 9767 9768 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9769 sfmmup->sfmmu_scdp != NULL) { 9770 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9771 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9772 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9773 /* debug purpose only */ 9774 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9775 != INVALID_CONTEXT); 9776 } 9777 sfmmu_load_mmustate(sfmmup); 9778 9779 sfmmu_enable_intrs(pstate_save); 9780 9781 kpreempt_enable(); 9782 } 9783 9784 /* 9785 * When all cnums are used up in a MMU, cnum will wrap around to the 9786 * next generation and start from 2. 9787 */ 9788 static void 9789 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 9790 { 9791 9792 /* caller must have disabled the preemption */ 9793 ASSERT(curthread->t_preempt >= 1); 9794 ASSERT(mmu_ctxp != NULL); 9795 9796 /* acquire Per-MMU (PM) spin lock */ 9797 mutex_enter(&mmu_ctxp->mmu_lock); 9798 9799 /* re-check to see if wrap-around is needed */ 9800 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9801 goto done; 9802 9803 SFMMU_MMU_STAT(mmu_wrap_around); 9804 9805 /* update gnum */ 9806 ASSERT(mmu_ctxp->mmu_gnum != 0); 9807 mmu_ctxp->mmu_gnum++; 9808 if (mmu_ctxp->mmu_gnum == 0 || 9809 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9810 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9811 (void *)mmu_ctxp); 9812 } 9813 9814 if (mmu_ctxp->mmu_ncpus > 1) { 9815 cpuset_t cpuset; 9816 9817 membar_enter(); /* make sure updated gnum visible */ 9818 9819 SFMMU_XCALL_STATS(NULL); 9820 9821 /* xcall to others on the same MMU to invalidate ctx */ 9822 cpuset = mmu_ctxp->mmu_cpuset; 9823 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 9824 CPUSET_DEL(cpuset, CPU->cpu_id); 9825 CPUSET_AND(cpuset, cpu_ready_set); 9826 9827 /* 9828 * Pass in INVALID_CONTEXT as the first parameter to 9829 * sfmmu_raise_tsb_exception, which invalidates the context 9830 * of any process running on the CPUs in the MMU. 9831 */ 9832 xt_some(cpuset, sfmmu_raise_tsb_exception, 9833 INVALID_CONTEXT, INVALID_CONTEXT); 9834 xt_sync(cpuset); 9835 9836 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9837 } 9838 9839 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9840 sfmmu_setctx_sec(INVALID_CONTEXT); 9841 sfmmu_clear_utsbinfo(); 9842 } 9843 9844 /* 9845 * No xcall is needed here. For sun4u systems all CPUs in context 9846 * domain share a single physical MMU therefore it's enough to flush 9847 * TLB on local CPU. On sun4v systems we use 1 global context 9848 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9849 * handler. Note that vtag_flushall_uctxs() is called 9850 * for Ultra II machine, where the equivalent flushall functionality 9851 * is implemented in SW, and only user ctx TLB entries are flushed. 9852 */ 9853 if (&vtag_flushall_uctxs != NULL) { 9854 vtag_flushall_uctxs(); 9855 } else { 9856 vtag_flushall(); 9857 } 9858 9859 /* reset mmu cnum, skips cnum 0 and 1 */ 9860 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9861 9862 done: 9863 mutex_exit(&mmu_ctxp->mmu_lock); 9864 } 9865 9866 9867 /* 9868 * For multi-threaded process, set the process context to INVALID_CONTEXT 9869 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9870 * process, we can just load the MMU state directly without having to 9871 * set context invalid. Caller must hold the hat lock since we don't 9872 * acquire it here. 
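 *
 * In the single-threaded case the MMU state is reloaded only if the cpu
 * is currently running with this process' context (curcnum == cnum);
 * otherwise nothing needs to be done, since the state will be set up at
 * the next TLB miss that allocates a context.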
9873 */ 9874 static void 9875 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 9876 { 9877 uint_t cnum; 9878 uint_t pstate_save; 9879 9880 ASSERT(sfmmup != ksfmmup); 9881 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9882 9883 kpreempt_disable(); 9884 9885 /* 9886 * We check whether the passed-in sfmmup is the same as the 9887 * currently running proc. This is to make sure the current proc 9888 * stays single-threaded if it already is. 9889 */ 9890 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 9891 (curthread->t_procp->p_lwpcnt == 1)) { 9892 /* single-thread */ 9893 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 9894 if (cnum != INVALID_CONTEXT) { 9895 uint_t curcnum; 9896 /* 9897 * Disable interrupts to prevent race condition 9898 * with sfmmu_ctx_wrap_around ctx invalidation. 9899 * In sun4v, ctx invalidation involves setting 9900 * TSB to NULL, hence, interrupts should be disabled 9901 * until after sfmmu_load_mmustate is completed. 9902 */ 9903 pstate_save = sfmmu_disable_intrs(); 9904 curcnum = sfmmu_getctx_sec(); 9905 if (curcnum == cnum) 9906 sfmmu_load_mmustate(sfmmup); 9907 sfmmu_enable_intrs(pstate_save); 9908 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 9909 } 9910 } else { 9911 /* 9912 * multi-thread 9913 * or when sfmmup is not the same as the curproc. 9914 */ 9915 sfmmu_invalidate_ctx(sfmmup); 9916 } 9917 9918 kpreempt_enable(); 9919 } 9920 9921 9922 /* 9923 * Replace the specified TSB with a new TSB. This function gets called when 9924 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 9925 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 9926 * (8K). 9927 * 9928 * Caller must hold the HAT lock, but should assume any tsb_info 9929 * pointers it has are no longer valid after calling this function. 9930 * 9931 * Return values: 9932 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 9933 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 9934 * something to this tsbinfo/TSB 9935 * TSB_SUCCESS Operation succeeded 9936 */ 9937 static tsb_replace_rc_t 9938 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 9939 hatlock_t *hatlockp, uint_t flags) 9940 { 9941 struct tsb_info *new_tsbinfo = NULL; 9942 struct tsb_info *curtsb, *prevtsb; 9943 uint_t tte_sz_mask; 9944 int i; 9945 9946 ASSERT(sfmmup != ksfmmup); 9947 ASSERT(sfmmup->sfmmu_ismhat == 0); 9948 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9949 ASSERT(szc <= tsb_max_growsize); 9950 9951 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 9952 return (TSB_LOSTRACE); 9953 9954 /* 9955 * Find the tsb_info ahead of this one in the list, and 9956 * also make sure that the tsb_info passed in really 9957 * exists! 9958 */ 9959 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9960 curtsb != old_tsbinfo && curtsb != NULL; 9961 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9962 ; 9963 ASSERT(curtsb != NULL); 9964 9965 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9966 /* 9967 * The process is swapped out, so just set the new size 9968 * code. When it swaps back in, we'll allocate a new one 9969 * of the new chosen size. 9970 */ 9971 curtsb->tsb_szc = szc; 9972 return (TSB_SUCCESS); 9973 } 9974 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 9975 9976 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 9977 9978 /* 9979 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 9980 * If we fail to allocate a TSB, exit. 9981 * 9982 * If tsb grows with new tsb size > 4M and old tsb size < 4M, 9983 * then try 4M slab after the initial alloc fails.
9984 * 9985 * If tsb swapin with tsb size > 4M, then try 4M after the 9986 * initial alloc fails. 9987 */ 9988 sfmmu_hat_exit(hatlockp); 9989 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, 9990 tte_sz_mask, flags, sfmmup) && 9991 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) || 9992 (!(flags & TSB_SWAPIN) && 9993 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) || 9994 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE, 9995 tte_sz_mask, flags, sfmmup))) { 9996 (void) sfmmu_hat_enter(sfmmup); 9997 if (!(flags & TSB_SWAPIN)) 9998 SFMMU_STAT(sf_tsb_resize_failures); 9999 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10000 return (TSB_ALLOCFAIL); 10001 } 10002 (void) sfmmu_hat_enter(sfmmup); 10003 10004 /* 10005 * Re-check to make sure somebody else didn't muck with us while we 10006 * didn't hold the HAT lock. If the process swapped out, fine, just 10007 * exit; this can happen if we try to shrink the TSB from the context 10008 * of another process (such as on an ISM unmap), though it is rare. 10009 */ 10010 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10011 SFMMU_STAT(sf_tsb_resize_failures); 10012 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10013 sfmmu_hat_exit(hatlockp); 10014 sfmmu_tsbinfo_free(new_tsbinfo); 10015 (void) sfmmu_hat_enter(sfmmup); 10016 return (TSB_LOSTRACE); 10017 } 10018 10019 #ifdef DEBUG 10020 /* Reverify that the tsb_info still exists.. for debugging only */ 10021 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 10022 curtsb != old_tsbinfo && curtsb != NULL; 10023 prevtsb = curtsb, curtsb = curtsb->tsb_next) 10024 ; 10025 ASSERT(curtsb != NULL); 10026 #endif /* DEBUG */ 10027 10028 /* 10029 * Quiesce any CPUs running this process on their next TLB miss 10030 * so they atomically see the new tsb_info. We temporarily set the 10031 * context to invalid context so new threads that come on processor 10032 * after we do the xcall to cpusran will also serialize behind the 10033 * HAT lock on TLB miss and will see the new TSB. Since this short 10034 * race with a new thread coming on processor is relatively rare, 10035 * this synchronization mechanism should be cheaper than always 10036 * pausing all CPUs for the duration of the setup, which is what 10037 * the old implementation did. This is particuarly true if we are 10038 * copying a huge chunk of memory around during that window. 10039 * 10040 * The memory barriers are to make sure things stay consistent 10041 * with resume() since it does not hold the HAT lock while 10042 * walking the list of tsb_info structures. 10043 */ 10044 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 10045 /* The TSB is either growing or shrinking. */ 10046 sfmmu_invalidate_ctx(sfmmup); 10047 } else { 10048 /* 10049 * It is illegal to swap in TSBs from a process other 10050 * than a process being swapped in. This in turn 10051 * implies we do not have a valid MMU context here 10052 * since a process needs one to resolve translation 10053 * misses. 
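		 * The ASSERT below checks exactly that: a TSB_SWAPIN
		 * request must come from the thread whose process owns
		 * this hat.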
10054 */ 10055 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 10056 } 10057 10058 #ifdef DEBUG 10059 ASSERT(max_mmu_ctxdoms > 0); 10060 10061 /* 10062 * Process should have INVALID_CONTEXT on all MMUs 10063 */ 10064 for (i = 0; i < max_mmu_ctxdoms; i++) { 10065 10066 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 10067 } 10068 #endif 10069 10070 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 10071 membar_stst(); /* strict ordering required */ 10072 if (prevtsb) 10073 prevtsb->tsb_next = new_tsbinfo; 10074 else 10075 sfmmup->sfmmu_tsb = new_tsbinfo; 10076 membar_enter(); /* make sure new TSB globally visible */ 10077 10078 /* 10079 * We need to migrate TSB entries from the old TSB to the new TSB 10080 * if tsb_remap_ttes is set and the TSB is growing. 10081 */ 10082 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 10083 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 10084 10085 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10086 10087 /* 10088 * Drop the HAT lock to free our old tsb_info. 10089 */ 10090 sfmmu_hat_exit(hatlockp); 10091 10092 if ((flags & TSB_GROW) == TSB_GROW) { 10093 SFMMU_STAT(sf_tsb_grow); 10094 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 10095 SFMMU_STAT(sf_tsb_shrink); 10096 } 10097 10098 sfmmu_tsbinfo_free(old_tsbinfo); 10099 10100 (void) sfmmu_hat_enter(sfmmup); 10101 return (TSB_SUCCESS); 10102 } 10103 10104 /* 10105 * This function will re-program hat pgsz array, and invalidate the 10106 * process' context, forcing the process to switch to another 10107 * context on the next TLB miss, and therefore start using the 10108 * TLB that is reprogrammed for the new page sizes. 10109 */ 10110 void 10111 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 10112 { 10113 int i; 10114 hatlock_t *hatlockp = NULL; 10115 10116 hatlockp = sfmmu_hat_enter(sfmmup); 10117 /* USIII+-IV+ optimization, requires hat lock */ 10118 if (tmp_pgsz) { 10119 for (i = 0; i < mmu_page_sizes; i++) 10120 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10121 } 10122 SFMMU_STAT(sf_tlb_reprog_pgsz); 10123 10124 sfmmu_invalidate_ctx(sfmmup); 10125 10126 sfmmu_hat_exit(hatlockp); 10127 } 10128 10129 /* 10130 * The scd_rttecnt field in the SCD must be updated to take account of the 10131 * regions which it contains. 10132 */ 10133 static void 10134 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10135 { 10136 uint_t rid; 10137 uint_t i, j; 10138 ulong_t w; 10139 sf_region_t *rgnp; 10140 10141 ASSERT(srdp != NULL); 10142 10143 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10144 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10145 continue; 10146 } 10147 10148 j = 0; 10149 while (w) { 10150 if (!(w & 0x1)) { 10151 j++; 10152 w >>= 1; 10153 continue; 10154 } 10155 rid = (i << BT_ULSHIFT) | j; 10156 j++; 10157 w >>= 1; 10158 10159 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10160 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10161 rgnp = srdp->srd_hmergnp[rid]; 10162 ASSERT(rgnp->rgn_refcnt > 0); 10163 ASSERT(rgnp->rgn_id == rid); 10164 10165 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10166 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10167 10168 /* 10169 * Maintain the tsb0 inflation cnt for the regions 10170 * in the SCD. 
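			 * The count is kept in 8K-tte units scaled down by
			 * a factor of four, i.e.
			 * rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2), matching
			 * the way sfmmu_tsb0_4minflcnt is used to inflate
			 * tte8k_cnt in sfmmu_check_page_sizes().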
10171 */ 10172 if (rgnp->rgn_pgszc >= TTE4M) { 10173 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10174 rgnp->rgn_size >> 10175 (TTE_PAGE_SHIFT(TTE8K) + 2); 10176 } 10177 } 10178 } 10179 } 10180 10181 /* 10182 * This function assumes that there are either four or six supported page 10183 * sizes and at most two programmable TLBs, so we need to decide which 10184 * page sizes are most important and then tell the MMU layer so it 10185 * can adjust the TLB page sizes accordingly (if supported). 10186 * 10187 * If these assumptions change, this function will need to be 10188 * updated to support whatever the new limits are. 10189 * 10190 * The growing flag is nonzero if we are growing the address space, 10191 * and zero if it is shrinking. This allows us to decide whether 10192 * to grow or shrink our TSB, depending upon available memory 10193 * conditions. 10194 */ 10195 static void 10196 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10197 { 10198 uint64_t ttecnt[MMU_PAGE_SIZES]; 10199 uint64_t tte8k_cnt, tte4m_cnt; 10200 uint8_t i; 10201 int sectsb_thresh; 10202 10203 /* 10204 * Kernel threads, processes with small address spaces not using 10205 * large pages, and dummy ISM HATs need not apply. 10206 */ 10207 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 10208 return; 10209 10210 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10211 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10212 return; 10213 10214 for (i = 0; i < mmu_page_sizes; i++) { 10215 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10216 sfmmup->sfmmu_ismttecnt[i]; 10217 } 10218 10219 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10220 if (&mmu_check_page_sizes) 10221 mmu_check_page_sizes(sfmmup, ttecnt); 10222 10223 /* 10224 * Calculate the number of 8k ttes to represent the span of these 10225 * pages. 10226 */ 10227 tte8k_cnt = ttecnt[TTE8K] + 10228 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10229 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10230 if (mmu_page_sizes == max_mmu_page_sizes) { 10231 tte4m_cnt = ttecnt[TTE4M] + 10232 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10233 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10234 } else { 10235 tte4m_cnt = ttecnt[TTE4M]; 10236 } 10237 10238 /* 10239 * Inflate tte8k_cnt to allow for region large page allocation failure. 10240 */ 10241 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10242 10243 /* 10244 * Inflate TSB sizes by a factor of 2 if this process 10245 * uses 4M text pages to minimize extra conflict misses 10246 * in the first TSB since without counting text pages 10247 * 8K TSB may become too small. 10248 * 10249 * Also double the size of the second TSB to minimize 10250 * extra conflict misses due to competition between 4M text pages 10251 * and data pages. 10252 * 10253 * We need to adjust the second TSB allocation threshold by the 10254 * inflation factor, since there is no point in creating a second 10255 * TSB when we know all the mappings can fit in the I/D TLBs. 10256 */ 10257 sectsb_thresh = tsb_sectsb_threshold; 10258 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10259 tte8k_cnt <<= 1; 10260 tte4m_cnt <<= 1; 10261 sectsb_thresh <<= 1; 10262 } 10263 10264 /* 10265 * Check to see if our TSB is the right size; we may need to 10266 * grow or shrink it. If the process is small, our work is 10267 * finished at this point. 
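	 * A sketch of the decision below: if tte8k_cnt <= tsb_rss_factor
	 * and tte4m_cnt <= sectsb_thresh (tsb_sectsb_threshold, doubled
	 * above when 4M text is in use), the current TSBs are kept as is;
	 * otherwise we fall through to sfmmu_size_tsb(), which grows,
	 * shrinks, or adds a second TSB as needed.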
10268 */ 10269 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10270 return; 10271 } 10272 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10273 } 10274 10275 static void 10276 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10277 uint64_t tte4m_cnt, int sectsb_thresh) 10278 { 10279 int tsb_bits; 10280 uint_t tsb_szc; 10281 struct tsb_info *tsbinfop; 10282 hatlock_t *hatlockp = NULL; 10283 10284 hatlockp = sfmmu_hat_enter(sfmmup); 10285 ASSERT(hatlockp != NULL); 10286 tsbinfop = sfmmup->sfmmu_tsb; 10287 ASSERT(tsbinfop != NULL); 10288 10289 /* 10290 * If we're growing, select the size based on RSS. If we're 10291 * shrinking, leave some room so we don't have to turn around and 10292 * grow again immediately. 10293 */ 10294 if (growing) 10295 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10296 else 10297 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10298 10299 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10300 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10301 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10302 hatlockp, TSB_SHRINK); 10303 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10304 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10305 hatlockp, TSB_GROW); 10306 } 10307 tsbinfop = sfmmup->sfmmu_tsb; 10308 10309 /* 10310 * With the TLB and first TSB out of the way, we need to see if 10311 * we need a second TSB for 4M pages. If we managed to reprogram 10312 * the TLB page sizes above, the process will start using this new 10313 * TSB right away; otherwise, it will start using it on the next 10314 * context switch. Either way, it's no big deal so there's no 10315 * synchronization with the trap handlers here unless we grow the 10316 * TSB (in which case it's required to prevent using the old one 10317 * after it's freed). Note: second tsb is required for 32M/256M 10318 * page sizes. 10319 */ 10320 if (tte4m_cnt > sectsb_thresh) { 10321 /* 10322 * If we're growing, select the size based on RSS. If we're 10323 * shrinking, leave some room so we don't have to turn 10324 * around and grow again immediately. 10325 */ 10326 if (growing) 10327 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10328 else 10329 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10330 if (tsbinfop->tsb_next == NULL) { 10331 struct tsb_info *newtsb; 10332 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10333 0 : TSB_ALLOC; 10334 10335 sfmmu_hat_exit(hatlockp); 10336 10337 /* 10338 * Try to allocate a TSB for 4[32|256]M pages. If we 10339 * can't get the size we want, retry w/a minimum sized 10340 * TSB. If that still didn't work, give up; we can 10341 * still run without one. 10342 */ 10343 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10344 TSB4M|TSB32M|TSB256M:TSB4M; 10345 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10346 allocflags, sfmmup)) && 10347 (tsb_szc <= TSB_4M_SZCODE || 10348 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10349 tsb_bits, allocflags, sfmmup)) && 10350 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10351 tsb_bits, allocflags, sfmmup)) { 10352 return; 10353 } 10354 10355 hatlockp = sfmmu_hat_enter(sfmmup); 10356 10357 sfmmu_invalidate_ctx(sfmmup); 10358 10359 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10360 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10361 SFMMU_STAT(sf_tsb_sectsb_create); 10362 sfmmu_hat_exit(hatlockp); 10363 return; 10364 } else { 10365 /* 10366 * It's annoying, but possible for us 10367 * to get here.. 
we dropped the HAT lock 10368 * because of locking order in the kmem 10369 * allocator, and while we were off getting 10370 * our memory, some other thread decided to 10371 * do us a favor and won the race to get a 10372 * second TSB for this process. Sigh. 10373 */ 10374 sfmmu_hat_exit(hatlockp); 10375 sfmmu_tsbinfo_free(newtsb); 10376 return; 10377 } 10378 } 10379 10380 /* 10381 * We have a second TSB, see if it's big enough. 10382 */ 10383 tsbinfop = tsbinfop->tsb_next; 10384 10385 /* 10386 * Check to see if our second TSB is the right size; 10387 * we may need to grow or shrink it. 10388 * To prevent thrashing (e.g. growing the TSB on a 10389 * subsequent map operation), only try to shrink if 10390 * the TSB reach exceeds twice the virtual address 10391 * space size. 10392 */ 10393 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10394 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10395 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10396 tsb_szc, hatlockp, TSB_SHRINK); 10397 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10398 TSB_OK_GROW()) { 10399 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10400 tsb_szc, hatlockp, TSB_GROW); 10401 } 10402 } 10403 10404 sfmmu_hat_exit(hatlockp); 10405 } 10406 10407 /* 10408 * Free up a sfmmu 10409 * Since the sfmmu is currently embedded in the hat struct we simply zero 10410 * out our fields and free up the ism map blk list if any. 10411 */ 10412 static void 10413 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10414 { 10415 ism_blk_t *blkp, *nx_blkp; 10416 #ifdef DEBUG 10417 ism_map_t *map; 10418 int i; 10419 #endif 10420 10421 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10422 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10423 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10424 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10425 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10426 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10427 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10428 10429 sfmmup->sfmmu_free = 0; 10430 sfmmup->sfmmu_ismhat = 0; 10431 10432 blkp = sfmmup->sfmmu_iblk; 10433 sfmmup->sfmmu_iblk = NULL; 10434 10435 while (blkp) { 10436 #ifdef DEBUG 10437 map = blkp->iblk_maps; 10438 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10439 ASSERT(map[i].imap_seg == 0); 10440 ASSERT(map[i].imap_ismhat == NULL); 10441 ASSERT(map[i].imap_ment == NULL); 10442 } 10443 #endif 10444 nx_blkp = blkp->iblk_next; 10445 blkp->iblk_next = NULL; 10446 blkp->iblk_nextpa = (uint64_t)-1; 10447 kmem_cache_free(ism_blk_cache, blkp); 10448 blkp = nx_blkp; 10449 } 10450 } 10451 10452 /* 10453 * Locking primitves accessed by HATLOCK macros 10454 */ 10455 10456 #define SFMMU_SPL_MTX (0x0) 10457 #define SFMMU_ML_MTX (0x1) 10458 10459 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10460 SPL_HASH(pg) : MLIST_HASH(pg)) 10461 10462 kmutex_t * 10463 sfmmu_page_enter(struct page *pp) 10464 { 10465 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10466 } 10467 10468 void 10469 sfmmu_page_exit(kmutex_t *spl) 10470 { 10471 mutex_exit(spl); 10472 } 10473 10474 int 10475 sfmmu_page_spl_held(struct page *pp) 10476 { 10477 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10478 } 10479 10480 kmutex_t * 10481 sfmmu_mlist_enter(struct page *pp) 10482 { 10483 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10484 } 10485 10486 void 10487 sfmmu_mlist_exit(kmutex_t *mml) 10488 { 10489 mutex_exit(mml); 10490 } 10491 10492 int 10493 sfmmu_mlist_held(struct page *pp) 10494 { 10495 10496 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10497 } 10498 10499 /* 10500 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). 
For 10501 * sfmmu_mlist_enter() case mml_table lock array is used and for 10502 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10503 * 10504 * The lock is taken on a root page so that it protects an operation on all 10505 * constituent pages of a large page pp belongs to. 10506 * 10507 * The routine takes a lock from the appropriate array. The lock is determined 10508 * by hashing the root page. After taking the lock this routine checks if the 10509 * root page has the same size code that was used to determine the root (i.e 10510 * that root hasn't changed). If root page has the expected p_szc field we 10511 * have the right lock and it's returned to the caller. If root's p_szc 10512 * decreased we release the lock and retry from the beginning. This case can 10513 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10514 * value and taking the lock. The number of retries due to p_szc decrease is 10515 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10516 * determined by hashing pp itself. 10517 * 10518 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10519 * possible that p_szc can increase. To increase p_szc a thread has to lock 10520 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10521 * callers that don't hold a page locked recheck if hmeblk through which pp 10522 * was found still maps this pp. If it doesn't map it anymore returned lock 10523 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10524 * p_szc increase after taking the lock it returns this lock without further 10525 * retries because in this case the caller doesn't care about which lock was 10526 * taken. The caller will drop it right away. 10527 * 10528 * After the routine returns it's guaranteed that hat_page_demote() can't 10529 * change p_szc field of any of constituent pages of a large page pp belongs 10530 * to as long as pp was either locked at least SHARED prior to this call or 10531 * the caller finds that hment that pointed to this pp still references this 10532 * pp (this also assumes that the caller holds hme hash bucket lock so that 10533 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10534 * hat_pageunload()). 10535 */ 10536 static kmutex_t * 10537 sfmmu_mlspl_enter(struct page *pp, int type) 10538 { 10539 kmutex_t *mtx; 10540 uint_t prev_rszc = UINT_MAX; 10541 page_t *rootpp; 10542 uint_t szc; 10543 uint_t rszc; 10544 uint_t pszc = pp->p_szc; 10545 10546 ASSERT(pp != NULL); 10547 10548 again: 10549 if (pszc == 0) { 10550 mtx = SFMMU_MLSPL_MTX(type, pp); 10551 mutex_enter(mtx); 10552 return (mtx); 10553 } 10554 10555 /* The lock lives in the root page */ 10556 rootpp = PP_GROUPLEADER(pp, pszc); 10557 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10558 mutex_enter(mtx); 10559 10560 /* 10561 * Return mml in the following 3 cases: 10562 * 10563 * 1) If pp itself is root since if its p_szc decreased before we took 10564 * the lock pp is still the root of smaller szc page. And if its p_szc 10565 * increased it doesn't matter what lock we return (see comment in 10566 * front of this routine). 10567 * 10568 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10569 * large page we have the right lock since any previous potential 10570 * hat_page_demote() is done demoting from greater than current root's 10571 * p_szc because hat_page_demote() changes root's p_szc last. 
No 10572 * further hat_page_demote() can start or be in progress since it 10573 * would need the same lock we currently hold. 10574 * 10575 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10576 * matter what lock we return (see comment in front of this routine). 10577 */ 10578 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10579 rszc >= prev_rszc) { 10580 return (mtx); 10581 } 10582 10583 /* 10584 * hat_page_demote() could have decreased root's p_szc. 10585 * In this case pp's p_szc must also be smaller than pszc. 10586 * Retry. 10587 */ 10588 if (rszc < pszc) { 10589 szc = pp->p_szc; 10590 if (szc < pszc) { 10591 mutex_exit(mtx); 10592 pszc = szc; 10593 goto again; 10594 } 10595 /* 10596 * pp's p_szc increased after it was decreased. 10597 * page cannot be mapped. Return current lock. The caller 10598 * will drop it right away. 10599 */ 10600 return (mtx); 10601 } 10602 10603 /* 10604 * root's p_szc is greater than pp's p_szc. 10605 * hat_page_demote() is not done with all pages 10606 * yet. Wait for it to complete. 10607 */ 10608 mutex_exit(mtx); 10609 rootpp = PP_GROUPLEADER(rootpp, rszc); 10610 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10611 mutex_enter(mtx); 10612 mutex_exit(mtx); 10613 prev_rszc = rszc; 10614 goto again; 10615 } 10616 10617 static int 10618 sfmmu_mlspl_held(struct page *pp, int type) 10619 { 10620 kmutex_t *mtx; 10621 10622 ASSERT(pp != NULL); 10623 /* The lock lives in the root page */ 10624 pp = PP_PAGEROOT(pp); 10625 ASSERT(pp != NULL); 10626 10627 mtx = SFMMU_MLSPL_MTX(type, pp); 10628 return (MUTEX_HELD(mtx)); 10629 } 10630 10631 static uint_t 10632 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10633 { 10634 struct hme_blk *hblkp; 10635 10636 10637 if (freehblkp != NULL) { 10638 mutex_enter(&freehblkp_lock); 10639 if (freehblkp != NULL) { 10640 /* 10641 * If the current thread is owning hblk_reserve OR 10642 * critical request from sfmmu_hblk_steal() 10643 * let it succeed even if freehblkcnt is really low. 10644 */ 10645 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10646 SFMMU_STAT(sf_get_free_throttle); 10647 mutex_exit(&freehblkp_lock); 10648 return (0); 10649 } 10650 freehblkcnt--; 10651 *hmeblkpp = freehblkp; 10652 hblkp = *hmeblkpp; 10653 freehblkp = hblkp->hblk_next; 10654 mutex_exit(&freehblkp_lock); 10655 hblkp->hblk_next = NULL; 10656 SFMMU_STAT(sf_get_free_success); 10657 10658 ASSERT(hblkp->hblk_hmecnt == 0); 10659 ASSERT(hblkp->hblk_vcnt == 0); 10660 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp)); 10661 10662 return (1); 10663 } 10664 mutex_exit(&freehblkp_lock); 10665 } 10666 10667 /* Check cpu hblk pending queues */ 10668 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) { 10669 hblkp = *hmeblkpp; 10670 hblkp->hblk_next = NULL; 10671 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp); 10672 10673 ASSERT(hblkp->hblk_hmecnt == 0); 10674 ASSERT(hblkp->hblk_vcnt == 0); 10675 10676 return (1); 10677 } 10678 10679 SFMMU_STAT(sf_get_free_fail); 10680 return (0); 10681 } 10682 10683 static uint_t 10684 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10685 { 10686 struct hme_blk *hblkp; 10687 10688 ASSERT(hmeblkp->hblk_hmecnt == 0); 10689 ASSERT(hmeblkp->hblk_vcnt == 0); 10690 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10691 10692 /* 10693 * If the current thread is mapping into kernel space, 10694 * let it succede even if freehblkcnt is max 10695 * so that it will avoid freeing it to kmem. 
10696 * This will prevent stack overflow due to 10697 * possible recursion since kmem_cache_free() 10698 * might require creation of a slab which 10699 * in turn needs an hmeblk to map that slab; 10700 * let's break this vicious chain at the first 10701 * opportunity. 10702 */ 10703 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10704 mutex_enter(&freehblkp_lock); 10705 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10706 SFMMU_STAT(sf_put_free_success); 10707 freehblkcnt++; 10708 hmeblkp->hblk_next = freehblkp; 10709 freehblkp = hmeblkp; 10710 mutex_exit(&freehblkp_lock); 10711 return (1); 10712 } 10713 mutex_exit(&freehblkp_lock); 10714 } 10715 10716 /* 10717 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 10718 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10719 * we are not in the process of mapping into kernel space. 10720 */ 10721 ASSERT(!critical); 10722 while (freehblkcnt > HBLK_RESERVE_CNT) { 10723 mutex_enter(&freehblkp_lock); 10724 if (freehblkcnt > HBLK_RESERVE_CNT) { 10725 freehblkcnt--; 10726 hblkp = freehblkp; 10727 freehblkp = hblkp->hblk_next; 10728 mutex_exit(&freehblkp_lock); 10729 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10730 kmem_cache_free(sfmmu8_cache, hblkp); 10731 continue; 10732 } 10733 mutex_exit(&freehblkp_lock); 10734 } 10735 SFMMU_STAT(sf_put_free_fail); 10736 return (0); 10737 } 10738 10739 static void 10740 sfmmu_hblk_swap(struct hme_blk *new) 10741 { 10742 struct hme_blk *old, *hblkp, *prev; 10743 uint64_t newpa; 10744 caddr_t base, vaddr, endaddr; 10745 struct hmehash_bucket *hmebp; 10746 struct sf_hment *osfhme, *nsfhme; 10747 page_t *pp; 10748 kmutex_t *pml; 10749 tte_t tte; 10750 struct hme_blk *list = NULL; 10751 10752 #ifdef DEBUG 10753 hmeblk_tag hblktag; 10754 struct hme_blk *found; 10755 #endif 10756 old = HBLK_RESERVE; 10757 ASSERT(!old->hblk_shared); 10758 10759 /* 10760 * save pa before bcopy clobbers it 10761 */ 10762 newpa = new->hblk_nextpa; 10763 10764 base = (caddr_t)get_hblk_base(old); 10765 endaddr = base + get_hblk_span(old); 10766 10767 /* 10768 * acquire hash bucket lock. 10769 */ 10770 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10771 SFMMU_INVALID_SHMERID); 10772 10773 /* 10774 * copy contents from old to new 10775 */ 10776 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10777 10778 /* 10779 * add new to hash chain 10780 */ 10781 sfmmu_hblk_hash_add(hmebp, new, newpa); 10782 10783 /* 10784 * search hash chain for hblk_reserve; this needs to be performed 10785 * after adding new, otherwise prev won't correspond to the hblk which 10786 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to 10787 * remove old later. 10788 */ 10789 for (prev = NULL, 10790 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old; 10791 prev = hblkp, hblkp = hblkp->hblk_next) 10792 ; 10793 10794 if (hblkp != old) 10795 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10796 10797 /* 10798 * p_mapping list is still pointing to hments in hblk_reserve; 10799 * fix up p_mapping list so that they point to hments in new. 10800 * 10801 * Since all these mappings are created by hblk_reserve_thread 10802 * on the way and it's using at least one of the buffers from each of 10803 * the newly minted slabs, there is no danger of any of these 10804 * mappings getting unloaded by another thread. 10805 * 10806 * tsbmiss could only modify ref/mod bits of hments in old/new. 
10807 * Since all of these hments hold mappings established by segkmem 10808 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10809 * have no meaning for the mappings in hblk_reserve. hments in 10810 * old and new are identical except for ref/mod bits. 10811 */ 10812 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10813 10814 HBLKTOHME(osfhme, old, vaddr); 10815 sfmmu_copytte(&osfhme->hme_tte, &tte); 10816 10817 if (TTE_IS_VALID(&tte)) { 10818 if ((pp = osfhme->hme_page) == NULL) 10819 panic("sfmmu_hblk_swap: page not mapped"); 10820 10821 pml = sfmmu_mlist_enter(pp); 10822 10823 if (pp != osfhme->hme_page) 10824 panic("sfmmu_hblk_swap: mapping changed"); 10825 10826 HBLKTOHME(nsfhme, new, vaddr); 10827 10828 HME_ADD(nsfhme, pp); 10829 HME_SUB(osfhme, pp); 10830 10831 sfmmu_mlist_exit(pml); 10832 } 10833 } 10834 10835 /* 10836 * remove old from hash chain 10837 */ 10838 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1); 10839 10840 #ifdef DEBUG 10841 10842 hblktag.htag_id = ksfmmup; 10843 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10844 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10845 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10846 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10847 10848 if (found != new) 10849 panic("sfmmu_hblk_swap: new hblk not found"); 10850 #endif 10851 10852 SFMMU_HASH_UNLOCK(hmebp); 10853 10854 /* 10855 * Reset hblk_reserve 10856 */ 10857 bzero((void *)old, HME8BLK_SZ); 10858 old->hblk_nextpa = va_to_pa((caddr_t)old); 10859 } 10860 10861 /* 10862 * Grab the mlist mutex for both pages passed in. 10863 * 10864 * low and high will be returned as pointers to the mutexes for these pages. 10865 * low refers to the mutex residing in the lower bin of the mlist hash, while 10866 * high refers to the mutex residing in the higher bin of the mlist hash. This 10867 * is due to the locking order restrictions on the same thread grabbing 10868 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10869 * 10870 * If both pages hash to the same mutex, only grab that single mutex, and 10871 * high will be returned as NULL 10872 * If the pages hash to different bins in the hash, grab the lower addressed 10873 * lock first and then the higher addressed lock in order to follow the locking 10874 * rules involved with the same thread grabbing multiple mlist mutexes. 10875 * low and high will both have non-NULL values. 10876 */ 10877 static void 10878 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10879 kmutex_t **low, kmutex_t **high) 10880 { 10881 kmutex_t *mml_targ, *mml_repl; 10882 10883 /* 10884 * no need to do the dance around szc as in sfmmu_mlist_enter() 10885 * because this routine is only called by hat_page_relocate() and all 10886 * targ and repl pages are already locked EXCL so szc can't change. 
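	 *
	 * For reference, the expected calling pattern is (a sketch only;
	 * hat_page_relocate() is the real caller):
	 *
	 *	kmutex_t *low, *high;
	 *
	 *	sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
	 *	... move mapping list entries from targ to repl ...
	 *	sfmmu_mlist_reloc_exit(low, high);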
10887 */ 10888 10889 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10890 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10891 10892 if (mml_targ == mml_repl) { 10893 *low = mml_targ; 10894 *high = NULL; 10895 } else { 10896 if (mml_targ < mml_repl) { 10897 *low = mml_targ; 10898 *high = mml_repl; 10899 } else { 10900 *low = mml_repl; 10901 *high = mml_targ; 10902 } 10903 } 10904 10905 mutex_enter(*low); 10906 if (*high) 10907 mutex_enter(*high); 10908 } 10909 10910 static void 10911 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 10912 { 10913 if (high) 10914 mutex_exit(high); 10915 mutex_exit(low); 10916 } 10917 10918 static hatlock_t * 10919 sfmmu_hat_enter(sfmmu_t *sfmmup) 10920 { 10921 hatlock_t *hatlockp; 10922 10923 if (sfmmup != ksfmmup) { 10924 hatlockp = TSB_HASH(sfmmup); 10925 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 10926 return (hatlockp); 10927 } 10928 return (NULL); 10929 } 10930 10931 static hatlock_t * 10932 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 10933 { 10934 hatlock_t *hatlockp; 10935 10936 if (sfmmup != ksfmmup) { 10937 hatlockp = TSB_HASH(sfmmup); 10938 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 10939 return (NULL); 10940 return (hatlockp); 10941 } 10942 return (NULL); 10943 } 10944 10945 static void 10946 sfmmu_hat_exit(hatlock_t *hatlockp) 10947 { 10948 if (hatlockp != NULL) 10949 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 10950 } 10951 10952 static void 10953 sfmmu_hat_lock_all(void) 10954 { 10955 int i; 10956 for (i = 0; i < SFMMU_NUM_LOCK; i++) 10957 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 10958 } 10959 10960 static void 10961 sfmmu_hat_unlock_all(void) 10962 { 10963 int i; 10964 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 10965 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 10966 } 10967 10968 int 10969 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 10970 { 10971 ASSERT(sfmmup != ksfmmup); 10972 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 10973 } 10974 10975 /* 10976 * Locking primitives to provide consistency between ISM unmap 10977 * and other operations. Since ISM unmap can take a long time, we 10978 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 10979 * contention on the hatlock buckets while ISM segments are being 10980 * unmapped. The tradeoff is that the flags don't prevent priority 10981 * inversion from occurring, so we must request kernel priority in 10982 * case we have to sleep to keep from getting buried while holding 10983 * the HAT_ISMBUSY flag set, which in turn could block other kernel 10984 * threads from running (for example, in sfmmu_uvatopfn()). 
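 *
 * The expected calling pattern is a simple bracket (a sketch; the real
 * callers are the ISM map/unmap paths):
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);
 *	... manipulate the ISM mappings ...
 *	sfmmu_ismhat_exit(sfmmup, 0);
 *
 * where sfmmu_ismhat_enter() sets HAT_ISMBUSY (sleeping on
 * sfmmu_tsb_cv if another thread already has it set) and
 * sfmmu_ismhat_exit() clears the flag and broadcasts to any waiters.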
10985 */ 10986 static void 10987 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 10988 { 10989 hatlock_t *hatlockp; 10990 10991 THREAD_KPRI_REQUEST(); 10992 if (!hatlock_held) 10993 hatlockp = sfmmu_hat_enter(sfmmup); 10994 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 10995 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10996 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 10997 if (!hatlock_held) 10998 sfmmu_hat_exit(hatlockp); 10999 } 11000 11001 static void 11002 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 11003 { 11004 hatlock_t *hatlockp; 11005 11006 if (!hatlock_held) 11007 hatlockp = sfmmu_hat_enter(sfmmup); 11008 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 11009 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 11010 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11011 if (!hatlock_held) 11012 sfmmu_hat_exit(hatlockp); 11013 THREAD_KPRI_RELEASE(); 11014 } 11015 11016 /* 11017 * 11018 * Algorithm: 11019 * 11020 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 11021 * hblks. 11022 * 11023 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 11024 * 11025 * (a) try to return an hblk from reserve pool of free hblks; 11026 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 11027 * and return hblk_reserve. 11028 * 11029 * (3) call kmem_cache_alloc() to allocate hblk; 11030 * 11031 * (a) if hblk_reserve_lock is held by the current thread, 11032 * atomically replace hblk_reserve by the hblk that is 11033 * returned by kmem_cache_alloc; release hblk_reserve_lock 11034 * and call kmem_cache_alloc() again. 11035 * (b) if reserve pool is not full, add the hblk that is 11036 * returned by kmem_cache_alloc to reserve pool and 11037 * call kmem_cache_alloc again. 11038 * 11039 */ 11040 static struct hme_blk * 11041 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 11042 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 11043 uint_t flags, uint_t rid) 11044 { 11045 struct hme_blk *hmeblkp = NULL; 11046 struct hme_blk *newhblkp; 11047 struct hme_blk *shw_hblkp = NULL; 11048 struct kmem_cache *sfmmu_cache = NULL; 11049 uint64_t hblkpa; 11050 ulong_t index; 11051 uint_t owner; /* set to 1 if using hblk_reserve */ 11052 uint_t forcefree; 11053 int sleep; 11054 sf_srd_t *srdp; 11055 sf_region_t *rgnp; 11056 11057 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11058 ASSERT(hblktag.htag_rid == rid); 11059 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 11060 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11061 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 11062 11063 /* 11064 * If segkmem is not created yet, allocate from static hmeblks 11065 * created at the end of startup_modules(). See the block comment 11066 * in startup_modules() describing how we estimate the number of 11067 * static hmeblks that will be needed during re-map. 11068 */ 11069 if (!hblk_alloc_dynamic) { 11070 11071 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11072 11073 if (size == TTE8K) { 11074 index = nucleus_hblk8.index; 11075 if (index >= nucleus_hblk8.len) { 11076 /* 11077 * If we panic here, see startup_modules() to 11078 * make sure that we are calculating the 11079 * number of hblk8's that we need correctly. 11080 */ 11081 prom_panic("no nucleus hblk8 to allocate"); 11082 } 11083 hmeblkp = 11084 (struct hme_blk *)&nucleus_hblk8.list[index]; 11085 nucleus_hblk8.index++; 11086 SFMMU_STAT(sf_hblk8_nalloc); 11087 } else { 11088 index = nucleus_hblk1.index; 11089 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 11090 /* 11091 * If we panic here, see startup_modules(). 
11092 * Most likely you need to update the 11093 * calculation of the number of hblk1 elements 11094 * that the kernel needs to boot. 11095 */ 11096 prom_panic("no nucleus hblk1 to allocate"); 11097 } 11098 hmeblkp = 11099 (struct hme_blk *)&nucleus_hblk1.list[index]; 11100 nucleus_hblk1.index++; 11101 SFMMU_STAT(sf_hblk1_nalloc); 11102 } 11103 11104 goto hblk_init; 11105 } 11106 11107 SFMMU_HASH_UNLOCK(hmebp); 11108 11109 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 11110 if (mmu_page_sizes == max_mmu_page_sizes) { 11111 if (size < TTE256M) 11112 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11113 size, flags); 11114 } else { 11115 if (size < TTE4M) 11116 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11117 size, flags); 11118 } 11119 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11120 /* 11121 * Shared hmes use per region bitmaps in rgn_hmeflag 11122 * rather than shadow hmeblks to keep track of the 11123 * mapping sizes which have been allocated for the region. 11124 * Here we cleanup old invalid hmeblks with this rid, 11125 * which may be left around by pageunload(). 11126 */ 11127 int ttesz; 11128 caddr_t va; 11129 caddr_t eva = vaddr + TTEBYTES(size); 11130 11131 ASSERT(sfmmup != KHATID); 11132 11133 srdp = sfmmup->sfmmu_srdp; 11134 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11135 rgnp = srdp->srd_hmergnp[rid]; 11136 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11137 ASSERT(rgnp->rgn_refcnt != 0); 11138 ASSERT(size <= rgnp->rgn_pgszc); 11139 11140 ttesz = HBLK_MIN_TTESZ; 11141 do { 11142 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11143 continue; 11144 } 11145 11146 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11147 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11148 } else if (ttesz < size) { 11149 for (va = vaddr; va < eva; 11150 va += TTEBYTES(ttesz)) { 11151 sfmmu_cleanup_rhblk(srdp, va, rid, 11152 ttesz); 11153 } 11154 } 11155 } while (++ttesz <= rgnp->rgn_pgszc); 11156 } 11157 11158 fill_hblk: 11159 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11160 11161 if (owner && size == TTE8K) { 11162 11163 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11164 /* 11165 * We are really in a tight spot. We already own 11166 * hblk_reserve and we need another hblk. In anticipation 11167 * of this kind of scenario, we specifically set aside 11168 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11169 * by owner of hblk_reserve. 11170 */ 11171 SFMMU_STAT(sf_hblk_recurse_cnt); 11172 11173 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11174 panic("sfmmu_hblk_alloc: reserve list is empty"); 11175 11176 goto hblk_verify; 11177 } 11178 11179 ASSERT(!owner); 11180 11181 if ((flags & HAT_NO_KALLOC) == 0) { 11182 11183 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11184 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11185 11186 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11187 hmeblkp = sfmmu_hblk_steal(size); 11188 } else { 11189 /* 11190 * if we are the owner of hblk_reserve, 11191 * swap hblk_reserve with hmeblkp and 11192 * start a fresh life. Hope things go 11193 * better this time. 
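			 * (sfmmu_hblk_swap() copies hblk_reserve's live
			 * contents into the newly allocated hblk, rethreads
			 * the hash chain and the p_mapping lists, and then
			 * zeroes hblk_reserve so it can be handed out again.)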
11194 */ 11195 if (hblk_reserve_thread == curthread) { 11196 ASSERT(sfmmu_cache == sfmmu8_cache); 11197 sfmmu_hblk_swap(hmeblkp); 11198 hblk_reserve_thread = NULL; 11199 mutex_exit(&hblk_reserve_lock); 11200 goto fill_hblk; 11201 } 11202 /* 11203 * let's donate this hblk to our reserve list if 11204 * we are not mapping kernel range 11205 */ 11206 if (size == TTE8K && sfmmup != KHATID) { 11207 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11208 goto fill_hblk; 11209 } 11210 } 11211 } else { 11212 /* 11213 * We are here to map the slab in sfmmu8_cache; let's 11214 * check if we could tap our reserve list; if successful, 11215 * this will avoid the pain of going thru sfmmu_hblk_swap 11216 */ 11217 SFMMU_STAT(sf_hblk_slab_cnt); 11218 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11219 /* 11220 * let's start hblk_reserve dance 11221 */ 11222 SFMMU_STAT(sf_hblk_reserve_cnt); 11223 owner = 1; 11224 mutex_enter(&hblk_reserve_lock); 11225 hmeblkp = HBLK_RESERVE; 11226 hblk_reserve_thread = curthread; 11227 } 11228 } 11229 11230 hblk_verify: 11231 ASSERT(hmeblkp != NULL); 11232 set_hblk_sz(hmeblkp, size); 11233 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11234 SFMMU_HASH_LOCK(hmebp); 11235 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11236 if (newhblkp != NULL) { 11237 SFMMU_HASH_UNLOCK(hmebp); 11238 if (hmeblkp != HBLK_RESERVE) { 11239 /* 11240 * This is really tricky! 11241 * 11242 * vmem_alloc(vmem_seg_arena) 11243 * vmem_alloc(vmem_internal_arena) 11244 * segkmem_alloc(heap_arena) 11245 * vmem_alloc(heap_arena) 11246 * page_create() 11247 * hat_memload() 11248 * kmem_cache_free() 11249 * kmem_cache_alloc() 11250 * kmem_slab_create() 11251 * vmem_alloc(kmem_internal_arena) 11252 * segkmem_alloc(heap_arena) 11253 * vmem_alloc(heap_arena) 11254 * page_create() 11255 * hat_memload() 11256 * kmem_cache_free() 11257 * ... 11258 * 11259 * Thus, hat_memload() could call kmem_cache_free 11260 * for enough number of times that we could easily 11261 * hit the bottom of the stack or run out of reserve 11262 * list of vmem_seg structs. So, we must donate 11263 * this hblk to reserve list if it's allocated 11264 * from sfmmu8_cache *and* mapping kernel range. 11265 * We don't need to worry about freeing hmeblk1's 11266 * to kmem since they don't map any kmem slabs. 11267 * 11268 * Note: When segkmem supports largepages, we must 11269 * free hmeblk1's to reserve list as well. 11270 */ 11271 forcefree = (sfmmup == KHATID) ? 1 : 0; 11272 if (size == TTE8K && 11273 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11274 goto re_verify; 11275 } 11276 ASSERT(sfmmup != KHATID); 11277 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11278 } else { 11279 /* 11280 * Hey! we don't need hblk_reserve any more. 11281 */ 11282 ASSERT(owner); 11283 hblk_reserve_thread = NULL; 11284 mutex_exit(&hblk_reserve_lock); 11285 owner = 0; 11286 } 11287 re_verify: 11288 /* 11289 * let's check if the goodies are still present 11290 */ 11291 SFMMU_HASH_LOCK(hmebp); 11292 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11293 if (newhblkp != NULL) { 11294 /* 11295 * return newhblkp if it's not hblk_reserve; 11296 * if newhblkp is hblk_reserve, return it 11297 * _only if_ we are the owner of hblk_reserve. 
11298 */ 11299 if (newhblkp != HBLK_RESERVE || owner) { 11300 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11301 newhblkp->hblk_shared); 11302 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11303 !newhblkp->hblk_shared); 11304 return (newhblkp); 11305 } else { 11306 /* 11307 * we just hit hblk_reserve in the hash and 11308 * we are not the owner of that; 11309 * 11310 * block until hblk_reserve_thread completes 11311 * swapping hblk_reserve and try the dance 11312 * once again. 11313 */ 11314 SFMMU_HASH_UNLOCK(hmebp); 11315 mutex_enter(&hblk_reserve_lock); 11316 mutex_exit(&hblk_reserve_lock); 11317 SFMMU_STAT(sf_hblk_reserve_hit); 11318 goto fill_hblk; 11319 } 11320 } else { 11321 /* 11322 * it's no more! try the dance once again. 11323 */ 11324 SFMMU_HASH_UNLOCK(hmebp); 11325 goto fill_hblk; 11326 } 11327 } 11328 11329 hblk_init: 11330 if (SFMMU_IS_SHMERID_VALID(rid)) { 11331 uint16_t tteflag = 0x1 << 11332 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size); 11333 11334 if (!(rgnp->rgn_hmeflags & tteflag)) { 11335 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11336 } 11337 hmeblkp->hblk_shared = 1; 11338 } else { 11339 hmeblkp->hblk_shared = 0; 11340 } 11341 set_hblk_sz(hmeblkp, size); 11342 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11343 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11344 hmeblkp->hblk_tag = hblktag; 11345 hmeblkp->hblk_shadow = shw_hblkp; 11346 hblkpa = hmeblkp->hblk_nextpa; 11347 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 11348 11349 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11350 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11351 ASSERT(hmeblkp->hblk_hmecnt == 0); 11352 ASSERT(hmeblkp->hblk_vcnt == 0); 11353 ASSERT(hmeblkp->hblk_lckcnt == 0); 11354 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11355 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11356 return (hmeblkp); 11357 } 11358 11359 /* 11360 * This function cleans up the hme_blk and returns it to the free list. 11361 */ 11362 /* ARGSUSED */ 11363 static void 11364 sfmmu_hblk_free(struct hme_blk **listp) 11365 { 11366 struct hme_blk *hmeblkp, *next_hmeblkp; 11367 int size; 11368 uint_t critical; 11369 uint64_t hblkpa; 11370 11371 ASSERT(*listp != NULL); 11372 11373 hmeblkp = *listp; 11374 while (hmeblkp != NULL) { 11375 next_hmeblkp = hmeblkp->hblk_next; 11376 ASSERT(!hmeblkp->hblk_hmecnt); 11377 ASSERT(!hmeblkp->hblk_vcnt); 11378 ASSERT(!hmeblkp->hblk_lckcnt); 11379 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11380 ASSERT(hmeblkp->hblk_shared == 0); 11381 ASSERT(hmeblkp->hblk_shw_bit == 0); 11382 ASSERT(hmeblkp->hblk_shadow == NULL); 11383 11384 hblkpa = va_to_pa((caddr_t)hmeblkp); 11385 ASSERT(hblkpa != (uint64_t)-1); 11386 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0; 11387 11388 size = get_hblk_ttesz(hmeblkp); 11389 hmeblkp->hblk_next = NULL; 11390 hmeblkp->hblk_nextpa = hblkpa; 11391 11392 if (hmeblkp->hblk_nuc_bit == 0) { 11393 11394 if (size != TTE8K || 11395 !sfmmu_put_free_hblk(hmeblkp, critical)) 11396 kmem_cache_free(get_hblk_cache(hmeblkp), 11397 hmeblkp); 11398 } 11399 hmeblkp = next_hmeblkp; 11400 } 11401 } 11402 11403 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11404 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11405 11406 static uint_t sfmmu_hblk_steal_twice; 11407 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11408 11409 /* 11410 * Steal a hmeblk from user or kernel hme hash lists. 11411 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11412 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11413 * tap into critical reserve of freehblkp. 
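 * The overall search order is: the per-cpu pending queues, the free
 * hblk reserve pool (8K requests only), the user hash buckets, and
 * finally the kernel hash buckets, where only completely free hblks
 * are taken.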
11414 * Note: We remain looping in this routine until we find one. 11415 */ 11416 static struct hme_blk * 11417 sfmmu_hblk_steal(int size) 11418 { 11419 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11420 struct hmehash_bucket *hmebp; 11421 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11422 uint64_t hblkpa; 11423 int i; 11424 uint_t loop_cnt = 0, critical; 11425 11426 for (;;) { 11427 /* Check cpu hblk pending queues */ 11428 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) { 11429 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 11430 ASSERT(hmeblkp->hblk_hmecnt == 0); 11431 ASSERT(hmeblkp->hblk_vcnt == 0); 11432 return (hmeblkp); 11433 } 11434 11435 if (size == TTE8K) { 11436 critical = 11437 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11438 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11439 return (hmeblkp); 11440 } 11441 11442 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 11443 uhmehash_steal_hand; 11444 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11445 11446 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11447 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11448 SFMMU_HASH_LOCK(hmebp); 11449 hmeblkp = hmebp->hmeblkp; 11450 hblkpa = hmebp->hmeh_nextpa; 11451 pr_hblk = NULL; 11452 while (hmeblkp) { 11453 /* 11454 * check if it is a hmeblk that is not locked 11455 * and not shared. skip shadow hmeblks with 11456 * shadow_mask set i.e valid count non zero. 11457 */ 11458 if ((get_hblk_ttesz(hmeblkp) == size) && 11459 (hmeblkp->hblk_shw_bit == 0 || 11460 hmeblkp->hblk_vcnt == 0) && 11461 (hmeblkp->hblk_lckcnt == 0)) { 11462 /* 11463 * there is a high probability that we 11464 * will find a free one. search some 11465 * buckets for a free hmeblk initially 11466 * before unloading a valid hmeblk. 11467 */ 11468 if ((hmeblkp->hblk_vcnt == 0 && 11469 hmeblkp->hblk_hmecnt == 0) || (i >= 11470 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11471 if (sfmmu_steal_this_hblk(hmebp, 11472 hmeblkp, hblkpa, pr_hblk)) { 11473 /* 11474 * Hblk is unloaded 11475 * successfully 11476 */ 11477 break; 11478 } 11479 } 11480 } 11481 pr_hblk = hmeblkp; 11482 hblkpa = hmeblkp->hblk_nextpa; 11483 hmeblkp = hmeblkp->hblk_next; 11484 } 11485 11486 SFMMU_HASH_UNLOCK(hmebp); 11487 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11488 hmebp = uhme_hash; 11489 } 11490 uhmehash_steal_hand = hmebp; 11491 11492 if (hmeblkp != NULL) 11493 break; 11494 11495 /* 11496 * in the worst case, look for a free one in the kernel 11497 * hash table. 11498 */ 11499 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11500 SFMMU_HASH_LOCK(hmebp); 11501 hmeblkp = hmebp->hmeblkp; 11502 hblkpa = hmebp->hmeh_nextpa; 11503 pr_hblk = NULL; 11504 while (hmeblkp) { 11505 /* 11506 * check if it is free hmeblk 11507 */ 11508 if ((get_hblk_ttesz(hmeblkp) == size) && 11509 (hmeblkp->hblk_lckcnt == 0) && 11510 (hmeblkp->hblk_vcnt == 0) && 11511 (hmeblkp->hblk_hmecnt == 0)) { 11512 if (sfmmu_steal_this_hblk(hmebp, 11513 hmeblkp, hblkpa, pr_hblk)) { 11514 break; 11515 } else { 11516 /* 11517 * Cannot fail since we have 11518 * hash lock. 
11519 */ 11520 panic("fail to steal?"); 11521 } 11522 } 11523 11524 pr_hblk = hmeblkp; 11525 hblkpa = hmeblkp->hblk_nextpa; 11526 hmeblkp = hmeblkp->hblk_next; 11527 } 11528 11529 SFMMU_HASH_UNLOCK(hmebp); 11530 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11531 hmebp = khme_hash; 11532 } 11533 11534 if (hmeblkp != NULL) 11535 break; 11536 sfmmu_hblk_steal_twice++; 11537 } 11538 return (hmeblkp); 11539 } 11540 11541 /* 11542 * This routine does real work to prepare a hblk to be "stolen" by 11543 * unloading the mappings, updating shadow counts .... 11544 * It returns 1 if the block is ready to be reused (stolen), or 0 11545 * means the block cannot be stolen yet- pageunload is still working 11546 * on this hblk. 11547 */ 11548 static int 11549 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11550 uint64_t hblkpa, struct hme_blk *pr_hblk) 11551 { 11552 int shw_size, vshift; 11553 struct hme_blk *shw_hblkp; 11554 caddr_t vaddr; 11555 uint_t shw_mask, newshw_mask; 11556 struct hme_blk *list = NULL; 11557 11558 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11559 11560 /* 11561 * check if the hmeblk is free, unload if necessary 11562 */ 11563 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11564 sfmmu_t *sfmmup; 11565 demap_range_t dmr; 11566 11567 sfmmup = hblktosfmmu(hmeblkp); 11568 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11569 return (0); 11570 } 11571 DEMAP_RANGE_INIT(sfmmup, &dmr); 11572 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11573 (caddr_t)get_hblk_base(hmeblkp), 11574 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11575 DEMAP_RANGE_FLUSH(&dmr); 11576 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11577 /* 11578 * Pageunload is working on the same hblk. 11579 */ 11580 return (0); 11581 } 11582 11583 sfmmu_hblk_steal_unload_count++; 11584 } 11585 11586 ASSERT(hmeblkp->hblk_lckcnt == 0); 11587 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11588 11589 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1); 11590 hmeblkp->hblk_nextpa = hblkpa; 11591 11592 shw_hblkp = hmeblkp->hblk_shadow; 11593 if (shw_hblkp) { 11594 ASSERT(!hmeblkp->hblk_shared); 11595 shw_size = get_hblk_ttesz(shw_hblkp); 11596 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11597 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11598 ASSERT(vshift < 8); 11599 /* 11600 * Atomically clear shadow mask bit 11601 */ 11602 do { 11603 shw_mask = shw_hblkp->hblk_shw_mask; 11604 ASSERT(shw_mask & (1 << vshift)); 11605 newshw_mask = shw_mask & ~(1 << vshift); 11606 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 11607 shw_mask, newshw_mask); 11608 } while (newshw_mask != shw_mask); 11609 hmeblkp->hblk_shadow = NULL; 11610 } 11611 11612 /* 11613 * remove shadow bit if we are stealing an unused shadow hmeblk. 11614 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11615 * we are indeed allocating a shadow hmeblk. 
11616 */ 11617 hmeblkp->hblk_shw_bit = 0; 11618 11619 if (hmeblkp->hblk_shared) { 11620 sf_srd_t *srdp; 11621 sf_region_t *rgnp; 11622 uint_t rid; 11623 11624 srdp = hblktosrd(hmeblkp); 11625 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11626 rid = hmeblkp->hblk_tag.htag_rid; 11627 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11628 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11629 rgnp = srdp->srd_hmergnp[rid]; 11630 ASSERT(rgnp != NULL); 11631 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11632 hmeblkp->hblk_shared = 0; 11633 } 11634 11635 sfmmu_hblk_steal_count++; 11636 SFMMU_STAT(sf_steal_count); 11637 11638 return (1); 11639 } 11640 11641 struct hme_blk * 11642 sfmmu_hmetohblk(struct sf_hment *sfhme) 11643 { 11644 struct hme_blk *hmeblkp; 11645 struct sf_hment *sfhme0; 11646 struct hme_blk *hblk_dummy = 0; 11647 11648 /* 11649 * No dummy sf_hments, please. 11650 */ 11651 ASSERT(sfhme->hme_tte.ll != 0); 11652 11653 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11654 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11655 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11656 11657 return (hmeblkp); 11658 } 11659 11660 /* 11661 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11662 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11663 * KM_SLEEP allocation. 11664 * 11665 * Return 0 on success, -1 otherwise. 11666 */ 11667 static void 11668 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11669 { 11670 struct tsb_info *tsbinfop, *next; 11671 tsb_replace_rc_t rc; 11672 boolean_t gotfirst = B_FALSE; 11673 11674 ASSERT(sfmmup != ksfmmup); 11675 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11676 11677 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11678 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11679 } 11680 11681 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11682 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11683 } else { 11684 return; 11685 } 11686 11687 ASSERT(sfmmup->sfmmu_tsb != NULL); 11688 11689 /* 11690 * Loop over all tsbinfo's replacing them with ones that actually have 11691 * a TSB. If any of the replacements ever fail, bail out of the loop. 11692 */ 11693 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11694 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11695 next = tsbinfop->tsb_next; 11696 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11697 hatlockp, TSB_SWAPIN); 11698 if (rc != TSB_SUCCESS) { 11699 break; 11700 } 11701 gotfirst = B_TRUE; 11702 } 11703 11704 switch (rc) { 11705 case TSB_SUCCESS: 11706 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11707 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11708 return; 11709 case TSB_LOSTRACE: 11710 break; 11711 case TSB_ALLOCFAIL: 11712 break; 11713 default: 11714 panic("sfmmu_replace_tsb returned unrecognized failure code " 11715 "%d", rc); 11716 } 11717 11718 /* 11719 * In this case, we failed to get one of our TSBs. If we failed to 11720 * get the first TSB, get one of minimum size (8KB). Walk the list 11721 * and throw away the tsbinfos, starting where the allocation failed; 11722 * we can get by with just one TSB as long as we don't leave the 11723 * SWAPPED tsbinfo structures lying around. 
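	 * Concretely: keep the first tsbinfo (detaching the rest of the
	 * list from it), drop the HAT lock and free everything after it,
	 * and if even the first replacement failed (!gotfirst), force
	 * allocation of a minimum-size 8K TSB below.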
11724 */ 11725 tsbinfop = sfmmup->sfmmu_tsb; 11726 next = tsbinfop->tsb_next; 11727 tsbinfop->tsb_next = NULL; 11728 11729 sfmmu_hat_exit(hatlockp); 11730 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 11731 next = tsbinfop->tsb_next; 11732 sfmmu_tsbinfo_free(tsbinfop); 11733 } 11734 hatlockp = sfmmu_hat_enter(sfmmup); 11735 11736 /* 11737 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 11738 * pages. 11739 */ 11740 if (!gotfirst) { 11741 tsbinfop = sfmmup->sfmmu_tsb; 11742 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 11743 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 11744 ASSERT(rc == TSB_SUCCESS); 11745 } 11746 11747 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11748 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11749 } 11750 11751 static int 11752 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw) 11753 { 11754 ulong_t bix = 0; 11755 uint_t rid; 11756 sf_region_t *rgnp; 11757 11758 ASSERT(srdp != NULL); 11759 ASSERT(srdp->srd_refcnt != 0); 11760 11761 w <<= BT_ULSHIFT; 11762 while (bmw) { 11763 if (!(bmw & 0x1)) { 11764 bix++; 11765 bmw >>= 1; 11766 continue; 11767 } 11768 rid = w | bix; 11769 rgnp = srdp->srd_hmergnp[rid]; 11770 ASSERT(rgnp->rgn_refcnt > 0); 11771 ASSERT(rgnp->rgn_id == rid); 11772 if (addr < rgnp->rgn_saddr || 11773 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) { 11774 bix++; 11775 bmw >>= 1; 11776 } else { 11777 return (1); 11778 } 11779 } 11780 return (0); 11781 } 11782 11783 /* 11784 * Handle exceptions for low level tsb_handler. 11785 * 11786 * There are many scenarios that could land us here: 11787 * 11788 * If the context is invalid we land here. The context can be invalid 11789 * for 3 reasons: 1) we couldn't allocate a new context and now need to 11790 * perform a wrap around operation in order to allocate a new context. 11791 * 2) Context was invalidated to change pagesize programming 3) ISMs or 11792 * TSBs configuration is changeing for this process and we are forced into 11793 * here to do a syncronization operation. If the context is valid we can 11794 * be here from window trap hanlder. In this case just call trap to handle 11795 * the fault. 11796 * 11797 * Note that the process will run in INVALID_CONTEXT before 11798 * faulting into here and subsequently loading the MMU registers 11799 * (including the TSB base register) associated with this process. 11800 * For this reason, the trap handlers must all test for 11801 * INVALID_CONTEXT before attempting to access any registers other 11802 * than the context registers. 11803 */ 11804 void 11805 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 11806 { 11807 sfmmu_t *sfmmup, *shsfmmup; 11808 uint_t ctxtype; 11809 klwp_id_t lwp; 11810 char lwp_save_state; 11811 hatlock_t *hatlockp, *shatlockp; 11812 struct tsb_info *tsbinfop; 11813 struct tsbmiss *tsbmp; 11814 sf_scd_t *scdp; 11815 11816 SFMMU_STAT(sf_tsb_exceptions); 11817 SFMMU_MMU_STAT(mmu_tsb_exceptions); 11818 sfmmup = astosfmmu(curthread->t_procp->p_as); 11819 /* 11820 * note that in sun4u, tagacces register contains ctxnum 11821 * while sun4v passes ctxtype in the tagaccess register. 
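	 * Either way, the low TAGACC_CTX_MASK bits extracted below are
	 * what is compared against KCONTEXT and INVALID_CONTEXT.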
11822 */ 11823 ctxtype = tagaccess & TAGACC_CTX_MASK; 11824 11825 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT); 11826 ASSERT(sfmmup->sfmmu_ismhat == 0); 11827 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 11828 ctxtype == INVALID_CONTEXT); 11829 11830 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) { 11831 /* 11832 * We may land here because shme bitmap and pagesize 11833 * flags are updated lazily in tsbmiss area on other cpus. 11834 * If we detect here that tsbmiss area is out of sync with 11835 * sfmmu update it and retry the trapped instruction. 11836 * Otherwise call trap(). 11837 */ 11838 int ret = 0; 11839 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11840 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11841 11842 /* 11843 * Must set lwp state to LWP_SYS before 11844 * trying to acquire any adaptive lock 11845 */ 11846 lwp = ttolwp(curthread); 11847 ASSERT(lwp); 11848 lwp_save_state = lwp->lwp_state; 11849 lwp->lwp_state = LWP_SYS; 11850 11851 hatlockp = sfmmu_hat_enter(sfmmup); 11852 kpreempt_disable(); 11853 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11854 ASSERT(sfmmup == tsbmp->usfmmup); 11855 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11856 ~tteflag_mask) || 11857 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11858 ~tteflag_mask)) { 11859 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11860 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11861 ret = 1; 11862 } 11863 if (sfmmup->sfmmu_srdp != NULL) { 11864 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11865 ulong_t *tm = tsbmp->shmermap; 11866 ulong_t i; 11867 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11868 ulong_t d = tm[i] ^ sm[i]; 11869 if (d) { 11870 if (d & sm[i]) { 11871 if (!ret && sfmmu_is_rgnva( 11872 sfmmup->sfmmu_srdp, 11873 addr, i, d & sm[i])) { 11874 ret = 1; 11875 } 11876 } 11877 tm[i] = sm[i]; 11878 } 11879 } 11880 } 11881 kpreempt_enable(); 11882 sfmmu_hat_exit(hatlockp); 11883 lwp->lwp_state = lwp_save_state; 11884 if (ret) { 11885 return; 11886 } 11887 } else if (ctxtype == INVALID_CONTEXT) { 11888 /* 11889 * First, make sure we come out of here with a valid ctx, 11890 * since if we don't get one we'll simply loop on the 11891 * faulting instruction. 11892 * 11893 * If the ISM mappings are changing, the TSB is relocated, 11894 * the process is swapped, the process is joining SCD or 11895 * leaving SCD or shared regions we serialize behind the 11896 * controlling thread with hat lock, sfmmu_flags and 11897 * sfmmu_tsb_cv condition variable. 11898 */ 11899 11900 /* 11901 * Must set lwp state to LWP_SYS before 11902 * trying to acquire any adaptive lock 11903 */ 11904 lwp = ttolwp(curthread); 11905 ASSERT(lwp); 11906 lwp_save_state = lwp->lwp_state; 11907 lwp->lwp_state = LWP_SYS; 11908 11909 hatlockp = sfmmu_hat_enter(sfmmup); 11910 retry: 11911 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 11912 shsfmmup = scdp->scd_sfmmup; 11913 ASSERT(shsfmmup != NULL); 11914 11915 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 11916 tsbinfop = tsbinfop->tsb_next) { 11917 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11918 /* drop the private hat lock */ 11919 sfmmu_hat_exit(hatlockp); 11920 /* acquire the shared hat lock */ 11921 shatlockp = sfmmu_hat_enter(shsfmmup); 11922 /* 11923 * recheck to see if anything changed 11924 * after we drop the private hat lock. 
11925 */ 11926 if (sfmmup->sfmmu_scdp == scdp && 11927 shsfmmup == scdp->scd_sfmmup) { 11928 sfmmu_tsb_chk_reloc(shsfmmup, 11929 shatlockp); 11930 } 11931 sfmmu_hat_exit(shatlockp); 11932 hatlockp = sfmmu_hat_enter(sfmmup); 11933 goto retry; 11934 } 11935 } 11936 } 11937 11938 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 11939 tsbinfop = tsbinfop->tsb_next) { 11940 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11941 cv_wait(&sfmmup->sfmmu_tsb_cv, 11942 HATLOCK_MUTEXP(hatlockp)); 11943 goto retry; 11944 } 11945 } 11946 11947 /* 11948 * Wait for ISM maps to be updated. 11949 */ 11950 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 11951 cv_wait(&sfmmup->sfmmu_tsb_cv, 11952 HATLOCK_MUTEXP(hatlockp)); 11953 goto retry; 11954 } 11955 11956 /* Is this process joining an SCD? */ 11957 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 11958 /* 11959 * Flush private TSB and setup shared TSB. 11960 * sfmmu_finish_join_scd() does not drop the 11961 * hat lock. 11962 */ 11963 sfmmu_finish_join_scd(sfmmup); 11964 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 11965 } 11966 11967 /* 11968 * If we're swapping in, get TSB(s). Note that we must do 11969 * this before we get a ctx or load the MMU state. Once 11970 * we swap in we have to recheck to make sure the TSB(s) and 11971 * ISM mappings didn't change while we slept. 11972 */ 11973 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11974 sfmmu_tsb_swapin(sfmmup, hatlockp); 11975 goto retry; 11976 } 11977 11978 sfmmu_get_ctx(sfmmup); 11979 11980 sfmmu_hat_exit(hatlockp); 11981 /* 11982 * Must restore lwp_state if not calling 11983 * trap() for further processing. Restore 11984 * it anyway. 11985 */ 11986 lwp->lwp_state = lwp_save_state; 11987 return; 11988 } 11989 trap(rp, (caddr_t)tagaccess, traptype, 0); 11990 } 11991 11992 static void 11993 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11994 { 11995 struct tsb_info *tp; 11996 11997 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11998 11999 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) { 12000 if (tp->tsb_flags & TSB_RELOC_FLAG) { 12001 cv_wait(&sfmmup->sfmmu_tsb_cv, 12002 HATLOCK_MUTEXP(hatlockp)); 12003 break; 12004 } 12005 } 12006 } 12007 12008 /* 12009 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 12010 * TTE_SUSPENDED bit set in tte we block on aquiring a page lock 12011 * rather than spinning to avoid send mondo timeouts with 12012 * interrupts enabled. When the lock is acquired it is immediately 12013 * released and we return back to sfmmu_vatopfn just after 12014 * the GET_TTE call. 12015 */ 12016 void 12017 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 12018 { 12019 struct page **pp; 12020 12021 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 12022 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 12023 } 12024 12025 /* 12026 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 12027 * TTE_SUSPENDED bit set in tte. We do this so that we can handle 12028 * cross traps which cannot be handled while spinning in the 12029 * trap handlers. Simply enter and exit the kpr_suspendlock spin 12030 * mutex, which is held by the holder of the suspend bit, and then 12031 * retry the trapped instruction after unwinding. 
12032 */ 12033 /*ARGSUSED*/ 12034 void 12035 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 12036 { 12037 ASSERT(curthread != kreloc_thread); 12038 mutex_enter(&kpr_suspendlock); 12039 mutex_exit(&kpr_suspendlock); 12040 } 12041 12042 /* 12043 * This routine could be optimized to reduce the number of xcalls by flushing 12044 * the entire TLBs if region reference count is above some threshold but the 12045 * tradeoff will depend on the size of the TLB. So for now flush the specific 12046 * page a context at a time. 12047 * 12048 * If uselocks is 0 then it's called after all cpus were captured and all the 12049 * hat locks were taken. In this case don't take the region lock by relying on 12050 * the order of list region update operations in hat_join_region(), 12051 * hat_leave_region() and hat_dup_region(). The ordering in those routines 12052 * guarantees that list is always forward walkable and reaches active sfmmus 12053 * regardless of where xc_attention() captures a cpu. 12054 */ 12055 cpuset_t 12056 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp, 12057 struct hme_blk *hmeblkp, int uselocks) 12058 { 12059 sfmmu_t *sfmmup; 12060 cpuset_t cpuset; 12061 cpuset_t rcpuset; 12062 hatlock_t *hatlockp; 12063 uint_t rid = rgnp->rgn_id; 12064 sf_rgn_link_t *rlink; 12065 sf_scd_t *scdp; 12066 12067 ASSERT(hmeblkp->hblk_shared); 12068 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 12069 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 12070 12071 CPUSET_ZERO(rcpuset); 12072 if (uselocks) { 12073 mutex_enter(&rgnp->rgn_mutex); 12074 } 12075 sfmmup = rgnp->rgn_sfmmu_head; 12076 while (sfmmup != NULL) { 12077 if (uselocks) { 12078 hatlockp = sfmmu_hat_enter(sfmmup); 12079 } 12080 12081 /* 12082 * When an SCD is created the SCD hat is linked on the sfmmu 12083 * region lists for each hme region which is part of the 12084 * SCD. If we find an SCD hat, when walking these lists, 12085 * then we flush the shared TSBs, if we find a private hat, 12086 * which is part of an SCD, but where the region 12087 * is not part of the SCD then we flush the private TSBs. 12088 */ 12089 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12090 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 12091 scdp = sfmmup->sfmmu_scdp; 12092 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 12093 if (uselocks) { 12094 sfmmu_hat_exit(hatlockp); 12095 } 12096 goto next; 12097 } 12098 } 12099 12100 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12101 12102 kpreempt_disable(); 12103 cpuset = sfmmup->sfmmu_cpusran; 12104 CPUSET_AND(cpuset, cpu_ready_set); 12105 CPUSET_DEL(cpuset, CPU->cpu_id); 12106 SFMMU_XCALL_STATS(sfmmup); 12107 xt_some(cpuset, vtag_flushpage_tl1, 12108 (uint64_t)addr, (uint64_t)sfmmup); 12109 vtag_flushpage(addr, (uint64_t)sfmmup); 12110 if (uselocks) { 12111 sfmmu_hat_exit(hatlockp); 12112 } 12113 kpreempt_enable(); 12114 CPUSET_OR(rcpuset, cpuset); 12115 12116 next: 12117 /* LINTED: constant in conditional context */ 12118 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 12119 ASSERT(rlink != NULL); 12120 sfmmup = rlink->next; 12121 } 12122 if (uselocks) { 12123 mutex_exit(&rgnp->rgn_mutex); 12124 } 12125 return (rcpuset); 12126 } 12127 12128 /* 12129 * This routine takes an sfmmu pointer and the va for an adddress in an 12130 * ISM region as input and returns the corresponding region id in ism_rid. 12131 * The return value of 1 indicates that a region has been found and ism_rid 12132 * is valid, otherwise 0 is returned. 
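 *
 * Hypothetical caller, shown only to illustrate the contract (the real
 * caller, sfmmu_ismtlbcache_demap() below, does essentially this):
 *
 *	uint_t ism_rid;
 *
 *	if (!find_ism_rid(sfmmup, ism_sfmmup, va, &ism_rid))
 *		cmn_err(CE_PANIC, "can't find matching ISM rid!");
 *	(ism_rid is then used to consult the SRD/SCD ISM region maps)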
12133 */ 12134 static int 12135 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid) 12136 { 12137 ism_blk_t *ism_blkp; 12138 int i; 12139 ism_map_t *ism_map; 12140 #ifdef DEBUG 12141 struct hat *ism_hatid; 12142 #endif 12143 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12144 12145 ism_blkp = sfmmup->sfmmu_iblk; 12146 while (ism_blkp != NULL) { 12147 ism_map = ism_blkp->iblk_maps; 12148 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 12149 if ((va >= ism_start(ism_map[i])) && 12150 (va < ism_end(ism_map[i]))) { 12151 12152 *ism_rid = ism_map[i].imap_rid; 12153 #ifdef DEBUG 12154 ism_hatid = ism_map[i].imap_ismhat; 12155 ASSERT(ism_hatid == ism_sfmmup); 12156 ASSERT(ism_hatid->sfmmu_ismhat); 12157 #endif 12158 return (1); 12159 } 12160 } 12161 ism_blkp = ism_blkp->iblk_next; 12162 } 12163 return (0); 12164 } 12165 12166 /* 12167 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 12168 * This routine may be called with all cpu's captured. Therefore, the 12169 * caller is responsible for holding all locks and disabling kernel 12170 * preemption. 12171 */ 12172 /* ARGSUSED */ 12173 static void 12174 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 12175 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 12176 { 12177 cpuset_t cpuset; 12178 caddr_t va; 12179 ism_ment_t *ment; 12180 sfmmu_t *sfmmup; 12181 #ifdef VAC 12182 int vcolor; 12183 #endif 12184 12185 sf_scd_t *scdp; 12186 uint_t ism_rid; 12187 12188 ASSERT(!hmeblkp->hblk_shared); 12189 /* 12190 * Walk the ism_hat's mapping list and flush the page 12191 * from every hat sharing this ism_hat. This routine 12192 * may be called while all cpu's have been captured. 12193 * Therefore we can't attempt to grab any locks. For now 12194 * this means we will protect the ism mapping list under 12195 * a single lock which will be grabbed by the caller. 12196 * If hat_share/unshare scalibility becomes a performance 12197 * problem then we may need to re-think ism mapping list locking. 12198 */ 12199 ASSERT(ism_sfmmup->sfmmu_ismhat); 12200 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 12201 addr = addr - ISMID_STARTADDR; 12202 12203 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 12204 12205 sfmmup = ment->iment_hat; 12206 12207 va = ment->iment_base_va; 12208 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 12209 12210 /* 12211 * When an SCD is created the SCD hat is linked on the ism 12212 * mapping lists for each ISM segment which is part of the 12213 * SCD. If we find an SCD hat, when walking these lists, 12214 * then we flush the shared TSBs, if we find a private hat, 12215 * which is part of an SCD, but where the region 12216 * corresponding to this va is not part of the SCD then we 12217 * flush the private TSBs. 
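 *
 * The decision, in outline (a sketch of the test below, not the
 * literal code):
 *
 *	SCD hat					-> flush the shared TSB
 *	private hat, region is in its SCD	-> skip; the SCD hat covers it
 *	private hat otherwise			-> flush the private TSB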
12218 */ 12219 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12220 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12221 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12222 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12223 &ism_rid)) { 12224 cmn_err(CE_PANIC, 12225 "can't find matching ISM rid!"); 12226 } 12227 12228 scdp = sfmmup->sfmmu_scdp; 12229 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12230 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12231 ism_rid)) { 12232 continue; 12233 } 12234 } 12235 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12236 12237 cpuset = sfmmup->sfmmu_cpusran; 12238 CPUSET_AND(cpuset, cpu_ready_set); 12239 CPUSET_DEL(cpuset, CPU->cpu_id); 12240 SFMMU_XCALL_STATS(sfmmup); 12241 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12242 (uint64_t)sfmmup); 12243 vtag_flushpage(va, (uint64_t)sfmmup); 12244 12245 #ifdef VAC 12246 /* 12247 * Flush D$ 12248 * When flushing D$ we must flush all 12249 * cpu's. See sfmmu_cache_flush(). 12250 */ 12251 if (cache_flush_flag == CACHE_FLUSH) { 12252 cpuset = cpu_ready_set; 12253 CPUSET_DEL(cpuset, CPU->cpu_id); 12254 12255 SFMMU_XCALL_STATS(sfmmup); 12256 vcolor = addr_to_vcolor(va); 12257 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12258 vac_flushpage(pfnum, vcolor); 12259 } 12260 #endif /* VAC */ 12261 } 12262 } 12263 12264 /* 12265 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12266 * a particular virtual address and ctx. If noflush is set we do not 12267 * flush the TLB/TSB. This function may or may not be called with the 12268 * HAT lock held. 12269 */ 12270 static void 12271 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12272 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12273 int hat_lock_held) 12274 { 12275 #ifdef VAC 12276 int vcolor; 12277 #endif 12278 cpuset_t cpuset; 12279 hatlock_t *hatlockp; 12280 12281 ASSERT(!hmeblkp->hblk_shared); 12282 12283 #if defined(lint) && !defined(VAC) 12284 pfnum = pfnum; 12285 cpu_flag = cpu_flag; 12286 cache_flush_flag = cache_flush_flag; 12287 #endif 12288 12289 /* 12290 * There is no longer a need to protect against ctx being 12291 * stolen here since we don't store the ctx in the TSB anymore. 12292 */ 12293 #ifdef VAC 12294 vcolor = addr_to_vcolor(addr); 12295 #endif 12296 12297 /* 12298 * We must hold the hat lock during the flush of TLB, 12299 * to avoid a race with sfmmu_invalidate_ctx(), where 12300 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12301 * causing TLB demap routine to skip flush on that MMU. 12302 * If the context on a MMU has already been set to 12303 * INVALID_CONTEXT, we just get an extra flush on 12304 * that MMU. 12305 */ 12306 if (!hat_lock_held && !tlb_noflush) 12307 hatlockp = sfmmu_hat_enter(sfmmup); 12308 12309 kpreempt_disable(); 12310 if (!tlb_noflush) { 12311 /* 12312 * Flush the TSB and TLB. 12313 */ 12314 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12315 12316 cpuset = sfmmup->sfmmu_cpusran; 12317 CPUSET_AND(cpuset, cpu_ready_set); 12318 CPUSET_DEL(cpuset, CPU->cpu_id); 12319 12320 SFMMU_XCALL_STATS(sfmmup); 12321 12322 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12323 (uint64_t)sfmmup); 12324 12325 vtag_flushpage(addr, (uint64_t)sfmmup); 12326 } 12327 12328 if (!hat_lock_held && !tlb_noflush) 12329 sfmmu_hat_exit(hatlockp); 12330 12331 #ifdef VAC 12332 /* 12333 * Flush the D$ 12334 * 12335 * Even if the ctx is stolen, we need to flush the 12336 * cache. Our ctx stealer only flushes the TLBs. 
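 *
 * vcolor was computed above with addr_to_vcolor(); conceptually it is
 * just the VA's page index modulo the number of VAC colors, roughly
 * (a sketch of the idea, not the actual macro):
 *
 *	vcolor = (int)(((uintptr_t)addr >> MMU_PAGESHIFT) & (vac_colors - 1));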
12337 */ 12338 if (cache_flush_flag == CACHE_FLUSH) { 12339 if (cpu_flag & FLUSH_ALL_CPUS) { 12340 cpuset = cpu_ready_set; 12341 } else { 12342 cpuset = sfmmup->sfmmu_cpusran; 12343 CPUSET_AND(cpuset, cpu_ready_set); 12344 } 12345 CPUSET_DEL(cpuset, CPU->cpu_id); 12346 SFMMU_XCALL_STATS(sfmmup); 12347 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12348 vac_flushpage(pfnum, vcolor); 12349 } 12350 #endif /* VAC */ 12351 kpreempt_enable(); 12352 } 12353 12354 /* 12355 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12356 * address and ctx. If noflush is set we do not currently do anything. 12357 * This function may or may not be called with the HAT lock held. 12358 */ 12359 static void 12360 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12361 int tlb_noflush, int hat_lock_held) 12362 { 12363 cpuset_t cpuset; 12364 hatlock_t *hatlockp; 12365 12366 ASSERT(!hmeblkp->hblk_shared); 12367 12368 /* 12369 * If the process is exiting we have nothing to do. 12370 */ 12371 if (tlb_noflush) 12372 return; 12373 12374 /* 12375 * Flush TSB. 12376 */ 12377 if (!hat_lock_held) 12378 hatlockp = sfmmu_hat_enter(sfmmup); 12379 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12380 12381 kpreempt_disable(); 12382 12383 cpuset = sfmmup->sfmmu_cpusran; 12384 CPUSET_AND(cpuset, cpu_ready_set); 12385 CPUSET_DEL(cpuset, CPU->cpu_id); 12386 12387 SFMMU_XCALL_STATS(sfmmup); 12388 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12389 12390 vtag_flushpage(addr, (uint64_t)sfmmup); 12391 12392 if (!hat_lock_held) 12393 sfmmu_hat_exit(hatlockp); 12394 12395 kpreempt_enable(); 12396 12397 } 12398 12399 /* 12400 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12401 * call handler that can flush a range of pages to save on xcalls. 12402 */ 12403 static int sfmmu_xcall_save; 12404 12405 /* 12406 * this routine is never used for demaping addresses backed by SRD hmeblks. 12407 */ 12408 static void 12409 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12410 { 12411 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12412 hatlock_t *hatlockp; 12413 cpuset_t cpuset; 12414 uint64_t sfmmu_pgcnt; 12415 pgcnt_t pgcnt = 0; 12416 int pgunload = 0; 12417 int dirtypg = 0; 12418 caddr_t addr = dmrp->dmr_addr; 12419 caddr_t eaddr; 12420 uint64_t bitvec = dmrp->dmr_bitvec; 12421 12422 ASSERT(bitvec & 1); 12423 12424 /* 12425 * Flush TSB and calculate number of pages to flush. 12426 */ 12427 while (bitvec != 0) { 12428 dirtypg = 0; 12429 /* 12430 * Find the first page to flush and then count how many 12431 * pages there are after it that also need to be flushed. 12432 * This way the number of TSB flushes is minimized. 
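 *
 * Worked example (illustrative bit pattern; bit 0 is always set on
 * entry): with dmr_bitvec == 0b11100101 the loop unloads one page,
 * skips one clear bit, unloads one more, skips two clear bits, then
 * unloads a run of three, so five dirty pages cost three
 * sfmmu_unload_tsb_range() calls instead of five.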
12433 */ 12434 while ((bitvec & 1) == 0) { 12435 pgcnt++; 12436 addr += MMU_PAGESIZE; 12437 bitvec >>= 1; 12438 } 12439 while (bitvec & 1) { 12440 dirtypg++; 12441 bitvec >>= 1; 12442 } 12443 eaddr = addr + ptob(dirtypg); 12444 hatlockp = sfmmu_hat_enter(sfmmup); 12445 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12446 sfmmu_hat_exit(hatlockp); 12447 pgunload += dirtypg; 12448 addr = eaddr; 12449 pgcnt += dirtypg; 12450 } 12451 12452 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12453 if (sfmmup->sfmmu_free == 0) { 12454 addr = dmrp->dmr_addr; 12455 bitvec = dmrp->dmr_bitvec; 12456 12457 /* 12458 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12459 * as it will be used to pack argument for xt_some 12460 */ 12461 ASSERT((pgcnt > 0) && 12462 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12463 12464 /* 12465 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 12466 * the low 6 bits of sfmmup. This is doable since pgcnt 12467 * always >= 1. 12468 */ 12469 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12470 sfmmu_pgcnt = (uint64_t)sfmmup | 12471 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12472 12473 /* 12474 * We must hold the hat lock during the flush of TLB, 12475 * to avoid a race with sfmmu_invalidate_ctx(), where 12476 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12477 * causing TLB demap routine to skip flush on that MMU. 12478 * If the context on a MMU has already been set to 12479 * INVALID_CONTEXT, we just get an extra flush on 12480 * that MMU. 12481 */ 12482 hatlockp = sfmmu_hat_enter(sfmmup); 12483 kpreempt_disable(); 12484 12485 cpuset = sfmmup->sfmmu_cpusran; 12486 CPUSET_AND(cpuset, cpu_ready_set); 12487 CPUSET_DEL(cpuset, CPU->cpu_id); 12488 12489 SFMMU_XCALL_STATS(sfmmup); 12490 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12491 sfmmu_pgcnt); 12492 12493 for (; bitvec != 0; bitvec >>= 1) { 12494 if (bitvec & 1) 12495 vtag_flushpage(addr, (uint64_t)sfmmup); 12496 addr += MMU_PAGESIZE; 12497 } 12498 kpreempt_enable(); 12499 sfmmu_hat_exit(hatlockp); 12500 12501 sfmmu_xcall_save += (pgunload-1); 12502 } 12503 dmrp->dmr_bitvec = 0; 12504 } 12505 12506 /* 12507 * In cases where we need to synchronize with TLB/TSB miss trap 12508 * handlers, _and_ need to flush the TLB, it's a lot easier to 12509 * throw away the context from the process than to do a 12510 * special song and dance to keep things consistent for the 12511 * handlers. 12512 * 12513 * Since the process suddenly ends up without a context and our caller 12514 * holds the hat lock, threads that fault after this function is called 12515 * will pile up on the lock. We can then do whatever we need to 12516 * atomically from the context of the caller. The first blocked thread 12517 * to resume executing will get the process a new context, and the 12518 * process will resume executing. 12519 * 12520 * One added advantage of this approach is that on MMUs that 12521 * support a "flush all" operation, we will delay the flush until 12522 * cnum wrap-around, and then flush the TLB one time. This 12523 * is rather rare, so it's a lot less expensive than making 8000 12524 * x-calls to flush the TLB 8000 times. 12525 * 12526 * A per-process (PP) lock is used to synchronize ctx allocations in 12527 * resume() and ctx invalidations here. 
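 *
 * The invalidation sequence, in outline (a sketch of the code below,
 * not a substitute for it):
 *
 *	sfmmu_disable_intrs(); lock_set(&sfmmup->sfmmu_ctx_lock)  (PP lock)
 *	set sfmmu_ctxs[i].cnum = INVALID_CONTEXT for every MMU ctx domain
 *	membar_enter(); lock_clear(); sfmmu_enable_intrs()
 *	cross-call the CPUs in sfmmu_cpusran -> sfmmu_raise_tsb_exception
 *	if this CPU was running the victim hat, set its secondary ctx to
 *	INVALID_CONTEXT and clear the user TSB registers as well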
12528 */ 12529 static void 12530 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 12531 { 12532 cpuset_t cpuset; 12533 int cnum, currcnum; 12534 mmu_ctx_t *mmu_ctxp; 12535 int i; 12536 uint_t pstate_save; 12537 12538 SFMMU_STAT(sf_ctx_inv); 12539 12540 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12541 ASSERT(sfmmup != ksfmmup); 12542 12543 kpreempt_disable(); 12544 12545 mmu_ctxp = CPU_MMU_CTXP(CPU); 12546 ASSERT(mmu_ctxp); 12547 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 12548 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 12549 12550 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 12551 12552 pstate_save = sfmmu_disable_intrs(); 12553 12554 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 12555 /* set HAT cnum invalid across all context domains. */ 12556 for (i = 0; i < max_mmu_ctxdoms; i++) { 12557 12558 cnum = sfmmup->sfmmu_ctxs[i].cnum; 12559 if (cnum == INVALID_CONTEXT) { 12560 continue; 12561 } 12562 12563 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 12564 } 12565 membar_enter(); /* make sure globally visible to all CPUs */ 12566 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 12567 12568 sfmmu_enable_intrs(pstate_save); 12569 12570 cpuset = sfmmup->sfmmu_cpusran; 12571 CPUSET_DEL(cpuset, CPU->cpu_id); 12572 CPUSET_AND(cpuset, cpu_ready_set); 12573 if (!CPUSET_ISNULL(cpuset)) { 12574 SFMMU_XCALL_STATS(sfmmup); 12575 xt_some(cpuset, sfmmu_raise_tsb_exception, 12576 (uint64_t)sfmmup, INVALID_CONTEXT); 12577 xt_sync(cpuset); 12578 SFMMU_STAT(sf_tsb_raise_exception); 12579 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 12580 } 12581 12582 /* 12583 * If the hat to-be-invalidated is the same as the current 12584 * process on local CPU we need to invalidate 12585 * this CPU context as well. 12586 */ 12587 if ((sfmmu_getctx_sec() == currcnum) && 12588 (currcnum != INVALID_CONTEXT)) { 12589 /* sets shared context to INVALID too */ 12590 sfmmu_setctx_sec(INVALID_CONTEXT); 12591 sfmmu_clear_utsbinfo(); 12592 } 12593 12594 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID); 12595 12596 kpreempt_enable(); 12597 12598 /* 12599 * we hold the hat lock, so nobody should allocate a context 12600 * for us yet 12601 */ 12602 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 12603 } 12604 12605 #ifdef VAC 12606 /* 12607 * We need to flush the cache in all cpus. It is possible that 12608 * a process referenced a page as cacheable but has sinced exited 12609 * and cleared the mapping list. We still to flush it but have no 12610 * state so all cpus is the only alternative. 12611 */ 12612 void 12613 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 12614 { 12615 cpuset_t cpuset; 12616 12617 kpreempt_disable(); 12618 cpuset = cpu_ready_set; 12619 CPUSET_DEL(cpuset, CPU->cpu_id); 12620 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12621 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12622 xt_sync(cpuset); 12623 vac_flushpage(pfnum, vcolor); 12624 kpreempt_enable(); 12625 } 12626 12627 void 12628 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 12629 { 12630 cpuset_t cpuset; 12631 12632 ASSERT(vcolor >= 0); 12633 12634 kpreempt_disable(); 12635 cpuset = cpu_ready_set; 12636 CPUSET_DEL(cpuset, CPU->cpu_id); 12637 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12638 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 12639 xt_sync(cpuset); 12640 vac_flushcolor(vcolor, pfnum); 12641 kpreempt_enable(); 12642 } 12643 #endif /* VAC */ 12644 12645 /* 12646 * We need to prevent processes from accessing the TSB using a cached physical 12647 * address. 
It's alright if they try to access the TSB via virtual address 12648 * since they will just fault on that virtual address once the mapping has 12649 * been suspended. 12650 */ 12651 #pragma weak sendmondo_in_recover 12652 12653 /* ARGSUSED */ 12654 static int 12655 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 12656 { 12657 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12658 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12659 hatlock_t *hatlockp; 12660 sf_scd_t *scdp; 12661 12662 if (flags != HAT_PRESUSPEND) 12663 return (0); 12664 12665 /* 12666 * If tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must 12667 * be a shared hat, then set SCD's tsbinfo's flag. 12668 * If tsb is not shared, sfmmup is a private hat, then set 12669 * its private tsbinfo's flag. 12670 */ 12671 hatlockp = sfmmu_hat_enter(sfmmup); 12672 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 12673 12674 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) { 12675 sfmmu_tsb_inv_ctx(sfmmup); 12676 sfmmu_hat_exit(hatlockp); 12677 } else { 12678 /* release lock on the shared hat */ 12679 sfmmu_hat_exit(hatlockp); 12680 /* sfmmup is a shared hat */ 12681 ASSERT(sfmmup->sfmmu_scdhat); 12682 scdp = sfmmup->sfmmu_scdp; 12683 ASSERT(scdp != NULL); 12684 /* get private hat from the scd list */ 12685 mutex_enter(&scdp->scd_mutex); 12686 sfmmup = scdp->scd_sf_list; 12687 while (sfmmup != NULL) { 12688 hatlockp = sfmmu_hat_enter(sfmmup); 12689 /* 12690 * We do not call sfmmu_tsb_inv_ctx here because 12691 * sendmondo_in_recover check is only needed for 12692 * sun4u. 12693 */ 12694 sfmmu_invalidate_ctx(sfmmup); 12695 sfmmu_hat_exit(hatlockp); 12696 sfmmup = sfmmup->sfmmu_scd_link.next; 12697 12698 } 12699 mutex_exit(&scdp->scd_mutex); 12700 } 12701 return (0); 12702 } 12703 12704 static void 12705 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12706 { 12707 extern uint32_t sendmondo_in_recover; 12708 12709 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12710 12711 /* 12712 * For Cheetah+ Erratum 25: 12713 * Wait for any active recovery to finish. We can't risk 12714 * relocating the TSB of the thread running mondo_recover_proc() 12715 * since, if we did that, we would deadlock. The scenario we are 12716 * trying to avoid is as follows: 12717 * 12718 * THIS CPU RECOVER CPU 12719 * -------- ----------- 12720 * Begins recovery, walking through TSB 12721 * hat_pagesuspend() TSB TTE 12722 * TLB miss on TSB TTE, spins at TL1 12723 * xt_sync() 12724 * send_mondo_timeout() 12725 * mondo_recover_proc() 12726 * ((deadlocked)) 12727 * 12728 * The second half of the workaround is that mondo_recover_proc() 12729 * checks to see if the tsb_info has the RELOC flag set, and if it 12730 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12731 * and hence avoiding the TLB miss that could result in a deadlock. 
12732 */ 12733 if (&sendmondo_in_recover) { 12734 membar_enter(); /* make sure RELOC flag visible */ 12735 while (sendmondo_in_recover) { 12736 drv_usecwait(1); 12737 membar_consumer(); 12738 } 12739 } 12740 12741 sfmmu_invalidate_ctx(sfmmup); 12742 } 12743 12744 /* ARGSUSED */ 12745 static int 12746 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12747 void *tsbinfo, pfn_t newpfn) 12748 { 12749 hatlock_t *hatlockp; 12750 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12751 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12752 12753 if (flags != HAT_POSTUNSUSPEND) 12754 return (0); 12755 12756 hatlockp = sfmmu_hat_enter(sfmmup); 12757 12758 SFMMU_STAT(sf_tsb_reloc); 12759 12760 /* 12761 * The process may have swapped out while we were relocating one 12762 * of its TSBs. If so, don't bother doing the setup since the 12763 * process can't be using the memory anymore. 12764 */ 12765 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12766 ASSERT(va == tsbinfop->tsb_va); 12767 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12768 12769 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12770 sfmmu_inv_tsb(tsbinfop->tsb_va, 12771 TSB_BYTES(tsbinfop->tsb_szc)); 12772 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12773 } 12774 } 12775 12776 membar_exit(); 12777 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12778 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12779 12780 sfmmu_hat_exit(hatlockp); 12781 12782 return (0); 12783 } 12784 12785 /* 12786 * Allocate and initialize a tsb_info structure. Note that we may or may not 12787 * allocate a TSB here, depending on the flags passed in. 12788 */ 12789 static int 12790 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12791 uint_t flags, sfmmu_t *sfmmup) 12792 { 12793 int err; 12794 12795 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12796 sfmmu_tsbinfo_cache, KM_SLEEP); 12797 12798 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12799 tsb_szc, flags, sfmmup)) != 0) { 12800 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12801 SFMMU_STAT(sf_tsb_allocfail); 12802 *tsbinfopp = NULL; 12803 return (err); 12804 } 12805 SFMMU_STAT(sf_tsb_alloc); 12806 12807 /* 12808 * Bump the TSB size counters for this TSB size. 12809 */ 12810 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12811 return (0); 12812 } 12813 12814 static void 12815 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12816 { 12817 caddr_t tsbva = tsbinfo->tsb_va; 12818 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12819 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12820 vmem_t *vmp = tsbinfo->tsb_vmp; 12821 12822 /* 12823 * If we allocated this TSB from relocatable kernel memory, then we 12824 * need to uninstall the callback handler. 
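 *
 * This is the mirror image of the registration done in
 * sfmmu_init_tsbinfo(); both sides lock only the slab's root page
 * around the callback manipulation (sketched):
 *
 *	alloc:	as_pagelock(); hat_add_callback();    as_pageunlock();
 *	free:	as_pagelock(); hat_delete_callback(); as_pageunlock();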
12825 */ 12826 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12827 uintptr_t slab_mask; 12828 caddr_t slab_vaddr; 12829 page_t **ppl; 12830 int ret; 12831 12832 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12833 if (tsb_size > MMU_PAGESIZE4M) 12834 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12835 else 12836 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12837 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12838 12839 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12840 ASSERT(ret == 0); 12841 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12842 0, NULL); 12843 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12844 } 12845 12846 if (kmem_cachep != NULL) { 12847 kmem_cache_free(kmem_cachep, tsbva); 12848 } else { 12849 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12850 } 12851 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12852 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12853 } 12854 12855 static void 12856 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12857 { 12858 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12859 sfmmu_tsb_free(tsbinfo); 12860 } 12861 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12862 12863 } 12864 12865 /* 12866 * Setup all the references to physical memory for this tsbinfo. 12867 * The underlying page(s) must be locked. 12868 */ 12869 static void 12870 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12871 { 12872 ASSERT(pfn != PFN_INVALID); 12873 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12874 12875 #ifndef sun4v 12876 if (tsbinfo->tsb_szc == 0) { 12877 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12878 PROT_WRITE|PROT_READ, TTE8K); 12879 } else { 12880 /* 12881 * Round down PA and use a large mapping; the handlers will 12882 * compute the TSB pointer at the correct offset into the 12883 * big virtual page. NOTE: this assumes all TSBs larger 12884 * than 8K must come from physically contiguous slabs of 12885 * size tsb_slab_size. 12886 */ 12887 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12888 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12889 } 12890 tsbinfo->tsb_pa = ptob(pfn); 12891 12892 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12893 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12894 12895 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 12896 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 12897 #else /* sun4v */ 12898 tsbinfo->tsb_pa = ptob(pfn); 12899 #endif /* sun4v */ 12900 } 12901 12902 12903 /* 12904 * Returns zero on success, ENOMEM if over the high water mark, 12905 * or EAGAIN if the caller needs to retry with a smaller TSB 12906 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 12907 * 12908 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 12909 * is specified and the TSB requested is PAGESIZE, though it 12910 * may sleep waiting for memory if sufficient memory is not 12911 * available. 
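 *
 * A hypothetical caller, shown only to illustrate the error contract
 * (actual callers pass the TSB_GROW/TSB_SHRINK/TSB_SWAPIN flags
 * handled below):
 *
 *	err = sfmmu_init_tsbinfo(tsbinfo, tteszmask, szc, flags, sfmmup);
 *	if (err == EAGAIN)
 *		retry with a smaller szc, or with TSB_FORCEALLOC set;
 *	else if (err == ENOMEM)
 *		give up; the allocation would push tsb_alloc_bytes past
 *		tsb_alloc_hiwater;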
12912 */ 12913 static int 12914 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 12915 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 12916 { 12917 caddr_t vaddr = NULL; 12918 caddr_t slab_vaddr; 12919 uintptr_t slab_mask; 12920 int tsbbytes = TSB_BYTES(tsbcode); 12921 int lowmem = 0; 12922 struct kmem_cache *kmem_cachep = NULL; 12923 vmem_t *vmp = NULL; 12924 lgrp_id_t lgrpid = LGRP_NONE; 12925 pfn_t pfn; 12926 uint_t cbflags = HAC_SLEEP; 12927 page_t **pplist; 12928 int ret; 12929 12930 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 12931 if (tsbbytes > MMU_PAGESIZE4M) 12932 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12933 else 12934 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12935 12936 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 12937 flags |= TSB_ALLOC; 12938 12939 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 12940 12941 tsbinfo->tsb_sfmmu = sfmmup; 12942 12943 /* 12944 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 12945 * return. 12946 */ 12947 if ((flags & TSB_ALLOC) == 0) { 12948 tsbinfo->tsb_szc = tsbcode; 12949 tsbinfo->tsb_ttesz_mask = tteszmask; 12950 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 12951 tsbinfo->tsb_pa = -1; 12952 tsbinfo->tsb_tte.ll = 0; 12953 tsbinfo->tsb_next = NULL; 12954 tsbinfo->tsb_flags = TSB_SWAPPED; 12955 tsbinfo->tsb_cache = NULL; 12956 tsbinfo->tsb_vmp = NULL; 12957 return (0); 12958 } 12959 12960 #ifdef DEBUG 12961 /* 12962 * For debugging: 12963 * Randomly force allocation failures every tsb_alloc_mtbf 12964 * tries if TSB_FORCEALLOC is not specified. This will 12965 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 12966 * it is even, to allow testing of both failure paths... 12967 */ 12968 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 12969 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 12970 tsb_alloc_count = 0; 12971 tsb_alloc_fail_mtbf++; 12972 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 12973 } 12974 #endif /* DEBUG */ 12975 12976 /* 12977 * Enforce high water mark if we are not doing a forced allocation 12978 * and are not shrinking a process' TSB. 12979 */ 12980 if ((flags & TSB_SHRINK) == 0 && 12981 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 12982 if ((flags & TSB_FORCEALLOC) == 0) 12983 return (ENOMEM); 12984 lowmem = 1; 12985 } 12986 12987 /* 12988 * Allocate from the correct location based upon the size of the TSB 12989 * compared to the base page size, and what memory conditions dictate. 12990 * Note we always do nonblocking allocations from the TSB arena since 12991 * we don't want memory fragmentation to cause processes to block 12992 * indefinitely waiting for memory; until the kernel algorithms that 12993 * coalesce large pages are improved this is our best option. 
12994 * 12995 * Algorithm: 12996 * If allocating a "large" TSB (>8K), allocate from the 12997 * appropriate kmem_tsb_default_arena vmem arena 12998 * else if low on memory or the TSB_FORCEALLOC flag is set or 12999 * tsb_forceheap is set 13000 * Allocate from kernel heap via sfmmu_tsb8k_cache with 13001 * KM_SLEEP (never fails) 13002 * else 13003 * Allocate from appropriate sfmmu_tsb_cache with 13004 * KM_NOSLEEP 13005 * endif 13006 */ 13007 if (tsb_lgrp_affinity) 13008 lgrpid = lgrp_home_id(curthread); 13009 if (lgrpid == LGRP_NONE) 13010 lgrpid = 0; /* use lgrp of boot CPU */ 13011 13012 if (tsbbytes > MMU_PAGESIZE) { 13013 if (tsbbytes > MMU_PAGESIZE4M) { 13014 vmp = kmem_bigtsb_default_arena[lgrpid]; 13015 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 13016 0, 0, NULL, NULL, VM_NOSLEEP); 13017 } else { 13018 vmp = kmem_tsb_default_arena[lgrpid]; 13019 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 13020 0, 0, NULL, NULL, VM_NOSLEEP); 13021 } 13022 #ifdef DEBUG 13023 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 13024 #else /* !DEBUG */ 13025 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 13026 #endif /* DEBUG */ 13027 kmem_cachep = sfmmu_tsb8k_cache; 13028 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 13029 ASSERT(vaddr != NULL); 13030 } else { 13031 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 13032 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 13033 } 13034 13035 tsbinfo->tsb_cache = kmem_cachep; 13036 tsbinfo->tsb_vmp = vmp; 13037 13038 if (vaddr == NULL) { 13039 return (EAGAIN); 13040 } 13041 13042 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 13043 kmem_cachep = tsbinfo->tsb_cache; 13044 13045 /* 13046 * If we are allocating from outside the cage, then we need to 13047 * register a relocation callback handler. Note that for now 13048 * since pseudo mappings always hang off of the slab's root page, 13049 * we need only lock the first 8K of the TSB slab. This is a bit 13050 * hacky but it is good for performance. 13051 */ 13052 if (kmem_cachep != sfmmu_tsb8k_cache) { 13053 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 13054 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 13055 ASSERT(ret == 0); 13056 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 13057 cbflags, (void *)tsbinfo, &pfn, NULL); 13058 13059 /* 13060 * Need to free up resources if we could not successfully 13061 * add the callback function and return an error condition. 13062 */ 13063 if (ret != 0) { 13064 if (kmem_cachep) { 13065 kmem_cache_free(kmem_cachep, vaddr); 13066 } else { 13067 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 13068 } 13069 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 13070 S_WRITE); 13071 return (EAGAIN); 13072 } 13073 } else { 13074 /* 13075 * Since allocation of 8K TSBs from heap is rare and occurs 13076 * during memory pressure we allocate them from permanent 13077 * memory rather than using callbacks to get the PFN. 
13078 */ 13079 pfn = hat_getpfnum(kas.a_hat, vaddr); 13080 } 13081 13082 tsbinfo->tsb_va = vaddr; 13083 tsbinfo->tsb_szc = tsbcode; 13084 tsbinfo->tsb_ttesz_mask = tteszmask; 13085 tsbinfo->tsb_next = NULL; 13086 tsbinfo->tsb_flags = 0; 13087 13088 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 13089 13090 sfmmu_inv_tsb(vaddr, tsbbytes); 13091 13092 if (kmem_cachep != sfmmu_tsb8k_cache) { 13093 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 13094 } 13095 13096 return (0); 13097 } 13098 13099 /* 13100 * Initialize per cpu tsb and per cpu tsbmiss_area 13101 */ 13102 void 13103 sfmmu_init_tsbs(void) 13104 { 13105 int i; 13106 struct tsbmiss *tsbmissp; 13107 struct kpmtsbm *kpmtsbmp; 13108 #ifndef sun4v 13109 extern int dcache_line_mask; 13110 #endif /* sun4v */ 13111 extern uint_t vac_colors; 13112 13113 /* 13114 * Init. tsb miss area. 13115 */ 13116 tsbmissp = tsbmiss_area; 13117 13118 for (i = 0; i < NCPU; tsbmissp++, i++) { 13119 /* 13120 * initialize the tsbmiss area. 13121 * Do this for all possible CPUs as some may be added 13122 * while the system is running. There is no cost to this. 13123 */ 13124 tsbmissp->ksfmmup = ksfmmup; 13125 #ifndef sun4v 13126 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13127 #endif /* sun4v */ 13128 tsbmissp->khashstart = 13129 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13130 tsbmissp->uhashstart = 13131 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13132 tsbmissp->khashsz = khmehash_num; 13133 tsbmissp->uhashsz = uhmehash_num; 13134 } 13135 13136 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13137 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13138 13139 if (kpm_enable == 0) 13140 return; 13141 13142 /* -- Begin KPM specific init -- */ 13143 13144 if (kpm_smallpages) { 13145 /* 13146 * If we're using base pagesize pages for seg_kpm 13147 * mappings, we use the kernel TSB since we can't afford 13148 * to allocate a second huge TSB for these mappings. 13149 */ 13150 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13151 kpm_tsbsz = ktsb_szcode; 13152 kpmsm_tsbbase = kpm_tsbbase; 13153 kpmsm_tsbsz = kpm_tsbsz; 13154 } else { 13155 /* 13156 * In VAC conflict case, just put the entries in the 13157 * kernel 8K indexed TSB for now so we can find them. 13158 * This could really be changed in the future if we feel 13159 * the need... 13160 */ 13161 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13162 kpmsm_tsbsz = ktsb_szcode; 13163 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13164 kpm_tsbsz = ktsb4m_szcode; 13165 } 13166 13167 kpmtsbmp = kpmtsbm_area; 13168 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13169 /* 13170 * Initialize the kpmtsbm area. 13171 * Do this for all possible CPUs as some may be added 13172 * while the system is running. There is no cost to this. 13173 */ 13174 kpmtsbmp->vbase = kpm_vbase; 13175 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13176 kpmtsbmp->sz_shift = kpm_size_shift; 13177 kpmtsbmp->kpmp_shift = kpmp_shift; 13178 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13179 if (kpm_smallpages == 0) { 13180 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13181 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13182 } else { 13183 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13184 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13185 } 13186 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13187 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13188 #ifdef DEBUG 13189 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 13190 #endif /* DEBUG */ 13191 if (ktsb_phys) 13192 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13193 } 13194 13195 /* -- End KPM specific init -- */ 13196 } 13197 13198 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13199 struct tsb_info ktsb_info[2]; 13200 13201 /* 13202 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13203 */ 13204 void 13205 sfmmu_init_ktsbinfo() 13206 { 13207 ASSERT(ksfmmup != NULL); 13208 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13209 /* 13210 * Allocate tsbinfos for kernel and copy in data 13211 * to make debug easier and sun4v setup easier. 13212 */ 13213 ktsb_info[0].tsb_sfmmu = ksfmmup; 13214 ktsb_info[0].tsb_szc = ktsb_szcode; 13215 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13216 ktsb_info[0].tsb_va = ktsb_base; 13217 ktsb_info[0].tsb_pa = ktsb_pbase; 13218 ktsb_info[0].tsb_flags = 0; 13219 ktsb_info[0].tsb_tte.ll = 0; 13220 ktsb_info[0].tsb_cache = NULL; 13221 13222 ktsb_info[1].tsb_sfmmu = ksfmmup; 13223 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13224 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13225 ktsb_info[1].tsb_va = ktsb4m_base; 13226 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13227 ktsb_info[1].tsb_flags = 0; 13228 ktsb_info[1].tsb_tte.ll = 0; 13229 ktsb_info[1].tsb_cache = NULL; 13230 13231 /* Link them into ksfmmup. */ 13232 ktsb_info[0].tsb_next = &ktsb_info[1]; 13233 ktsb_info[1].tsb_next = NULL; 13234 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13235 13236 sfmmu_setup_tsbinfo(ksfmmup); 13237 } 13238 13239 /* 13240 * Cache the last value returned from va_to_pa(). If the VA specified 13241 * in the current call to cached_va_to_pa() maps to the same Page (as the 13242 * previous call to cached_va_to_pa()), then compute the PA using 13243 * cached info, else call va_to_pa(). 13244 * 13245 * Note: this function is neither MT-safe nor consistent in the presence 13246 * of multiple, interleaved threads. This function was created to enable 13247 * an optimization used during boot (at a point when there's only one thread 13248 * executing on the "boot CPU", and before startup_vm() has been called). 13249 */ 13250 static uint64_t 13251 cached_va_to_pa(void *vaddr) 13252 { 13253 static uint64_t prev_vaddr_base = 0; 13254 static uint64_t prev_pfn = 0; 13255 13256 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13257 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13258 } else { 13259 uint64_t pa = va_to_pa(vaddr); 13260 13261 if (pa != ((uint64_t)-1)) { 13262 /* 13263 * Computed physical address is valid. Cache its 13264 * related info for the next cached_va_to_pa() call. 13265 */ 13266 prev_pfn = pa & MMU_PAGEMASK; 13267 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13268 } 13269 13270 return (pa); 13271 } 13272 } 13273 13274 /* 13275 * Carve up our nucleus hblk region. We may allocate more hblks than 13276 * asked due to rounding errors but we are guaranteed to have at least 13277 * enough space to allocate the requested number of hblk8's and hblk1's. 
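 *
 * Illustrative arithmetic (the byte counts are made up; the formula is
 * the one used below): with size = 64K, nhblk1 = 16 and
 * hme1blk_sz = 192, hblk8 carving stops at
 * hblk8_bound = 64K - 16 * 192 - hme8blk_sz, so at least 16 * 192
 * bytes remain at the tail of the region for the requested hblk1's.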
13278 */ 13279 void 13280 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 13281 { 13282 struct hme_blk *hmeblkp; 13283 size_t hme8blk_sz, hme1blk_sz; 13284 size_t i; 13285 size_t hblk8_bound; 13286 ulong_t j = 0, k = 0; 13287 13288 ASSERT(addr != NULL && size != 0); 13289 13290 /* Need to use proper structure alignment */ 13291 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 13292 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 13293 13294 nucleus_hblk8.list = (void *)addr; 13295 nucleus_hblk8.index = 0; 13296 13297 /* 13298 * Use as much memory as possible for hblk8's since we 13299 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 13300 * We need to hold back enough space for the hblk1's which 13301 * we'll allocate next. 13302 */ 13303 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 13304 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 13305 hmeblkp = (struct hme_blk *)addr; 13306 addr += hme8blk_sz; 13307 hmeblkp->hblk_nuc_bit = 1; 13308 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13309 } 13310 nucleus_hblk8.len = j; 13311 ASSERT(j >= nhblk8); 13312 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 13313 13314 nucleus_hblk1.list = (void *)addr; 13315 nucleus_hblk1.index = 0; 13316 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 13317 hmeblkp = (struct hme_blk *)addr; 13318 addr += hme1blk_sz; 13319 hmeblkp->hblk_nuc_bit = 1; 13320 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13321 } 13322 ASSERT(k >= nhblk1); 13323 nucleus_hblk1.len = k; 13324 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 13325 } 13326 13327 /* 13328 * This function is currently not supported on this platform. For what 13329 * it's supposed to do, see hat.c and hat_srmmu.c 13330 */ 13331 /* ARGSUSED */ 13332 faultcode_t 13333 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 13334 uint_t flags) 13335 { 13336 ASSERT(hat->sfmmu_xhat_provider == NULL); 13337 return (FC_NOSUPPORT); 13338 } 13339 13340 /* 13341 * Searchs the mapping list of the page for a mapping of the same size. If not 13342 * found the corresponding bit is cleared in the p_index field. When large 13343 * pages are more prevalent in the system, we can maintain the mapping list 13344 * in order and we don't have to traverse the list each time. Just check the 13345 * next and prev entries, and if both are of different size, we clear the bit. 13346 */ 13347 static void 13348 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 13349 { 13350 struct sf_hment *sfhmep; 13351 struct hme_blk *hmeblkp; 13352 int index; 13353 pgcnt_t npgs; 13354 13355 ASSERT(ttesz > TTE8K); 13356 13357 ASSERT(sfmmu_mlist_held(pp)); 13358 13359 ASSERT(PP_ISMAPPED_LARGE(pp)); 13360 13361 /* 13362 * Traverse mapping list looking for another mapping of same size. 13363 * since we only want to clear index field if all mappings of 13364 * that size are gone. 13365 */ 13366 13367 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 13368 if (IS_PAHME(sfhmep)) 13369 continue; 13370 hmeblkp = sfmmu_hmetohblk(sfhmep); 13371 if (hmeblkp->hblk_xhat_bit) 13372 continue; 13373 if (hme_size(sfhmep) == ttesz) { 13374 /* 13375 * another mapping of the same size. don't clear index. 13376 */ 13377 return; 13378 } 13379 } 13380 13381 /* 13382 * Clear the p_index bit for large page. 
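 *
 * For example, when the last 4M mapping of a page goes away this
 * clears the TTE4M index bit in all TTEPAGES(TTE4M) == 512 (4M / 8K)
 * constituent pages, walking them with PP_PAGENEXT().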
13383 */ 13384 index = PAGESZ_TO_INDEX(ttesz); 13385 npgs = TTEPAGES(ttesz); 13386 while (npgs-- > 0) { 13387 ASSERT(pp->p_index & index); 13388 pp->p_index &= ~index; 13389 pp = PP_PAGENEXT(pp); 13390 } 13391 } 13392 13393 /* 13394 * return supported features 13395 */ 13396 /* ARGSUSED */ 13397 int 13398 hat_supported(enum hat_features feature, void *arg) 13399 { 13400 switch (feature) { 13401 case HAT_SHARED_PT: 13402 case HAT_DYNAMIC_ISM_UNMAP: 13403 case HAT_VMODSORT: 13404 return (1); 13405 case HAT_SHARED_REGIONS: 13406 if (shctx_on) 13407 return (1); 13408 else 13409 return (0); 13410 default: 13411 return (0); 13412 } 13413 } 13414 13415 void 13416 hat_enter(struct hat *hat) 13417 { 13418 hatlock_t *hatlockp; 13419 13420 if (hat != ksfmmup) { 13421 hatlockp = TSB_HASH(hat); 13422 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13423 } 13424 } 13425 13426 void 13427 hat_exit(struct hat *hat) 13428 { 13429 hatlock_t *hatlockp; 13430 13431 if (hat != ksfmmup) { 13432 hatlockp = TSB_HASH(hat); 13433 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13434 } 13435 } 13436 13437 /*ARGSUSED*/ 13438 void 13439 hat_reserve(struct as *as, caddr_t addr, size_t len) 13440 { 13441 } 13442 13443 static void 13444 hat_kstat_init(void) 13445 { 13446 kstat_t *ksp; 13447 13448 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13449 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13450 KSTAT_FLAG_VIRTUAL); 13451 if (ksp) { 13452 ksp->ks_data = (void *) &sfmmu_global_stat; 13453 kstat_install(ksp); 13454 } 13455 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13456 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13457 KSTAT_FLAG_VIRTUAL); 13458 if (ksp) { 13459 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13460 kstat_install(ksp); 13461 } 13462 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13463 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13464 KSTAT_FLAG_WRITABLE); 13465 if (ksp) { 13466 ksp->ks_update = sfmmu_kstat_percpu_update; 13467 kstat_install(ksp); 13468 } 13469 } 13470 13471 /* ARGSUSED */ 13472 static int 13473 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13474 { 13475 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13476 struct tsbmiss *tsbm = tsbmiss_area; 13477 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13478 int i; 13479 13480 ASSERT(cpu_kstat); 13481 if (rw == KSTAT_READ) { 13482 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13483 cpu_kstat->sf_itlb_misses = 0; 13484 cpu_kstat->sf_dtlb_misses = 0; 13485 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13486 tsbm->uprot_traps; 13487 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13488 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13489 cpu_kstat->sf_tsb_hits = 0; 13490 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13491 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13492 } 13493 } else { 13494 /* KSTAT_WRITE is used to clear stats */ 13495 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13496 tsbm->utsb_misses = 0; 13497 tsbm->ktsb_misses = 0; 13498 tsbm->uprot_traps = 0; 13499 tsbm->kprot_traps = 0; 13500 kpmtsbm->kpm_dtlb_misses = 0; 13501 kpmtsbm->kpm_tsb_misses = 0; 13502 } 13503 } 13504 return (0); 13505 } 13506 13507 #ifdef DEBUG 13508 13509 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13510 13511 /* 13512 * A tte checker. *orig_old is the value we read before cas. 13513 * *cur is the value returned by cas. 13514 * *new is the desired value when we do the cas. 13515 * 13516 * *hmeblkp is currently unused. 
13517 */ 13518 13519 /* ARGSUSED */ 13520 void 13521 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13522 { 13523 pfn_t i, j, k; 13524 int cpuid = CPU->cpu_id; 13525 13526 gorig[cpuid] = orig_old; 13527 gcur[cpuid] = cur; 13528 gnew[cpuid] = new; 13529 13530 #ifdef lint 13531 hmeblkp = hmeblkp; 13532 #endif 13533 13534 if (TTE_IS_VALID(orig_old)) { 13535 if (TTE_IS_VALID(cur)) { 13536 i = TTE_TO_TTEPFN(orig_old); 13537 j = TTE_TO_TTEPFN(cur); 13538 k = TTE_TO_TTEPFN(new); 13539 if (i != j) { 13540 /* remap error? */ 13541 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13542 } 13543 13544 if (i != k) { 13545 /* remap error? */ 13546 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13547 } 13548 } else { 13549 if (TTE_IS_VALID(new)) { 13550 panic("chk_tte: invalid cur? "); 13551 } 13552 13553 i = TTE_TO_TTEPFN(orig_old); 13554 k = TTE_TO_TTEPFN(new); 13555 if (i != k) { 13556 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13557 } 13558 } 13559 } else { 13560 if (TTE_IS_VALID(cur)) { 13561 j = TTE_TO_TTEPFN(cur); 13562 if (TTE_IS_VALID(new)) { 13563 k = TTE_TO_TTEPFN(new); 13564 if (j != k) { 13565 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13566 j, k); 13567 } 13568 } else { 13569 panic("chk_tte: why here?"); 13570 } 13571 } else { 13572 if (!TTE_IS_VALID(new)) { 13573 panic("chk_tte: why here2 ?"); 13574 } 13575 } 13576 } 13577 } 13578 13579 #endif /* DEBUG */ 13580 13581 extern void prefetch_tsbe_read(struct tsbe *); 13582 extern void prefetch_tsbe_write(struct tsbe *); 13583 13584 13585 /* 13586 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13587 * us optimal performance on Cheetah+. You can only have 8 outstanding 13588 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13589 * prefetch to make the most utilization of the prefetch capability. 13590 */ 13591 #define TSBE_PREFETCH_STRIDE (7) 13592 13593 void 13594 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13595 { 13596 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13597 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13598 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13599 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13600 struct tsbe *old; 13601 struct tsbe *new; 13602 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13603 uint64_t va; 13604 int new_offset; 13605 int i; 13606 int vpshift; 13607 int last_prefetch; 13608 13609 if (old_bytes == new_bytes) { 13610 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13611 } else { 13612 13613 /* 13614 * A TSBE is 16 bytes which means there are four TSBE's per 13615 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13616 */ 13617 old = (struct tsbe *)old_tsbinfo->tsb_va; 13618 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13619 for (i = 0; i < old_entries; i++, old++) { 13620 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13621 prefetch_tsbe_read(old); 13622 if (!old->tte_tag.tag_invalid) { 13623 /* 13624 * We have a valid TTE to remap. Check the 13625 * size. We won't remap 64K or 512K TTEs 13626 * because they span more than one TSB entry 13627 * and are indexed using an 8K virt. page. 13628 * Ditto for 32M and 256M TTEs. 
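 *
 * For example, a single 64K TTE can legitimately sit in any of the
 * 64K / 8K = 8 slots named by its constituent 8K pages, so there is
 * no single slot to copy it to; dropping it is safe because the TSB
 * is only a cache and the entry is refilled on the next TSB miss.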
13629 */ 13630 if (TTE_CSZ(&old->tte_data) == TTE64K || 13631 TTE_CSZ(&old->tte_data) == TTE512K) 13632 continue; 13633 if (mmu_page_sizes == max_mmu_page_sizes) { 13634 if (TTE_CSZ(&old->tte_data) == TTE32M || 13635 TTE_CSZ(&old->tte_data) == TTE256M) 13636 continue; 13637 } 13638 13639 /* clear the lower 22 bits of the va */ 13640 va = *(uint64_t *)old << 22; 13641 /* turn va into a virtual pfn */ 13642 va >>= 22 - TSB_START_SIZE; 13643 /* 13644 * or in bits from the offset in the tsb 13645 * to get the real virtual pfn. These 13646 * correspond to bits [21:13] in the va 13647 */ 13648 vpshift = 13649 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13650 0x1ff; 13651 va |= (i << vpshift); 13652 va >>= vpshift; 13653 new_offset = va & (new_entries - 1); 13654 new = new_base + new_offset; 13655 prefetch_tsbe_write(new); 13656 *new = *old; 13657 } 13658 } 13659 } 13660 } 13661 13662 /* 13663 * unused in sfmmu 13664 */ 13665 void 13666 hat_dump(void) 13667 { 13668 } 13669 13670 /* 13671 * Called when a thread is exiting and we have switched to the kernel address 13672 * space. Perform the same VM initialization resume() uses when switching 13673 * processes. 13674 * 13675 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13676 * we call it anyway in case the semantics change in the future. 13677 */ 13678 /*ARGSUSED*/ 13679 void 13680 hat_thread_exit(kthread_t *thd) 13681 { 13682 uint_t pgsz_cnum; 13683 uint_t pstate_save; 13684 13685 ASSERT(thd->t_procp->p_as == &kas); 13686 13687 pgsz_cnum = KCONTEXT; 13688 #ifdef sun4u 13689 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13690 #endif 13691 13692 /* 13693 * Note that sfmmu_load_mmustate() is currently a no-op for 13694 * kernel threads. We need to disable interrupts here, 13695 * simply because otherwise sfmmu_load_mmustate() would panic 13696 * if the caller does not disable interrupts. 13697 */ 13698 pstate_save = sfmmu_disable_intrs(); 13699 13700 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13701 sfmmu_setctx_sec(pgsz_cnum); 13702 sfmmu_load_mmustate(ksfmmup); 13703 sfmmu_enable_intrs(pstate_save); 13704 } 13705 13706 13707 /* 13708 * SRD support 13709 */ 13710 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13711 (((uintptr_t)(vp)) >> 11)) & \ 13712 srd_hashmask) 13713 13714 /* 13715 * Attach the process to the srd struct associated with the exec vnode 13716 * from which the process is started. 
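 *
 * The bucket manipulation below follows the usual optimistic pattern
 * (sketched here; the code underneath is authoritative):
 *
 *	search srd_buckets[hash] under srdb_lock; if found, refcnt++, done
 *	allocate a new srd from srd_cache outside the lock (KM_SLEEP)
 *	re-search under srdb_lock; if a racer inserted one, free ours
 *	otherwise link the new srd at the head of the bucket chain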
13717 */ 13718 void 13719 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13720 { 13721 uint_t hash = SRD_HASH_FUNCTION(evp); 13722 sf_srd_t *srdp; 13723 sf_srd_t *newsrdp; 13724 13725 ASSERT(sfmmup != ksfmmup); 13726 ASSERT(sfmmup->sfmmu_srdp == NULL); 13727 13728 if (!shctx_on) { 13729 return; 13730 } 13731 13732 VN_HOLD(evp); 13733 13734 if (srd_buckets[hash].srdb_srdp != NULL) { 13735 mutex_enter(&srd_buckets[hash].srdb_lock); 13736 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13737 srdp = srdp->srd_hash) { 13738 if (srdp->srd_evp == evp) { 13739 ASSERT(srdp->srd_refcnt >= 0); 13740 sfmmup->sfmmu_srdp = srdp; 13741 atomic_add_32( 13742 (volatile uint_t *)&srdp->srd_refcnt, 1); 13743 mutex_exit(&srd_buckets[hash].srdb_lock); 13744 return; 13745 } 13746 } 13747 mutex_exit(&srd_buckets[hash].srdb_lock); 13748 } 13749 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13750 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13751 13752 newsrdp->srd_evp = evp; 13753 newsrdp->srd_refcnt = 1; 13754 newsrdp->srd_hmergnfree = NULL; 13755 newsrdp->srd_ismrgnfree = NULL; 13756 13757 mutex_enter(&srd_buckets[hash].srdb_lock); 13758 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13759 srdp = srdp->srd_hash) { 13760 if (srdp->srd_evp == evp) { 13761 ASSERT(srdp->srd_refcnt >= 0); 13762 sfmmup->sfmmu_srdp = srdp; 13763 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 13764 mutex_exit(&srd_buckets[hash].srdb_lock); 13765 kmem_cache_free(srd_cache, newsrdp); 13766 return; 13767 } 13768 } 13769 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13770 srd_buckets[hash].srdb_srdp = newsrdp; 13771 sfmmup->sfmmu_srdp = newsrdp; 13772 13773 mutex_exit(&srd_buckets[hash].srdb_lock); 13774 13775 } 13776 13777 static void 13778 sfmmu_leave_srd(sfmmu_t *sfmmup) 13779 { 13780 vnode_t *evp; 13781 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13782 uint_t hash; 13783 sf_srd_t **prev_srdpp; 13784 sf_region_t *rgnp; 13785 sf_region_t *nrgnp; 13786 #ifdef DEBUG 13787 int rgns = 0; 13788 #endif 13789 int i; 13790 13791 ASSERT(sfmmup != ksfmmup); 13792 ASSERT(srdp != NULL); 13793 ASSERT(srdp->srd_refcnt > 0); 13794 ASSERT(sfmmup->sfmmu_scdp == NULL); 13795 ASSERT(sfmmup->sfmmu_free == 1); 13796 13797 sfmmup->sfmmu_srdp = NULL; 13798 evp = srdp->srd_evp; 13799 ASSERT(evp != NULL); 13800 if (atomic_add_32_nv( 13801 (volatile uint_t *)&srdp->srd_refcnt, -1)) { 13802 VN_RELE(evp); 13803 return; 13804 } 13805 13806 hash = SRD_HASH_FUNCTION(evp); 13807 mutex_enter(&srd_buckets[hash].srdb_lock); 13808 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13809 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13810 if (srdp->srd_evp == evp) { 13811 break; 13812 } 13813 } 13814 if (srdp == NULL || srdp->srd_refcnt) { 13815 mutex_exit(&srd_buckets[hash].srdb_lock); 13816 VN_RELE(evp); 13817 return; 13818 } 13819 *prev_srdpp = srdp->srd_hash; 13820 mutex_exit(&srd_buckets[hash].srdb_lock); 13821 13822 ASSERT(srdp->srd_refcnt == 0); 13823 VN_RELE(evp); 13824 13825 #ifdef DEBUG 13826 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13827 ASSERT(srdp->srd_rgnhash[i] == NULL); 13828 } 13829 #endif /* DEBUG */ 13830 13831 /* free each hme regions in the srd */ 13832 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13833 nrgnp = rgnp->rgn_next; 13834 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13835 ASSERT(rgnp->rgn_refcnt == 0); 13836 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13837 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13838 ASSERT(rgnp->rgn_hmeflags == 0); 13839 
ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13840 #ifdef DEBUG 13841 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13842 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13843 } 13844 rgns++; 13845 #endif /* DEBUG */ 13846 kmem_cache_free(region_cache, rgnp); 13847 } 13848 ASSERT(rgns == srdp->srd_next_hmerid); 13849 13850 #ifdef DEBUG 13851 rgns = 0; 13852 #endif 13853 /* free each ism rgns in the srd */ 13854 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13855 nrgnp = rgnp->rgn_next; 13856 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13857 ASSERT(rgnp->rgn_refcnt == 0); 13858 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13859 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13860 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13861 #ifdef DEBUG 13862 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13863 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13864 } 13865 rgns++; 13866 #endif /* DEBUG */ 13867 kmem_cache_free(region_cache, rgnp); 13868 } 13869 ASSERT(rgns == srdp->srd_next_ismrid); 13870 ASSERT(srdp->srd_ismbusyrgns == 0); 13871 ASSERT(srdp->srd_hmebusyrgns == 0); 13872 13873 srdp->srd_next_ismrid = 0; 13874 srdp->srd_next_hmerid = 0; 13875 13876 bzero((void *)srdp->srd_ismrgnp, 13877 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13878 bzero((void *)srdp->srd_hmergnp, 13879 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13880 13881 ASSERT(srdp->srd_scdp == NULL); 13882 kmem_cache_free(srd_cache, srdp); 13883 } 13884 13885 /* ARGSUSED */ 13886 static int 13887 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13888 { 13889 sf_srd_t *srdp = (sf_srd_t *)buf; 13890 bzero(buf, sizeof (*srdp)); 13891 13892 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13893 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13894 return (0); 13895 } 13896 13897 /* ARGSUSED */ 13898 static void 13899 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 13900 { 13901 sf_srd_t *srdp = (sf_srd_t *)buf; 13902 13903 mutex_destroy(&srdp->srd_mutex); 13904 mutex_destroy(&srdp->srd_scd_mutex); 13905 } 13906 13907 /* 13908 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 13909 * at the same time for the same process and address range. This is ensured by 13910 * the fact that address space is locked as writer when a process joins the 13911 * regions. Therefore there's no need to hold an srd lock during the entire 13912 * execution of hat_join_region()/hat_leave_region(). 13913 */ 13914 13915 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 13916 (((uintptr_t)(obj)) >> 11)) & \ 13917 srd_rgn_hashmask) 13918 /* 13919 * This routine implements the shared context functionality required when 13920 * attaching a segment to an address space. It must be called from 13921 * hat_share() for D(ISM) segments and from segvn_create() for segments 13922 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 13923 * which is saved in the private segment data for hme segments and 13924 * the ism_map structure for ism segments. 
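 *
 * A minimal sketch of the expected calling pattern (the variable names are
 * illustrative only, not taken from the actual callers):
 *
 *	cookie = hat_join_region(hat, addr, len, vp, off, prot, pgszc,
 *	    callback, HAT_REGION_TEXT);
 *	if (cookie != HAT_INVALID_REGION_COOKIE) {
 *		... use the shared region; later, on unmap or when the
 *		... segment attributes change:
 *		hat_leave_region(hat, cookie, HAT_REGION_TEXT);
 *	}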
13925 */ 13926 hat_region_cookie_t 13927 hat_join_region(struct hat *sfmmup, 13928 caddr_t r_saddr, 13929 size_t r_size, 13930 void *r_obj, 13931 u_offset_t r_objoff, 13932 uchar_t r_perm, 13933 uchar_t r_pgszc, 13934 hat_rgn_cb_func_t r_cb_function, 13935 uint_t flags) 13936 { 13937 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13938 uint_t rhash; 13939 uint_t rid; 13940 hatlock_t *hatlockp; 13941 sf_region_t *rgnp; 13942 sf_region_t *new_rgnp = NULL; 13943 int i; 13944 uint16_t *nextidp; 13945 sf_region_t **freelistp; 13946 int maxids; 13947 sf_region_t **rarrp; 13948 uint16_t *busyrgnsp; 13949 ulong_t rttecnt; 13950 uchar_t tteflag; 13951 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 13952 int text = (r_type == HAT_REGION_TEXT); 13953 13954 if (srdp == NULL || r_size == 0) { 13955 return (HAT_INVALID_REGION_COOKIE); 13956 } 13957 13958 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 13959 ASSERT(sfmmup != ksfmmup); 13960 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 13961 ASSERT(srdp->srd_refcnt > 0); 13962 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 13963 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 13964 ASSERT(r_pgszc < mmu_page_sizes); 13965 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 13966 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 13967 panic("hat_join_region: region addr or size is not aligned\n"); 13968 } 13969 13970 13971 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 13972 SFMMU_REGION_HME; 13973 /* 13974 * Currently only support shared hmes for the read only main text 13975 * region. 13976 */ 13977 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 13978 (r_perm & PROT_WRITE))) { 13979 return (HAT_INVALID_REGION_COOKIE); 13980 } 13981 13982 rhash = RGN_HASH_FUNCTION(r_obj); 13983 13984 if (r_type == SFMMU_REGION_ISM) { 13985 nextidp = &srdp->srd_next_ismrid; 13986 freelistp = &srdp->srd_ismrgnfree; 13987 maxids = SFMMU_MAX_ISM_REGIONS; 13988 rarrp = srdp->srd_ismrgnp; 13989 busyrgnsp = &srdp->srd_ismbusyrgns; 13990 } else { 13991 nextidp = &srdp->srd_next_hmerid; 13992 freelistp = &srdp->srd_hmergnfree; 13993 maxids = SFMMU_MAX_HME_REGIONS; 13994 rarrp = srdp->srd_hmergnp; 13995 busyrgnsp = &srdp->srd_hmebusyrgns; 13996 } 13997 13998 mutex_enter(&srdp->srd_mutex); 13999 14000 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14001 rgnp = rgnp->rgn_hash) { 14002 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 14003 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 14004 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 14005 break; 14006 } 14007 } 14008 14009 rfound: 14010 if (rgnp != NULL) { 14011 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14012 ASSERT(rgnp->rgn_cb_function == r_cb_function); 14013 ASSERT(rgnp->rgn_refcnt >= 0); 14014 rid = rgnp->rgn_id; 14015 ASSERT(rid < maxids); 14016 ASSERT(rarrp[rid] == rgnp); 14017 ASSERT(rid < *nextidp); 14018 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14019 mutex_exit(&srdp->srd_mutex); 14020 if (new_rgnp != NULL) { 14021 kmem_cache_free(region_cache, new_rgnp); 14022 } 14023 if (r_type == SFMMU_REGION_HME) { 14024 int myjoin = 14025 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 14026 14027 sfmmu_link_to_hmeregion(sfmmup, rgnp); 14028 /* 14029 * bitmap should be updated after linking sfmmu on 14030 * region list so that pageunload() doesn't skip 14031 * TSB/TLB flush. As soon as bitmap is updated another 14032 * thread in this process can already start accessing 14033 * this region. 
14034 */ 14035 /* 14036 * Normally ttecnt accounting is done as part of 14037 * pagefault handling. But a process may not take any 14038 * pagefaults on shared hmeblks created by some other 14039 * process. To compensate for this assume that the 14040 * entire region will end up faulted in using 14041 * the region's pagesize. 14042 * 14043 */ 14044 if (r_pgszc > TTE8K) { 14045 tteflag = 1 << r_pgszc; 14046 if (disable_large_pages & tteflag) { 14047 tteflag = 0; 14048 } 14049 } else { 14050 tteflag = 0; 14051 } 14052 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 14053 hatlockp = sfmmu_hat_enter(sfmmup); 14054 sfmmup->sfmmu_rtteflags |= tteflag; 14055 sfmmu_hat_exit(hatlockp); 14056 } 14057 hatlockp = sfmmu_hat_enter(sfmmup); 14058 14059 /* 14060 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 14061 * region to allow for large page allocation failure. 14062 */ 14063 if (r_pgszc >= TTE4M) { 14064 sfmmup->sfmmu_tsb0_4minflcnt += 14065 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14066 } 14067 14068 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14069 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14070 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14071 rttecnt); 14072 14073 if (text && r_pgszc >= TTE4M && 14074 (tteflag || ((disable_large_pages >> TTE4M) & 14075 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 14076 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 14077 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 14078 } 14079 14080 sfmmu_hat_exit(hatlockp); 14081 /* 14082 * On Panther we need to make sure TLB is programmed 14083 * to accept 32M/256M pages. Call 14084 * sfmmu_check_page_sizes() now to make sure TLB is 14085 * setup before making hmeregions visible to other 14086 * threads. 14087 */ 14088 sfmmu_check_page_sizes(sfmmup, 1); 14089 hatlockp = sfmmu_hat_enter(sfmmup); 14090 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14091 14092 /* 14093 * if context is invalid tsb miss exception code will 14094 * call sfmmu_check_page_sizes() and update tsbmiss 14095 * area later. 14096 */ 14097 kpreempt_disable(); 14098 if (myjoin && 14099 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 14100 != INVALID_CONTEXT)) { 14101 struct tsbmiss *tsbmp; 14102 14103 tsbmp = &tsbmiss_area[CPU->cpu_id]; 14104 ASSERT(sfmmup == tsbmp->usfmmup); 14105 BT_SET(tsbmp->shmermap, rid); 14106 if (r_pgszc > TTE64K) { 14107 tsbmp->uhat_rtteflags |= tteflag; 14108 } 14109 14110 } 14111 kpreempt_enable(); 14112 14113 sfmmu_hat_exit(hatlockp); 14114 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 14115 HAT_INVALID_REGION_COOKIE); 14116 } else { 14117 hatlockp = sfmmu_hat_enter(sfmmup); 14118 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 14119 sfmmu_hat_exit(hatlockp); 14120 } 14121 ASSERT(rid < maxids); 14122 14123 if (r_type == SFMMU_REGION_ISM) { 14124 sfmmu_find_scd(sfmmup); 14125 } 14126 return ((hat_region_cookie_t)((uint64_t)rid)); 14127 } 14128 14129 ASSERT(new_rgnp == NULL); 14130 14131 if (*busyrgnsp >= maxids) { 14132 mutex_exit(&srdp->srd_mutex); 14133 return (HAT_INVALID_REGION_COOKIE); 14134 } 14135 14136 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14137 if (*freelistp != NULL) { 14138 rgnp = *freelistp; 14139 *freelistp = rgnp->rgn_next; 14140 ASSERT(rgnp->rgn_id < *nextidp); 14141 ASSERT(rgnp->rgn_id < maxids); 14142 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14143 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14144 == r_type); 14145 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14146 ASSERT(rgnp->rgn_hmeflags == 0); 14147 } else { 14148 /* 14149 * release local locks before memory allocation. 
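 *
 * kmem_cache_alloc() with KM_SLEEP can block, so srd_mutex is dropped
 * before the allocation and the region hash chain is searched again once
 * the lock is re-acquired, in case another thread created the same region
 * in the meantime (hence the goto rfound below).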
14150 */ 14151 mutex_exit(&srdp->srd_mutex); 14152 14153 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14154 14155 mutex_enter(&srdp->srd_mutex); 14156 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14157 rgnp = rgnp->rgn_hash) { 14158 if (rgnp->rgn_saddr == r_saddr && 14159 rgnp->rgn_size == r_size && 14160 rgnp->rgn_obj == r_obj && 14161 rgnp->rgn_objoff == r_objoff && 14162 rgnp->rgn_perm == r_perm && 14163 rgnp->rgn_pgszc == r_pgszc) { 14164 break; 14165 } 14166 } 14167 if (rgnp != NULL) { 14168 goto rfound; 14169 } 14170 14171 if (*nextidp >= maxids) { 14172 mutex_exit(&srdp->srd_mutex); 14173 goto fail; 14174 } 14175 rgnp = new_rgnp; 14176 new_rgnp = NULL; 14177 rgnp->rgn_id = (*nextidp)++; 14178 ASSERT(rgnp->rgn_id < maxids); 14179 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14180 rarrp[rgnp->rgn_id] = rgnp; 14181 } 14182 14183 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14184 ASSERT(rgnp->rgn_hmeflags == 0); 14185 #ifdef DEBUG 14186 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14187 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14188 } 14189 #endif 14190 rgnp->rgn_saddr = r_saddr; 14191 rgnp->rgn_size = r_size; 14192 rgnp->rgn_obj = r_obj; 14193 rgnp->rgn_objoff = r_objoff; 14194 rgnp->rgn_perm = r_perm; 14195 rgnp->rgn_pgszc = r_pgszc; 14196 rgnp->rgn_flags = r_type; 14197 rgnp->rgn_refcnt = 0; 14198 rgnp->rgn_cb_function = r_cb_function; 14199 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14200 srdp->srd_rgnhash[rhash] = rgnp; 14201 (*busyrgnsp)++; 14202 ASSERT(*busyrgnsp <= maxids); 14203 goto rfound; 14204 14205 fail: 14206 ASSERT(new_rgnp != NULL); 14207 kmem_cache_free(region_cache, new_rgnp); 14208 return (HAT_INVALID_REGION_COOKIE); 14209 } 14210 14211 /* 14212 * This function implements the shared context functionality required 14213 * when detaching a segment from an address space. It must be called 14214 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14215 * for segments with a valid region_cookie. 14216 * It will also be called from all seg_vn routines which change a 14217 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14218 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14219 * from segvn_fault(). 14220 */ 14221 void 14222 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14223 { 14224 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14225 sf_scd_t *scdp; 14226 uint_t rhash; 14227 uint_t rid = (uint_t)((uint64_t)rcookie); 14228 hatlock_t *hatlockp = NULL; 14229 sf_region_t *rgnp; 14230 sf_region_t **prev_rgnpp; 14231 sf_region_t *cur_rgnp; 14232 void *r_obj; 14233 int i; 14234 caddr_t r_saddr; 14235 caddr_t r_eaddr; 14236 size_t r_size; 14237 uchar_t r_pgszc; 14238 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14239 14240 ASSERT(sfmmup != ksfmmup); 14241 ASSERT(srdp != NULL); 14242 ASSERT(srdp->srd_refcnt > 0); 14243 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14244 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14245 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14246 14247 r_type = (r_type == HAT_REGION_ISM) ? 
SFMMU_REGION_ISM : 14248 SFMMU_REGION_HME; 14249 14250 if (r_type == SFMMU_REGION_ISM) { 14251 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14252 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14253 rgnp = srdp->srd_ismrgnp[rid]; 14254 } else { 14255 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14256 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14257 rgnp = srdp->srd_hmergnp[rid]; 14258 } 14259 ASSERT(rgnp != NULL); 14260 ASSERT(rgnp->rgn_id == rid); 14261 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14262 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14263 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14264 14265 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 14266 if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) { 14267 xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr, 14268 rgnp->rgn_size, 0, NULL); 14269 } 14270 14271 if (sfmmup->sfmmu_free) { 14272 ulong_t rttecnt; 14273 r_pgszc = rgnp->rgn_pgszc; 14274 r_size = rgnp->rgn_size; 14275 14276 ASSERT(sfmmup->sfmmu_scdp == NULL); 14277 if (r_type == SFMMU_REGION_ISM) { 14278 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14279 } else { 14280 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14281 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14282 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14283 14284 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14285 -rttecnt); 14286 14287 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14288 } 14289 } else if (r_type == SFMMU_REGION_ISM) { 14290 hatlockp = sfmmu_hat_enter(sfmmup); 14291 ASSERT(rid < srdp->srd_next_ismrid); 14292 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14293 scdp = sfmmup->sfmmu_scdp; 14294 if (scdp != NULL && 14295 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14296 sfmmu_leave_scd(sfmmup, r_type); 14297 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14298 } 14299 sfmmu_hat_exit(hatlockp); 14300 } else { 14301 ulong_t rttecnt; 14302 r_pgszc = rgnp->rgn_pgszc; 14303 r_saddr = rgnp->rgn_saddr; 14304 r_size = rgnp->rgn_size; 14305 r_eaddr = r_saddr + r_size; 14306 14307 ASSERT(r_type == SFMMU_REGION_HME); 14308 hatlockp = sfmmu_hat_enter(sfmmup); 14309 ASSERT(rid < srdp->srd_next_hmerid); 14310 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14311 14312 /* 14313 * If region is part of an SCD call sfmmu_leave_scd(). 14314 * Otherwise if process is not exiting and has valid context 14315 * just drop the context on the floor to lose stale TLB 14316 * entries and force the update of tsb miss area to reflect 14317 * the new region map. After that clean our TSB entries. 14318 */ 14319 scdp = sfmmup->sfmmu_scdp; 14320 if (scdp != NULL && 14321 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14322 sfmmu_leave_scd(sfmmup, r_type); 14323 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14324 } 14325 sfmmu_invalidate_ctx(sfmmup); 14326 14327 i = TTE8K; 14328 while (i < mmu_page_sizes) { 14329 if (rgnp->rgn_ttecnt[i] != 0) { 14330 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14331 r_eaddr, i); 14332 if (i < TTE4M) { 14333 i = TTE4M; 14334 continue; 14335 } else { 14336 break; 14337 } 14338 } 14339 i++; 14340 } 14341 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. 
*/ 14342 if (r_pgszc >= TTE4M) { 14343 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14344 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14345 rttecnt); 14346 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14347 } 14348 14349 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14350 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14351 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14352 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14353 14354 sfmmu_hat_exit(hatlockp); 14355 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14356 /* sfmmup left the scd, grow private tsb */ 14357 sfmmu_check_page_sizes(sfmmup, 1); 14358 } else { 14359 sfmmu_check_page_sizes(sfmmup, 0); 14360 } 14361 } 14362 14363 if (r_type == SFMMU_REGION_HME) { 14364 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14365 } 14366 14367 r_obj = rgnp->rgn_obj; 14368 if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) { 14369 return; 14370 } 14371 14372 /* 14373 * looks like nobody uses this region anymore. Free it. 14374 */ 14375 rhash = RGN_HASH_FUNCTION(r_obj); 14376 mutex_enter(&srdp->srd_mutex); 14377 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14378 (cur_rgnp = *prev_rgnpp) != NULL; 14379 prev_rgnpp = &cur_rgnp->rgn_hash) { 14380 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14381 break; 14382 } 14383 } 14384 14385 if (cur_rgnp == NULL) { 14386 mutex_exit(&srdp->srd_mutex); 14387 return; 14388 } 14389 14390 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14391 *prev_rgnpp = rgnp->rgn_hash; 14392 if (r_type == SFMMU_REGION_ISM) { 14393 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14394 ASSERT(rid < srdp->srd_next_ismrid); 14395 rgnp->rgn_next = srdp->srd_ismrgnfree; 14396 srdp->srd_ismrgnfree = rgnp; 14397 ASSERT(srdp->srd_ismbusyrgns > 0); 14398 srdp->srd_ismbusyrgns--; 14399 mutex_exit(&srdp->srd_mutex); 14400 return; 14401 } 14402 mutex_exit(&srdp->srd_mutex); 14403 14404 /* 14405 * Destroy region's hmeblks. 14406 */ 14407 sfmmu_unload_hmeregion(srdp, rgnp); 14408 14409 rgnp->rgn_hmeflags = 0; 14410 14411 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14412 ASSERT(rgnp->rgn_id == rid); 14413 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14414 rgnp->rgn_ttecnt[i] = 0; 14415 } 14416 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14417 mutex_enter(&srdp->srd_mutex); 14418 ASSERT(rid < srdp->srd_next_hmerid); 14419 rgnp->rgn_next = srdp->srd_hmergnfree; 14420 srdp->srd_hmergnfree = rgnp; 14421 ASSERT(srdp->srd_hmebusyrgns > 0); 14422 srdp->srd_hmebusyrgns--; 14423 mutex_exit(&srdp->srd_mutex); 14424 } 14425 14426 /* 14427 * For now only called for hmeblk regions and not for ISM regions. 
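 *
 * This is expected to be used when an address space is duplicated (e.g. at
 * fork time from the seg_vn duplication path): the child hat inherits the
 * parent's region cookie, so below we take an additional reference on the
 * region, link the new hat onto the region's sfmmu list and add the
 * region's ttecnt to the new hat's accounting.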
14428 */ 14429 void 14430 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14431 { 14432 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14433 uint_t rid = (uint_t)((uint64_t)rcookie); 14434 sf_region_t *rgnp; 14435 sf_rgn_link_t *rlink; 14436 sf_rgn_link_t *hrlink; 14437 ulong_t rttecnt; 14438 14439 ASSERT(sfmmup != ksfmmup); 14440 ASSERT(srdp != NULL); 14441 ASSERT(srdp->srd_refcnt > 0); 14442 14443 ASSERT(rid < srdp->srd_next_hmerid); 14444 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14445 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14446 14447 rgnp = srdp->srd_hmergnp[rid]; 14448 ASSERT(rgnp->rgn_refcnt > 0); 14449 ASSERT(rgnp->rgn_id == rid); 14450 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14451 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14452 14453 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14454 14455 /* LINTED: constant in conditional context */ 14456 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14457 ASSERT(rlink != NULL); 14458 mutex_enter(&rgnp->rgn_mutex); 14459 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14460 /* LINTED: constant in conditional context */ 14461 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14462 ASSERT(hrlink != NULL); 14463 ASSERT(hrlink->prev == NULL); 14464 rlink->next = rgnp->rgn_sfmmu_head; 14465 rlink->prev = NULL; 14466 hrlink->prev = sfmmup; 14467 /* 14468 * make sure rlink's next field is correct 14469 * before making this link visible. 14470 */ 14471 membar_stst(); 14472 rgnp->rgn_sfmmu_head = sfmmup; 14473 mutex_exit(&rgnp->rgn_mutex); 14474 14475 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14476 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14477 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14478 /* update tsb0 inflation count */ 14479 if (rgnp->rgn_pgszc >= TTE4M) { 14480 sfmmup->sfmmu_tsb0_4minflcnt += 14481 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14482 } 14483 /* 14484 * Update regionid bitmask without hat lock since no other thread 14485 * can update this region bitmask right now. 
14486 */ 14487 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14488 } 14489 14490 /* ARGSUSED */ 14491 static int 14492 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14493 { 14494 sf_region_t *rgnp = (sf_region_t *)buf; 14495 bzero(buf, sizeof (*rgnp)); 14496 14497 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14498 14499 return (0); 14500 } 14501 14502 /* ARGSUSED */ 14503 static void 14504 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14505 { 14506 sf_region_t *rgnp = (sf_region_t *)buf; 14507 mutex_destroy(&rgnp->rgn_mutex); 14508 } 14509 14510 static int 14511 sfrgnmap_isnull(sf_region_map_t *map) 14512 { 14513 int i; 14514 14515 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14516 if (map->bitmap[i] != 0) { 14517 return (0); 14518 } 14519 } 14520 return (1); 14521 } 14522 14523 static int 14524 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14525 { 14526 int i; 14527 14528 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14529 if (map->bitmap[i] != 0) { 14530 return (0); 14531 } 14532 } 14533 return (1); 14534 } 14535 14536 #ifdef DEBUG 14537 static void 14538 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14539 { 14540 sfmmu_t *sp; 14541 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14542 14543 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14544 ASSERT(srdp == sp->sfmmu_srdp); 14545 if (sp == sfmmup) { 14546 if (onlist) { 14547 return; 14548 } else { 14549 panic("shctx: sfmmu 0x%p found on scd" 14550 "list 0x%p", (void *)sfmmup, 14551 (void *)*headp); 14552 } 14553 } 14554 } 14555 if (onlist) { 14556 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14557 (void *)sfmmup, (void *)*headp); 14558 } else { 14559 return; 14560 } 14561 } 14562 #else /* DEBUG */ 14563 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14564 #endif /* DEBUG */ 14565 14566 /* 14567 * Removes an sfmmu from the SCD sfmmu list. 14568 */ 14569 static void 14570 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14571 { 14572 ASSERT(sfmmup->sfmmu_srdp != NULL); 14573 check_scd_sfmmu_list(headp, sfmmup, 1); 14574 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14575 ASSERT(*headp != sfmmup); 14576 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14577 sfmmup->sfmmu_scd_link.next; 14578 } else { 14579 ASSERT(*headp == sfmmup); 14580 *headp = sfmmup->sfmmu_scd_link.next; 14581 } 14582 if (sfmmup->sfmmu_scd_link.next != NULL) { 14583 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14584 sfmmup->sfmmu_scd_link.prev; 14585 } 14586 } 14587 14588 14589 /* 14590 * Adds an sfmmu to the start of the queue. 14591 */ 14592 static void 14593 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14594 { 14595 check_scd_sfmmu_list(headp, sfmmup, 0); 14596 sfmmup->sfmmu_scd_link.prev = NULL; 14597 sfmmup->sfmmu_scd_link.next = *headp; 14598 if (*headp != NULL) 14599 (*headp)->sfmmu_scd_link.prev = sfmmup; 14600 *headp = sfmmup; 14601 } 14602 14603 /* 14604 * Remove an scd from the start of the queue. 14605 */ 14606 static void 14607 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14608 { 14609 if (scdp->scd_prev != NULL) { 14610 ASSERT(*headp != scdp); 14611 scdp->scd_prev->scd_next = scdp->scd_next; 14612 } else { 14613 ASSERT(*headp == scdp); 14614 *headp = scdp->scd_next; 14615 } 14616 14617 if (scdp->scd_next != NULL) { 14618 scdp->scd_next->scd_prev = scdp->scd_prev; 14619 } 14620 } 14621 14622 /* 14623 * Add an scd to the start of the queue. 
14624 */ 14625 static void 14626 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14627 { 14628 scdp->scd_prev = NULL; 14629 scdp->scd_next = *headp; 14630 if (*headp != NULL) { 14631 (*headp)->scd_prev = scdp; 14632 } 14633 *headp = scdp; 14634 } 14635 14636 static int 14637 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14638 { 14639 uint_t rid; 14640 uint_t i; 14641 uint_t j; 14642 ulong_t w; 14643 sf_region_t *rgnp; 14644 ulong_t tte8k_cnt = 0; 14645 ulong_t tte4m_cnt = 0; 14646 uint_t tsb_szc; 14647 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14648 sfmmu_t *ism_hatid; 14649 struct tsb_info *newtsb; 14650 int szc; 14651 14652 ASSERT(srdp != NULL); 14653 14654 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14655 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14656 continue; 14657 } 14658 j = 0; 14659 while (w) { 14660 if (!(w & 0x1)) { 14661 j++; 14662 w >>= 1; 14663 continue; 14664 } 14665 rid = (i << BT_ULSHIFT) | j; 14666 j++; 14667 w >>= 1; 14668 14669 if (rid < SFMMU_MAX_HME_REGIONS) { 14670 rgnp = srdp->srd_hmergnp[rid]; 14671 ASSERT(rgnp->rgn_id == rid); 14672 ASSERT(rgnp->rgn_refcnt > 0); 14673 14674 if (rgnp->rgn_pgszc < TTE4M) { 14675 tte8k_cnt += rgnp->rgn_size >> 14676 TTE_PAGE_SHIFT(TTE8K); 14677 } else { 14678 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14679 tte4m_cnt += rgnp->rgn_size >> 14680 TTE_PAGE_SHIFT(TTE4M); 14681 /* 14682 * Inflate SCD tsb0 by preallocating 14683 * 1/4 8k ttecnt for 4M regions to 14684 * allow for lgpg alloc failure. 14685 */ 14686 tte8k_cnt += rgnp->rgn_size >> 14687 (TTE_PAGE_SHIFT(TTE8K) + 2); 14688 } 14689 } else { 14690 rid -= SFMMU_MAX_HME_REGIONS; 14691 rgnp = srdp->srd_ismrgnp[rid]; 14692 ASSERT(rgnp->rgn_id == rid); 14693 ASSERT(rgnp->rgn_refcnt > 0); 14694 14695 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14696 ASSERT(ism_hatid->sfmmu_ismhat); 14697 14698 for (szc = 0; szc < TTE4M; szc++) { 14699 tte8k_cnt += 14700 ism_hatid->sfmmu_ttecnt[szc] << 14701 TTE_BSZS_SHIFT(szc); 14702 } 14703 14704 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14705 if (rgnp->rgn_pgszc >= TTE4M) { 14706 tte4m_cnt += rgnp->rgn_size >> 14707 TTE_PAGE_SHIFT(TTE4M); 14708 } 14709 } 14710 } 14711 } 14712 14713 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14714 14715 /* Allocate both the SCD TSBs here. */ 14716 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14717 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14718 (tsb_szc <= TSB_4M_SZCODE || 14719 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14720 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14721 TSB_ALLOC, scsfmmup))) { 14722 14723 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14724 return (TSB_ALLOCFAIL); 14725 } else { 14726 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14727 14728 if (tte4m_cnt) { 14729 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14730 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14731 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14732 (tsb_szc <= TSB_4M_SZCODE || 14733 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14734 TSB4M|TSB32M|TSB256M, 14735 TSB_ALLOC, scsfmmup))) { 14736 /* 14737 * If we fail to allocate the 2nd shared tsb, 14738 * just free the 1st tsb, return failure. 
14739 */ 14740 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14741 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14742 return (TSB_ALLOCFAIL); 14743 } else { 14744 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14745 newtsb->tsb_flags |= TSB_SHAREDCTX; 14746 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14747 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14748 } 14749 } 14750 SFMMU_STAT(sf_scd_1sttsb_alloc); 14751 } 14752 return (TSB_SUCCESS); 14753 } 14754 14755 static void 14756 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14757 { 14758 while (scd_sfmmu->sfmmu_tsb != NULL) { 14759 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14760 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14761 scd_sfmmu->sfmmu_tsb = next; 14762 } 14763 } 14764 14765 /* 14766 * Link the sfmmu onto the hme region list. 14767 */ 14768 void 14769 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14770 { 14771 uint_t rid; 14772 sf_rgn_link_t *rlink; 14773 sfmmu_t *head; 14774 sf_rgn_link_t *hrlink; 14775 14776 rid = rgnp->rgn_id; 14777 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14778 14779 /* LINTED: constant in conditional context */ 14780 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14781 ASSERT(rlink != NULL); 14782 mutex_enter(&rgnp->rgn_mutex); 14783 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14784 rlink->next = NULL; 14785 rlink->prev = NULL; 14786 /* 14787 * make sure rlink's next field is NULL 14788 * before making this link visible. 14789 */ 14790 membar_stst(); 14791 rgnp->rgn_sfmmu_head = sfmmup; 14792 } else { 14793 /* LINTED: constant in conditional context */ 14794 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14795 ASSERT(hrlink != NULL); 14796 ASSERT(hrlink->prev == NULL); 14797 rlink->next = head; 14798 rlink->prev = NULL; 14799 hrlink->prev = sfmmup; 14800 /* 14801 * make sure rlink's next field is correct 14802 * before making this link visible. 14803 */ 14804 membar_stst(); 14805 rgnp->rgn_sfmmu_head = sfmmup; 14806 } 14807 mutex_exit(&rgnp->rgn_mutex); 14808 } 14809 14810 /* 14811 * Unlink the sfmmu from the hme region list. 14812 */ 14813 void 14814 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14815 { 14816 uint_t rid; 14817 sf_rgn_link_t *rlink; 14818 14819 rid = rgnp->rgn_id; 14820 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14821 14822 /* LINTED: constant in conditional context */ 14823 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14824 ASSERT(rlink != NULL); 14825 mutex_enter(&rgnp->rgn_mutex); 14826 if (rgnp->rgn_sfmmu_head == sfmmup) { 14827 sfmmu_t *next = rlink->next; 14828 rgnp->rgn_sfmmu_head = next; 14829 /* 14830 * if we are stopped by xc_attention() after this 14831 * point the forward link walking in 14832 * sfmmu_rgntlb_demap() will work correctly since the 14833 * head correctly points to the next element. 
14834 */ 14835 membar_stst(); 14836 rlink->next = NULL; 14837 ASSERT(rlink->prev == NULL); 14838 if (next != NULL) { 14839 sf_rgn_link_t *nrlink; 14840 /* LINTED: constant in conditional context */ 14841 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14842 ASSERT(nrlink != NULL); 14843 ASSERT(nrlink->prev == sfmmup); 14844 nrlink->prev = NULL; 14845 } 14846 } else { 14847 sfmmu_t *next = rlink->next; 14848 sfmmu_t *prev = rlink->prev; 14849 sf_rgn_link_t *prlink; 14850 14851 ASSERT(prev != NULL); 14852 /* LINTED: constant in conditional context */ 14853 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14854 ASSERT(prlink != NULL); 14855 ASSERT(prlink->next == sfmmup); 14856 prlink->next = next; 14857 /* 14858 * if we are stopped by xc_attention() 14859 * after this point the forward link walking 14860 * will work correctly since the prev element 14861 * correctly points to the next element. 14862 */ 14863 membar_stst(); 14864 rlink->next = NULL; 14865 rlink->prev = NULL; 14866 if (next != NULL) { 14867 sf_rgn_link_t *nrlink; 14868 /* LINTED: constant in conditional context */ 14869 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14870 ASSERT(nrlink != NULL); 14871 ASSERT(nrlink->prev == sfmmup); 14872 nrlink->prev = prev; 14873 } 14874 } 14875 mutex_exit(&rgnp->rgn_mutex); 14876 } 14877 14878 /* 14879 * Link scd sfmmu onto ism or hme region list for each region in the 14880 * scd region map. 14881 */ 14882 void 14883 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14884 { 14885 uint_t rid; 14886 uint_t i; 14887 uint_t j; 14888 ulong_t w; 14889 sf_region_t *rgnp; 14890 sfmmu_t *scsfmmup; 14891 14892 scsfmmup = scdp->scd_sfmmup; 14893 ASSERT(scsfmmup->sfmmu_scdhat); 14894 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14895 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14896 continue; 14897 } 14898 j = 0; 14899 while (w) { 14900 if (!(w & 0x1)) { 14901 j++; 14902 w >>= 1; 14903 continue; 14904 } 14905 rid = (i << BT_ULSHIFT) | j; 14906 j++; 14907 w >>= 1; 14908 14909 if (rid < SFMMU_MAX_HME_REGIONS) { 14910 rgnp = srdp->srd_hmergnp[rid]; 14911 ASSERT(rgnp->rgn_id == rid); 14912 ASSERT(rgnp->rgn_refcnt > 0); 14913 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 14914 } else { 14915 sfmmu_t *ism_hatid = NULL; 14916 ism_ment_t *ism_ment; 14917 rid -= SFMMU_MAX_HME_REGIONS; 14918 rgnp = srdp->srd_ismrgnp[rid]; 14919 ASSERT(rgnp->rgn_id == rid); 14920 ASSERT(rgnp->rgn_refcnt > 0); 14921 14922 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14923 ASSERT(ism_hatid->sfmmu_ismhat); 14924 ism_ment = &scdp->scd_ism_links[rid]; 14925 ism_ment->iment_hat = scsfmmup; 14926 ism_ment->iment_base_va = rgnp->rgn_saddr; 14927 mutex_enter(&ism_mlist_lock); 14928 iment_add(ism_ment, ism_hatid); 14929 mutex_exit(&ism_mlist_lock); 14930 14931 } 14932 } 14933 } 14934 } 14935 /* 14936 * Unlink scd sfmmu from ism or hme region list for each region in the 14937 * scd region map. 
14938 */ 14939 void 14940 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14941 { 14942 uint_t rid; 14943 uint_t i; 14944 uint_t j; 14945 ulong_t w; 14946 sf_region_t *rgnp; 14947 sfmmu_t *scsfmmup; 14948 14949 scsfmmup = scdp->scd_sfmmup; 14950 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14951 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14952 continue; 14953 } 14954 j = 0; 14955 while (w) { 14956 if (!(w & 0x1)) { 14957 j++; 14958 w >>= 1; 14959 continue; 14960 } 14961 rid = (i << BT_ULSHIFT) | j; 14962 j++; 14963 w >>= 1; 14964 14965 if (rid < SFMMU_MAX_HME_REGIONS) { 14966 rgnp = srdp->srd_hmergnp[rid]; 14967 ASSERT(rgnp->rgn_id == rid); 14968 ASSERT(rgnp->rgn_refcnt > 0); 14969 sfmmu_unlink_from_hmeregion(scsfmmup, 14970 rgnp); 14971 14972 } else { 14973 sfmmu_t *ism_hatid = NULL; 14974 ism_ment_t *ism_ment; 14975 rid -= SFMMU_MAX_HME_REGIONS; 14976 rgnp = srdp->srd_ismrgnp[rid]; 14977 ASSERT(rgnp->rgn_id == rid); 14978 ASSERT(rgnp->rgn_refcnt > 0); 14979 14980 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14981 ASSERT(ism_hatid->sfmmu_ismhat); 14982 ism_ment = &scdp->scd_ism_links[rid]; 14983 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 14984 ASSERT(ism_ment->iment_base_va == 14985 rgnp->rgn_saddr); 14986 mutex_enter(&ism_mlist_lock); 14987 iment_sub(ism_ment, ism_hatid); 14988 mutex_exit(&ism_mlist_lock); 14989 14990 } 14991 } 14992 } 14993 } 14994 /* 14995 * Allocates and initialises a new SCD structure, this is called with 14996 * the srd_scd_mutex held and returns with the reference count 14997 * initialised to 1. 14998 */ 14999 static sf_scd_t * 15000 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 15001 { 15002 sf_scd_t *new_scdp; 15003 sfmmu_t *scsfmmup; 15004 int i; 15005 15006 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 15007 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 15008 15009 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 15010 new_scdp->scd_sfmmup = scsfmmup; 15011 scsfmmup->sfmmu_srdp = srdp; 15012 scsfmmup->sfmmu_scdp = new_scdp; 15013 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 15014 scsfmmup->sfmmu_scdhat = 1; 15015 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 15016 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 15017 15018 ASSERT(max_mmu_ctxdoms > 0); 15019 for (i = 0; i < max_mmu_ctxdoms; i++) { 15020 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 15021 scsfmmup->sfmmu_ctxs[i].gnum = 0; 15022 } 15023 15024 for (i = 0; i < MMU_PAGE_SIZES; i++) { 15025 new_scdp->scd_rttecnt[i] = 0; 15026 } 15027 15028 new_scdp->scd_region_map = *new_map; 15029 new_scdp->scd_refcnt = 1; 15030 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 15031 kmem_cache_free(scd_cache, new_scdp); 15032 kmem_cache_free(sfmmuid_cache, scsfmmup); 15033 return (NULL); 15034 } 15035 if (&mmu_init_scd) { 15036 mmu_init_scd(new_scdp); 15037 } 15038 return (new_scdp); 15039 } 15040 15041 /* 15042 * The first phase of a process joining an SCD. The hat structure is 15043 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 15044 * and a cross-call with context invalidation is used to cause the 15045 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 15046 * routine. 
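 *
 * A rough sketch of the overall join sequence, as implemented here and in
 * sfmmu_finish_join_scd() below:
 *
 *	sfmmu_join_scd()	 - move the hat onto scd_sf_list, adjust the
 *				   ttecnt accounting, set HAT_JOIN_SCD and
 *				   invalidate the context
 *	tsb miss trap		 - sfmmu_tsbmiss_exception() notices
 *				   HAT_JOIN_SCD and calls
 *	sfmmu_finish_join_scd()	 - invalidate the private TSBs and set
 *				   HAT_CTX1_FLAG on the SCD's ISM maps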
15047 */ 15048 static void 15049 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 15050 { 15051 hatlock_t *hatlockp; 15052 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15053 int i; 15054 sf_scd_t *old_scdp; 15055 15056 ASSERT(srdp != NULL); 15057 ASSERT(scdp != NULL); 15058 ASSERT(scdp->scd_refcnt > 0); 15059 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15060 15061 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 15062 ASSERT(old_scdp != scdp); 15063 15064 mutex_enter(&old_scdp->scd_mutex); 15065 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 15066 mutex_exit(&old_scdp->scd_mutex); 15067 /* 15068 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 15069 * include the shme rgn ttecnt for rgns that 15070 * were in the old SCD 15071 */ 15072 for (i = 0; i < mmu_page_sizes; i++) { 15073 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15074 old_scdp->scd_rttecnt[i]); 15075 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15076 sfmmup->sfmmu_scdrttecnt[i]); 15077 } 15078 } 15079 15080 /* 15081 * Move sfmmu to the scd lists. 15082 */ 15083 mutex_enter(&scdp->scd_mutex); 15084 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 15085 mutex_exit(&scdp->scd_mutex); 15086 SF_SCD_INCR_REF(scdp); 15087 15088 hatlockp = sfmmu_hat_enter(sfmmup); 15089 /* 15090 * For a multi-thread process, we must stop 15091 * all the other threads before joining the scd. 15092 */ 15093 15094 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 15095 15096 sfmmu_invalidate_ctx(sfmmup); 15097 sfmmup->sfmmu_scdp = scdp; 15098 15099 /* 15100 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 15101 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 15102 */ 15103 for (i = 0; i < mmu_page_sizes; i++) { 15104 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 15105 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 15106 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15107 -sfmmup->sfmmu_scdrttecnt[i]); 15108 } 15109 /* update tsb0 inflation count */ 15110 if (old_scdp != NULL) { 15111 sfmmup->sfmmu_tsb0_4minflcnt += 15112 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15113 } 15114 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 15115 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 15116 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15117 15118 sfmmu_hat_exit(hatlockp); 15119 15120 if (old_scdp != NULL) { 15121 SF_SCD_DECR_REF(srdp, old_scdp); 15122 } 15123 15124 } 15125 15126 /* 15127 * This routine is called by a process to become part of an SCD. It is called 15128 * from sfmmu_tsbmiss_exception() once most of the initial work has been 15129 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 
15130 */ 15131 static void 15132 sfmmu_finish_join_scd(sfmmu_t *sfmmup) 15133 { 15134 struct tsb_info *tsbinfop; 15135 15136 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15137 ASSERT(sfmmup->sfmmu_scdp != NULL); 15138 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)); 15139 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15140 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)); 15141 15142 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 15143 tsbinfop = tsbinfop->tsb_next) { 15144 if (tsbinfop->tsb_flags & TSB_SWAPPED) { 15145 continue; 15146 } 15147 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG)); 15148 15149 sfmmu_inv_tsb(tsbinfop->tsb_va, 15150 TSB_BYTES(tsbinfop->tsb_szc)); 15151 } 15152 15153 /* Set HAT_CTX1_FLAG for all SCD ISMs */ 15154 sfmmu_ism_hatflags(sfmmup, 1); 15155 15156 SFMMU_STAT(sf_join_scd); 15157 } 15158 15159 /* 15160 * This routine is called in order to check if there is an SCD which matches 15161 * the process's region map if not then a new SCD may be created. 15162 */ 15163 static void 15164 sfmmu_find_scd(sfmmu_t *sfmmup) 15165 { 15166 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15167 sf_scd_t *scdp, *new_scdp; 15168 int ret; 15169 15170 ASSERT(srdp != NULL); 15171 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15172 15173 mutex_enter(&srdp->srd_scd_mutex); 15174 for (scdp = srdp->srd_scdp; scdp != NULL; 15175 scdp = scdp->scd_next) { 15176 SF_RGNMAP_EQUAL(&scdp->scd_region_map, 15177 &sfmmup->sfmmu_region_map, ret); 15178 if (ret == 1) { 15179 SF_SCD_INCR_REF(scdp); 15180 mutex_exit(&srdp->srd_scd_mutex); 15181 sfmmu_join_scd(scdp, sfmmup); 15182 ASSERT(scdp->scd_refcnt >= 2); 15183 atomic_add_32((volatile uint32_t *) 15184 &scdp->scd_refcnt, -1); 15185 return; 15186 } else { 15187 /* 15188 * If the sfmmu region map is a subset of the scd 15189 * region map, then the assumption is that this process 15190 * will continue attaching to ISM segments until the 15191 * region maps are equal. 15192 */ 15193 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map, 15194 &sfmmup->sfmmu_region_map, ret); 15195 if (ret == 1) { 15196 mutex_exit(&srdp->srd_scd_mutex); 15197 return; 15198 } 15199 } 15200 } 15201 15202 ASSERT(scdp == NULL); 15203 /* 15204 * No matching SCD has been found, create a new one. 15205 */ 15206 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) == 15207 NULL) { 15208 mutex_exit(&srdp->srd_scd_mutex); 15209 return; 15210 } 15211 15212 /* 15213 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd. 15214 */ 15215 15216 /* Set scd_rttecnt for shme rgns in SCD */ 15217 sfmmu_set_scd_rttecnt(srdp, new_scdp); 15218 15219 /* 15220 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists. 15221 */ 15222 sfmmu_link_scd_to_regions(srdp, new_scdp); 15223 sfmmu_add_scd(&srdp->srd_scdp, new_scdp); 15224 SFMMU_STAT_ADD(sf_create_scd, 1); 15225 15226 mutex_exit(&srdp->srd_scd_mutex); 15227 sfmmu_join_scd(new_scdp, sfmmup); 15228 ASSERT(new_scdp->scd_refcnt >= 2); 15229 atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1); 15230 } 15231 15232 /* 15233 * This routine is called by a process to remove itself from an SCD. It is 15234 * either called when the processes has detached from a segment or from 15235 * hat_free_start() as a result of calling exit. 
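 *
 * Two cases are handled below: when sfmmu_free is set the process is
 * exiting, so only the scd_sf_list and ttecnt accounting needs to be fixed
 * up; otherwise the context is invalidated and the ISM map flags are
 * cleared so the remaining threads stop using the shared context before
 * the hat is unlinked from the SCD.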
15236 */ 15237 static void 15238 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15239 { 15240 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15241 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15242 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15243 int i; 15244 15245 ASSERT(scdp != NULL); 15246 ASSERT(srdp != NULL); 15247 15248 if (sfmmup->sfmmu_free) { 15249 /* 15250 * If the process is part of an SCD the sfmmu is unlinked 15251 * from scd_sf_list. 15252 */ 15253 mutex_enter(&scdp->scd_mutex); 15254 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15255 mutex_exit(&scdp->scd_mutex); 15256 /* 15257 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15258 * are about to leave the SCD 15259 */ 15260 for (i = 0; i < mmu_page_sizes; i++) { 15261 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15262 scdp->scd_rttecnt[i]); 15263 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15264 sfmmup->sfmmu_scdrttecnt[i]); 15265 sfmmup->sfmmu_scdrttecnt[i] = 0; 15266 } 15267 sfmmup->sfmmu_scdp = NULL; 15268 15269 SF_SCD_DECR_REF(srdp, scdp); 15270 return; 15271 } 15272 15273 ASSERT(r_type != SFMMU_REGION_ISM || 15274 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15275 ASSERT(scdp->scd_refcnt); 15276 ASSERT(!sfmmup->sfmmu_free); 15277 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15278 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15279 15280 /* 15281 * Wait for ISM maps to be updated. 15282 */ 15283 if (r_type != SFMMU_REGION_ISM) { 15284 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15285 sfmmup->sfmmu_scdp != NULL) { 15286 cv_wait(&sfmmup->sfmmu_tsb_cv, 15287 HATLOCK_MUTEXP(hatlockp)); 15288 } 15289 15290 if (sfmmup->sfmmu_scdp == NULL) { 15291 sfmmu_hat_exit(hatlockp); 15292 return; 15293 } 15294 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15295 } 15296 15297 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15298 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15299 /* 15300 * Since HAT_JOIN_SCD was set our context 15301 * is still invalid. 15302 */ 15303 } else { 15304 /* 15305 * For a multi-thread process, we must stop 15306 * all the other threads before leaving the scd. 15307 */ 15308 15309 sfmmu_invalidate_ctx(sfmmup); 15310 } 15311 15312 /* Clear all the rid's for ISM, delete flags, etc */ 15313 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15314 sfmmu_ism_hatflags(sfmmup, 0); 15315 15316 /* 15317 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15318 * are in SCD before this sfmmup leaves the SCD. 15319 */ 15320 for (i = 0; i < mmu_page_sizes; i++) { 15321 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15322 scdp->scd_rttecnt[i]); 15323 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15324 sfmmup->sfmmu_scdrttecnt[i]); 15325 sfmmup->sfmmu_scdrttecnt[i] = 0; 15326 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15327 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15328 sfmmup->sfmmu_scdismttecnt[i] = 0; 15329 } 15330 /* update tsb0 inflation count */ 15331 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15332 15333 if (r_type != SFMMU_REGION_ISM) { 15334 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15335 } 15336 sfmmup->sfmmu_scdp = NULL; 15337 15338 sfmmu_hat_exit(hatlockp); 15339 15340 /* 15341 * Unlink sfmmu from scd_sf_list this can be done without holding 15342 * the hat lock as we hold the sfmmu_as lock which prevents 15343 * hat_join_region from adding this thread to the scd again. Other 15344 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15345 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15346 * while holding the hat lock. 
15347 */ 15348 mutex_enter(&scdp->scd_mutex); 15349 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15350 mutex_exit(&scdp->scd_mutex); 15351 SFMMU_STAT(sf_leave_scd); 15352 15353 SF_SCD_DECR_REF(srdp, scdp); 15354 hatlockp = sfmmu_hat_enter(sfmmup); 15355 15356 } 15357 15358 /* 15359 * Unlink and free up an SCD structure with a reference count of 0. 15360 */ 15361 static void 15362 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15363 { 15364 sfmmu_t *scsfmmup; 15365 sf_scd_t *sp; 15366 hatlock_t *shatlockp; 15367 int i, ret; 15368 15369 mutex_enter(&srdp->srd_scd_mutex); 15370 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15371 if (sp == scdp) 15372 break; 15373 } 15374 if (sp == NULL || sp->scd_refcnt) { 15375 mutex_exit(&srdp->srd_scd_mutex); 15376 return; 15377 } 15378 15379 /* 15380 * It is possible that the scd has been freed and reallocated with a 15381 * different region map while we've been waiting for the srd_scd_mutex. 15382 */ 15383 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret); 15384 if (ret != 1) { 15385 mutex_exit(&srdp->srd_scd_mutex); 15386 return; 15387 } 15388 15389 ASSERT(scdp->scd_sf_list == NULL); 15390 /* 15391 * Unlink scd from srd_scdp list. 15392 */ 15393 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15394 mutex_exit(&srdp->srd_scd_mutex); 15395 15396 sfmmu_unlink_scd_from_regions(srdp, scdp); 15397 15398 /* Clear shared context tsb and release ctx */ 15399 scsfmmup = scdp->scd_sfmmup; 15400 15401 /* 15402 * create a barrier so that scd will not be destroyed 15403 * if other thread still holds the same shared hat lock. 15404 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15405 * shared hat lock before checking the shared tsb reloc flag. 15406 */ 15407 shatlockp = sfmmu_hat_enter(scsfmmup); 15408 sfmmu_hat_exit(shatlockp); 15409 15410 sfmmu_free_scd_tsbs(scsfmmup); 15411 15412 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 15413 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) { 15414 kmem_free(scsfmmup->sfmmu_hmeregion_links[i], 15415 SFMMU_L2_HMERLINKS_SIZE); 15416 scsfmmup->sfmmu_hmeregion_links[i] = NULL; 15417 } 15418 } 15419 kmem_cache_free(sfmmuid_cache, scsfmmup); 15420 kmem_cache_free(scd_cache, scdp); 15421 SFMMU_STAT(sf_destroy_scd); 15422 } 15423 15424 /* 15425 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to 15426 * bits which are set in the ism_region_map parameter. This flag indicates to 15427 * the tsbmiss handler that mapping for these segments should be loaded using 15428 * the shared context. 15429 */ 15430 static void 15431 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag) 15432 { 15433 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15434 ism_blk_t *ism_blkp; 15435 ism_map_t *ism_map; 15436 int i, rid; 15437 15438 ASSERT(sfmmup->sfmmu_iblk != NULL); 15439 ASSERT(scdp != NULL); 15440 /* 15441 * Note that the caller either set HAT_ISMBUSY flag or checked 15442 * under hat lock that HAT_ISMBUSY was not set by another thread. 
15443 */
15444 ASSERT(sfmmu_hat_lock_held(sfmmup));
15445
15446 ism_blkp = sfmmup->sfmmu_iblk;
15447 while (ism_blkp != NULL) {
15448 ism_map = ism_blkp->iblk_maps;
15449 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
15450 rid = ism_map[i].imap_rid;
15451 if (rid == SFMMU_INVALID_ISMRID) {
15452 continue;
15453 }
15454 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15455 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15456 addflag) {
15457 ism_map[i].imap_hatflags |=
15458 HAT_CTX1_FLAG;
15459 } else {
15460 ism_map[i].imap_hatflags &=
15461 ~HAT_CTX1_FLAG;
15462 }
15463 }
15464 ism_blkp = ism_blkp->iblk_next;
15465 }
15466 }
15467
15468 static int
15469 sfmmu_srd_lock_held(sf_srd_t *srdp)
15470 {
15471 return (MUTEX_HELD(&srdp->srd_mutex));
15472 }
15473
15474 /* ARGSUSED */
15475 static int
15476 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15477 {
15478 sf_scd_t *scdp = (sf_scd_t *)buf;
15479
15480 bzero(buf, sizeof (sf_scd_t));
15481 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15482 return (0);
15483 }
15484
15485 /* ARGSUSED */
15486 static void
15487 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15488 {
15489 sf_scd_t *scdp = (sf_scd_t *)buf;
15490
15491 mutex_destroy(&scdp->scd_mutex);
15492 }
15493
15494 /*
15495 * The listp parameter is a pointer to a list of hmeblks which are partially
15496 * freed as a result of calling sfmmu_hblk_hash_rm(); the last phase of the
15497 * freeing process is to cross-call all cpus to ensure that there are no
15498 * remaining cached references.
15499 *
15500 * If the local generation number is less than the global then we can free
15501 * hmeblks which are already on the pending queue as another cpu has completed
15502 * the cross-call.
15503 *
15504 * We cross-call to make sure that there are no threads on other cpus accessing
15505 * these hmeblks and then complete the process of freeing them under the
15506 * following conditions:
15507 * The total number of pending hmeblks is greater than the threshold
15508 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15509 * It is at least 1 second since the last time we cross-called
15510 *
15511 * Otherwise, we add the hmeblks to the per-cpu pending queue.
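 *
 * Expressed in terms of the test used below, hmeblks are purged rather
 * than queued roughly when:
 *
 *	!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
 *	    (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
 *	    one_second_expired)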
15512 */
15513 static void
15514 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15515 {
15516 struct hme_blk *hblkp, *pr_hblkp = NULL;
15517 int count = 0;
15518 cpuset_t cpuset = cpu_ready_set;
15519 cpu_hme_pend_t *cpuhp;
15520 timestruc_t now;
15521 int one_second_expired = 0;
15522
15523 gethrestime_lasttick(&now);
15524
15525 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15526 ASSERT(hblkp->hblk_shw_bit == 0);
15527 ASSERT(hblkp->hblk_shared == 0);
15528 count++;
15529 pr_hblkp = hblkp;
15530 }
15531
15532 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15533 mutex_enter(&cpuhp->chp_mutex);
15534
15535 if ((cpuhp->chp_count + count) == 0) {
15536 mutex_exit(&cpuhp->chp_mutex);
15537 return;
15538 }
15539
15540 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15541 one_second_expired = 1;
15542 }
15543
15544 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15545 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15546 one_second_expired)) {
15547 /* Append global list to local */
15548 if (pr_hblkp == NULL) {
15549 *listp = cpuhp->chp_listp;
15550 } else {
15551 pr_hblkp->hblk_next = cpuhp->chp_listp;
15552 }
15553 cpuhp->chp_listp = NULL;
15554 cpuhp->chp_count = 0;
15555 cpuhp->chp_timestamp = now.tv_sec;
15556 mutex_exit(&cpuhp->chp_mutex);
15557
15558 kpreempt_disable();
15559 CPUSET_DEL(cpuset, CPU->cpu_id);
15560 xt_sync(cpuset);
15561 xt_sync(cpuset);
15562 kpreempt_enable();
15563
15564 /*
15565 * At this stage we know that no trap handlers on other
15566 * cpus can have references to hmeblks on the list.
15567 */
15568 sfmmu_hblk_free(listp);
15569 } else if (*listp != NULL) {
15570 pr_hblkp->hblk_next = cpuhp->chp_listp;
15571 cpuhp->chp_listp = *listp;
15572 cpuhp->chp_count += count;
15573 *listp = NULL;
15574 mutex_exit(&cpuhp->chp_mutex);
15575 } else {
15576 mutex_exit(&cpuhp->chp_mutex);
15577 }
15578 }
15579
15580 /*
15581 * Add an hmeblk to the hash list.
15582 */
15583 void
15584 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15585 uint64_t hblkpa)
15586 {
15587 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15588 #ifdef DEBUG
15589 if (hmebp->hmeblkp == NULL) {
15590 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15591 }
15592 #endif /* DEBUG */
15593
15594 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15595 /*
15596 * Since the TSB miss handler now does not lock the hash chain before
15597 * walking it, make sure that the hmeblk's nextpa is globally visible
15598 * before we make the hmeblk globally visible by updating the chain root
15599 * pointer in the hash bucket.
15600 */
15601 membar_producer();
15602 hmebp->hmeh_nextpa = hblkpa;
15603 hmeblkp->hblk_next = hmebp->hmeblkp;
15604 hmebp->hmeblkp = hmeblkp;
15605
15606 }
15607
15608 /*
15609 * This function is the first part of a two-part process to remove an hmeblk
15610 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15611 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15612 * a per-cpu pending list using the virtual address pointer.
15613 *
15614 * TSB miss trap handlers that start after this phase will no longer see
15615 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15616 * can still use it for further chain traversal because we haven't yet modified
15617 * the next physical pointer or freed it.
15618 *
15619 * In the second phase of hmeblk removal we'll issue a barrier xcall before
15620 * we reuse or free this hmeblk.
 * This will make sure that all lingering references to the hmeblk after the
 * first phase disappear before we finally reclaim it.
 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
 * during their traversal.
 *
 * The hmehash_mutex must be held when calling this function.
 *
 * Input:
 *	hmebp - hme hash bucket pointer
 *	hmeblkp - address of hmeblk to be removed
 *	pr_hblk - virtual address of previous hmeblkp
 *	listp - pointer to list of hmeblks linked by virtual address
 *	free_now flag - indicates that a complete removal from the hash chains
 *			is necessary.
 *
 * It is inefficient to use the free_now flag, as a cross-call is required to
 * remove a single hmeblk from the hash chain, but it is necessary when hmeblks
 * are in short supply.
 */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	struct hme_blk *pr_hblk, struct hme_blk **listp,
	int free_now)
{
	int shw_size, vshift;
	struct hme_blk *shw_hblkp;
	uint_t shw_mask, newshw_mask;
	caddr_t vaddr;
	int size;
	cpuset_t cpuset = cpu_ready_set;

	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));

	if (hmebp->hmeblkp == hmeblkp) {
		hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
		hmebp->hmeblkp = hmeblkp->hblk_next;
	} else {
		pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
		pr_hblk->hblk_next = hmeblkp->hblk_next;
	}

	size = get_hblk_ttesz(hmeblkp);
	shw_hblkp = hmeblkp->hblk_shadow;
	if (shw_hblkp) {
		ASSERT(hblktosfmmu(hmeblkp) != KHATID);
		ASSERT(!hmeblkp->hblk_shared);
#ifdef	DEBUG
		if (mmu_page_sizes == max_mmu_page_sizes) {
			ASSERT(size < TTE256M);
		} else {
			ASSERT(size < TTE4M);
		}
#endif /* DEBUG */

		shw_size = get_hblk_ttesz(shw_hblkp);
		vaddr = (caddr_t)get_hblk_base(hmeblkp);
		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
		ASSERT(vshift < 8);
		/*
		 * Atomically clear shadow mask bit
		 */
		do {
			shw_mask = shw_hblkp->hblk_shw_mask;
			ASSERT(shw_mask & (1 << vshift));
			newshw_mask = shw_mask & ~(1 << vshift);
			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
			    shw_mask, newshw_mask);
		} while (newshw_mask != shw_mask);
		hmeblkp->hblk_shadow = NULL;
	}
	hmeblkp->hblk_shw_bit = 0;

	if (hmeblkp->hblk_shared) {
#ifdef	DEBUG
		sf_srd_t	*srdp;
		sf_region_t	*rgnp;
		uint_t		rid;

		srdp = hblktosrd(hmeblkp);
		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
		rid = hmeblkp->hblk_tag.htag_rid;
		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
		rgnp = srdp->srd_hmergnp[rid];
		ASSERT(rgnp != NULL);
		SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
#endif /* DEBUG */
		hmeblkp->hblk_shared = 0;
	}
	if (free_now) {
		kpreempt_disable();
		CPUSET_DEL(cpuset, CPU->cpu_id);
		xt_sync(cpuset);
		xt_sync(cpuset);
		kpreempt_enable();

		hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
		hmeblkp->hblk_next = NULL;
	} else {
		/* Append hmeblkp to listp for processing later. */
		hmeblkp->hblk_next = *listp;
		*listp = hmeblkp;
	}
}

/*
 * This routine is called when memory is in short supply and returns a free
 * hmeblk of the requested size from the cpu pending lists.
 */
static struct hme_blk *
sfmmu_check_pending_hblks(int size)
{
	int i;
	struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
	int found_hmeblk;
	cpuset_t cpuset = cpu_ready_set;
	cpu_hme_pend_t *cpuhp;

	/* Flush cpu hblk pending queues */
	for (i = 0; i < NCPU; i++) {
		cpuhp = &cpu_hme_pend[i];
		if (cpuhp->chp_listp != NULL) {
			mutex_enter(&cpuhp->chp_mutex);
			if (cpuhp->chp_listp == NULL) {
				mutex_exit(&cpuhp->chp_mutex);
				continue;
			}
			found_hmeblk = 0;
			last_hmeblkp = NULL;
			for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
			    hmeblkp = hmeblkp->hblk_next) {
				if (get_hblk_ttesz(hmeblkp) == size) {
					if (last_hmeblkp == NULL) {
						cpuhp->chp_listp =
						    hmeblkp->hblk_next;
					} else {
						last_hmeblkp->hblk_next =
						    hmeblkp->hblk_next;
					}
					ASSERT(cpuhp->chp_count > 0);
					cpuhp->chp_count--;
					found_hmeblk = 1;
					break;
				} else {
					last_hmeblkp = hmeblkp;
				}
			}
			mutex_exit(&cpuhp->chp_mutex);

			if (found_hmeblk) {
				kpreempt_disable();
				CPUSET_DEL(cpuset, CPU->cpu_id);
				xt_sync(cpuset);
				xt_sync(cpuset);
				kpreempt_enable();
				return (hmeblkp);
			}
		}
	}
	return (NULL);
}
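
/*
 * Illustrative sketch of the two-phase removal pattern described above:
 * sfmmu_hblk_hash_rm() unlinks an hmeblk under the hash bucket lock, and
 * sfmmu_hblks_list_purge() later cross-calls and frees (or queues) the
 * collected hmeblks. The helper name below is hypothetical and the sketch
 * assumes the SFMMU_HASH_LOCK/SFMMU_HASH_UNLOCK bucket-lock macros from
 * <vm/hat_sfmmu.h>; it is not used anywhere in this file and is compiled
 * out.
 */
#ifdef notdef
static void
sfmmu_hblk_remove_sketch(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	struct hme_blk *pr_hblk)
{
	struct hme_blk *list = NULL;

	/* Phase 1: unlink from the hash chain, deferring the actual free. */
	SFMMU_HASH_LOCK(hmebp);
	sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
	SFMMU_HASH_UNLOCK(hmebp);

	/*
	 * Phase 2: cross-call (when the thresholds described earlier are met)
	 * and free the hmeblks, or queue them on this cpu's pending list.
	 */
	sfmmu_hblks_list_purge(&list, 0);
}
#endif /* notdef */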