1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 /* 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 26 * Copyright 2016 Gary Mills 27 * Copyright 2019 Joyent, Inc. 28 */ 29 30 /* 31 * VM - Hardware Address Translation management for Spitfire MMU. 32 * 33 * This file implements the machine specific hardware translation 34 * needed by the VM system. The machine independent interface is 35 * described in <vm/hat.h> while the machine dependent interface 36 * and data structures are described in <vm/hat_sfmmu.h>. 37 * 38 * The hat layer manages the address translation hardware as a cache 39 * driven by calls from the higher levels in the VM system. 40 */ 41 42 #include <sys/types.h> 43 #include <sys/kstat.h> 44 #include <vm/hat.h> 45 #include <vm/hat_sfmmu.h> 46 #include <vm/page.h> 47 #include <sys/pte.h> 48 #include <sys/systm.h> 49 #include <sys/mman.h> 50 #include <sys/sysmacros.h> 51 #include <sys/machparam.h> 52 #include <sys/vtrace.h> 53 #include <sys/kmem.h> 54 #include <sys/mmu.h> 55 #include <sys/cmn_err.h> 56 #include <sys/cpu.h> 57 #include <sys/cpuvar.h> 58 #include <sys/debug.h> 59 #include <sys/lgrp.h> 60 #include <sys/archsystm.h> 61 #include <sys/machsystm.h> 62 #include <sys/vmsystm.h> 63 #include <vm/as.h> 64 #include <vm/seg.h> 65 #include <vm/seg_kp.h> 66 #include <vm/seg_kmem.h> 67 #include <vm/seg_kpm.h> 68 #include <vm/rm.h> 69 #include <sys/t_lock.h> 70 #include <sys/obpdefs.h> 71 #include <sys/vm_machparam.h> 72 #include <sys/var.h> 73 #include <sys/trap.h> 74 #include <sys/machtrap.h> 75 #include <sys/scb.h> 76 #include <sys/bitmap.h> 77 #include <sys/machlock.h> 78 #include <sys/membar.h> 79 #include <sys/atomic.h> 80 #include <sys/cpu_module.h> 81 #include <sys/prom_debug.h> 82 #include <sys/ksynch.h> 83 #include <sys/mem_config.h> 84 #include <sys/mem_cage.h> 85 #include <vm/vm_dep.h> 86 #include <sys/fpu/fpusystm.h> 87 #include <vm/mach_kpm.h> 88 #include <sys/callb.h> 89 90 #ifdef DEBUG 91 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 92 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 93 caddr_t _eaddr = (saddr) + (len); \ 94 sf_srd_t *_srdp; \ 95 sf_region_t *_rgnp; \ 96 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 97 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 98 ASSERT((hat) != ksfmmup); \ 99 _srdp = (hat)->sfmmu_srdp; \ 100 ASSERT(_srdp != NULL); \ 101 ASSERT(_srdp->srd_refcnt != 0); \ 102 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 103 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 104 ASSERT(_rgnp->rgn_refcnt != 0); \ 105 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 106 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 107 SFMMU_REGION_HME); \ 108 
ASSERT((saddr) >= _rgnp->rgn_saddr); \ 109 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \ 110 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 111 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 112 } 113 114 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 115 { \ 116 caddr_t _hsva; \ 117 caddr_t _heva; \ 118 caddr_t _rsva; \ 119 caddr_t _reva; \ 120 int _ttesz = get_hblk_ttesz(hmeblkp); \ 121 int _flagtte; \ 122 ASSERT((srdp)->srd_refcnt != 0); \ 123 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 124 ASSERT((rgnp)->rgn_id == rid); \ 125 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 126 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 127 SFMMU_REGION_HME); \ 128 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 129 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 130 _heva = get_hblk_endaddr(hmeblkp); \ 131 _rsva = (caddr_t)P2ALIGN( \ 132 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 133 _reva = (caddr_t)P2ROUNDUP( \ 134 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 135 HBLK_MIN_BYTES); \ 136 ASSERT(_hsva >= _rsva); \ 137 ASSERT(_hsva < _reva); \ 138 ASSERT(_heva > _rsva); \ 139 ASSERT(_heva <= _reva); \ 140 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 141 _ttesz; \ 142 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 143 } 144 145 #else /* DEBUG */ 146 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 147 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 148 #endif /* DEBUG */ 149 150 #if defined(SF_ERRATA_57) 151 extern caddr_t errata57_limit; 152 #endif 153 154 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 155 (sizeof (int64_t))) 156 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 157 158 #define HBLK_RESERVE_CNT 128 159 #define HBLK_RESERVE_MIN 20 160 161 static struct hme_blk *freehblkp; 162 static kmutex_t freehblkp_lock; 163 static int freehblkcnt; 164 165 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 166 static kmutex_t hblk_reserve_lock; 167 static kthread_t *hblk_reserve_thread; 168 169 static nucleus_hblk8_info_t nucleus_hblk8; 170 static nucleus_hblk1_info_t nucleus_hblk1; 171 172 /* 173 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here 174 * after the initial phase of removing an hmeblk from the hash chain, see 175 * the detailed comment in sfmmu_hblk_hash_rm() for further details. 176 */ 177 static cpu_hme_pend_t *cpu_hme_pend; 178 static uint_t cpu_hme_pend_thresh; 179 /* 180 * SFMMU specific hat functions 181 */ 182 void hat_pagecachectl(struct page *, int); 183 184 /* flags for hat_pagecachectl */ 185 #define HAT_CACHE 0x1 186 #define HAT_UNCACHE 0x2 187 #define HAT_TMPNC 0x4 188 189 /* 190 * Flag to allow the creation of non-cacheable translations 191 * to system memory. It is off by default. At the moment this 192 * flag is used by the ecache error injector. The error injector 193 * will turn it on when creating such a translation then shut it 194 * off when it's finished. 195 */ 196 197 int sfmmu_allow_nc_trans = 0; 198 199 /* 200 * Flag to disable large page support. 201 * value of 1 => disable all large pages. 202 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 203 * 204 * For example, use the value 0x4 to disable 512K pages. 205 * 206 */ 207 #define LARGE_PAGES_OFF 0x1 208 209 /* 210 * The disable_large_pages and disable_ism_large_pages variables control 211 * hat_memload_array and the page sizes to be used by ISM and the kernel. 
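 * Note that disable_ism_large_pages is initialized below to (1 << TTE512K),
 * which enforces the ISM/DISM restriction called out in the WARNING at the
 * end of this comment.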
212 * 213 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 214 * are only used to control which OOB pages to use at upper VM segment creation 215 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 216 * Their values may come from platform or CPU specific code to disable page 217 * sizes that should not be used. 218 * 219 * WARNING: 512K pages are currently not supported for ISM/DISM. 220 */ 221 uint_t disable_large_pages = 0; 222 uint_t disable_ism_large_pages = (1 << TTE512K); 223 uint_t disable_auto_data_large_pages = 0; 224 uint_t disable_auto_text_large_pages = 0; 225 226 /* 227 * Private sfmmu data structures for hat management 228 */ 229 static struct kmem_cache *sfmmuid_cache; 230 static struct kmem_cache *mmuctxdom_cache; 231 232 /* 233 * Private sfmmu data structures for tsb management 234 */ 235 static struct kmem_cache *sfmmu_tsbinfo_cache; 236 static struct kmem_cache *sfmmu_tsb8k_cache; 237 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 238 static vmem_t *kmem_bigtsb_arena; 239 static vmem_t *kmem_tsb_arena; 240 241 /* 242 * sfmmu static variables for hmeblk resource management. 243 */ 244 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 245 static struct kmem_cache *sfmmu8_cache; 246 static struct kmem_cache *sfmmu1_cache; 247 static struct kmem_cache *pa_hment_cache; 248 249 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 250 /* 251 * private data for ism 252 */ 253 static struct kmem_cache *ism_blk_cache; 254 static struct kmem_cache *ism_ment_cache; 255 #define ISMID_STARTADDR NULL 256 257 /* 258 * Region management data structures and function declarations. 259 */ 260 261 static void sfmmu_leave_srd(sfmmu_t *); 262 static int sfmmu_srdcache_constructor(void *, void *, int); 263 static void sfmmu_srdcache_destructor(void *, void *); 264 static int sfmmu_rgncache_constructor(void *, void *, int); 265 static void sfmmu_rgncache_destructor(void *, void *); 266 static int sfrgnmap_isnull(sf_region_map_t *); 267 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 268 static int sfmmu_scdcache_constructor(void *, void *, int); 269 static void sfmmu_scdcache_destructor(void *, void *); 270 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 271 size_t, void *, u_offset_t); 272 273 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 274 static sf_srd_bucket_t *srd_buckets; 275 static struct kmem_cache *srd_cache; 276 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 277 static struct kmem_cache *region_cache; 278 static struct kmem_cache *scd_cache; 279 280 #ifdef sun4v 281 int use_bigtsb_arena = 1; 282 #else 283 int use_bigtsb_arena = 0; 284 #endif 285 286 /* External /etc/system tunable, for turning on&off the shctx support */ 287 int disable_shctx = 0; 288 /* Internal variable, set by MD if the HW supports shctx feature */ 289 int shctx_on = 0; 290 291 #ifdef DEBUG 292 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 293 #endif 294 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 295 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 296 297 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 298 static void sfmmu_find_scd(sfmmu_t *); 299 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 300 static void sfmmu_finish_join_scd(sfmmu_t *); 301 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 302 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 303 static int 
sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 304 static void sfmmu_free_scd_tsbs(sfmmu_t *); 305 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 306 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 307 static void sfmmu_ism_hatflags(sfmmu_t *, int); 308 static int sfmmu_srd_lock_held(sf_srd_t *); 309 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 310 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 311 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 312 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 313 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 314 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 315 316 /* 317 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 318 * HAT flags, synchronizing TLB/TSB coherency, and context management. 319 * The lock is hashed on the sfmmup since the case where we need to lock 320 * all processes is rare but does occur (e.g. we need to unload a shared 321 * mapping from all processes using the mapping). We have a lot of buckets, 322 * and each slab of sfmmu_t's can use about a quarter of them, giving us 323 * a fairly good distribution without wasting too much space and overhead 324 * when we have to grab them all. 325 */ 326 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 327 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 328 329 /* 330 * Hash algorithm optimized for a small number of slabs. 331 * 7 is (highbit((sizeof sfmmu_t)) - 1) 332 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 333 * kmem_cache, and thus they will be sequential within that cache. In 334 * addition, each new slab will have a different "color" up to cache_maxcolor 335 * which will skew the hashing for each successive slab which is allocated. 336 * If the size of sfmmu_t changed to a larger size, this algorithm may need 337 * to be revisited. 338 */ 339 #define TSB_HASH_SHIFT_BITS (7) 340 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 341 342 #ifdef DEBUG 343 int tsb_hash_debug = 0; 344 #define TSB_HASH(sfmmup) \ 345 (tsb_hash_debug ? &hat_lock[0] : \ 346 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 347 #else /* DEBUG */ 348 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 349 #endif /* DEBUG */ 350 351 352 /* sfmmu_replace_tsb() return codes. */ 353 typedef enum tsb_replace_rc { 354 TSB_SUCCESS, 355 TSB_ALLOCFAIL, 356 TSB_LOSTRACE, 357 TSB_ALREADY_SWAPPED, 358 TSB_CANTGROW 359 } tsb_replace_rc_t; 360 361 /* 362 * Flags for TSB allocation routines. 363 */ 364 #define TSB_ALLOC 0x01 365 #define TSB_FORCEALLOC 0x02 366 #define TSB_GROW 0x04 367 #define TSB_SHRINK 0x08 368 #define TSB_SWAPIN 0x10 369 370 /* 371 * Support for HAT callbacks. 372 */ 373 #define SFMMU_MAX_RELOC_CALLBACKS 10 374 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 375 static id_t sfmmu_cb_nextid = 0; 376 static id_t sfmmu_tsb_cb_id; 377 struct sfmmu_callback *sfmmu_cb_table; 378 379 kmutex_t kpr_mutex; 380 kmutex_t kpr_suspendlock; 381 kthread_t *kreloc_thread; 382 383 /* 384 * Enable VA->PA translation sanity checking on DEBUG kernels. 385 * Disabled by default. This is incompatible with some 386 * drivers (error injector, RSM) so if it breaks you get 387 * to keep both pieces. 
388 */ 389 int hat_check_vtop = 0; 390 391 /* 392 * Private sfmmu routines (prototypes) 393 */ 394 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 395 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 396 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 397 uint_t); 398 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 399 caddr_t, demap_range_t *, uint_t); 400 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 401 caddr_t, int); 402 static void sfmmu_hblk_free(struct hme_blk **); 403 static void sfmmu_hblks_list_purge(struct hme_blk **, int); 404 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 405 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 406 static struct hme_blk *sfmmu_hblk_steal(int); 407 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 408 struct hme_blk *, uint64_t, struct hme_blk *); 409 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 410 411 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 412 struct page **, uint_t, uint_t, uint_t); 413 static void hat_do_memload(struct hat *, caddr_t, struct page *, 414 uint_t, uint_t, uint_t); 415 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 416 uint_t, uint_t, pgcnt_t, uint_t); 417 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 418 uint_t); 419 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 420 uint_t, uint_t); 421 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 422 caddr_t, int, uint_t); 423 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 424 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 425 uint_t); 426 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 427 caddr_t, page_t **, uint_t, uint_t); 428 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 429 430 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 431 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 432 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 433 #ifdef VAC 434 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 435 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 436 int tst_tnc(page_t *pp, pgcnt_t); 437 void conv_tnc(page_t *pp, int); 438 #endif 439 440 static void sfmmu_get_ctx(sfmmu_t *); 441 static void sfmmu_free_sfmmu(sfmmu_t *); 442 443 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 444 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 445 446 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 447 static void hat_pagereload(struct page *, struct page *); 448 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 449 #ifdef VAC 450 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 451 static void sfmmu_page_cache(page_t *, int, int, int); 452 #endif 453 454 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 455 struct hme_blk *, int); 456 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 457 pfn_t, int, int, int, int); 458 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 459 pfn_t, int); 460 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 461 static void sfmmu_tlb_range_demap(demap_range_t *); 462 static void sfmmu_invalidate_ctx(sfmmu_t *); 463 static void sfmmu_sync_mmustate(sfmmu_t *); 464 465 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 
466 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 467 sfmmu_t *); 468 static void sfmmu_tsb_free(struct tsb_info *); 469 static void sfmmu_tsbinfo_free(struct tsb_info *); 470 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 471 sfmmu_t *); 472 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 473 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 474 static int sfmmu_select_tsb_szc(pgcnt_t); 475 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 476 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 477 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 478 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 479 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 480 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 481 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 482 hatlock_t *, uint_t); 483 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 484 485 #ifdef VAC 486 void sfmmu_cache_flush(pfn_t, int); 487 void sfmmu_cache_flushcolor(int, pfn_t); 488 #endif 489 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 490 caddr_t, demap_range_t *, uint_t, int); 491 492 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 493 static uint_t sfmmu_ptov_attr(tte_t *); 494 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 495 caddr_t, demap_range_t *, uint_t); 496 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 497 static int sfmmu_idcache_constructor(void *, void *, int); 498 static void sfmmu_idcache_destructor(void *, void *); 499 static int sfmmu_hblkcache_constructor(void *, void *, int); 500 static void sfmmu_hblkcache_destructor(void *, void *); 501 static void sfmmu_hblkcache_reclaim(void *); 502 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 503 struct hmehash_bucket *); 504 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *, 505 struct hme_blk *, struct hme_blk **, int); 506 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *, 507 uint64_t); 508 static struct hme_blk *sfmmu_check_pending_hblks(int); 509 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 510 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 511 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 512 int, caddr_t *); 513 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 514 515 static void sfmmu_rm_large_mappings(page_t *, int); 516 517 static void hat_lock_init(void); 518 static void hat_kstat_init(void); 519 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 520 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 521 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 522 static void sfmmu_check_page_sizes(sfmmu_t *, int); 523 int fnd_mapping_sz(page_t *); 524 static void iment_add(struct ism_ment *, struct hat *); 525 static void iment_sub(struct ism_ment *, struct hat *); 526 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 527 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 528 extern void sfmmu_clear_utsbinfo(void); 529 530 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t); 531 532 extern int vpm_enable; 533 534 /* kpm globals */ 535 #ifdef DEBUG 536 /* 537 * Enable trap level tsbmiss handling 538 */ 539 int kpm_tsbmtl = 1; 540 541 /* 542 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 543 * required TLB shootdowns in this case, so handle w/ care. Off by default. 
 */
int kpm_tlb_flush;
#endif	/* DEBUG */

static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);

#ifdef DEBUG
static void sfmmu_check_hblk_flist();
#endif

/*
 * Semi-private sfmmu data structures.  Some of them are initialized in
 * startup or in hat_init.  Some of them are private but accessed by
 * assembly code or mach_sfmmu.c.
 */
struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
uint64_t uhme_hash_pa;			/* PA of uhme_hash */
uint64_t khme_hash_pa;			/* PA of khme_hash */
int	uhmehash_num;			/* # of buckets in user hash table */
int	khmehash_num;			/* # of buckets in kernel hash table */

uint_t	max_mmu_ctxdoms = 0;		/* max context domains in the system */
mmu_ctx_t **mmu_ctxs_tbl;		/* global array of context domains */
uint64_t mmu_saved_gnum = 0;		/* to init incoming MMUs' gnums */

#define	DEFAULT_NUM_CTXS_PER_MMU 8192
static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;

int	cache;				/* describes system cache */

caddr_t	ktsb_base;		/* kernel 8k-indexed tsb base address */
uint64_t ktsb_pbase;		/* kernel 8k-indexed tsb phys address */
int	ktsb_szcode;		/* kernel 8k-indexed tsb size code */
int	ktsb_sz;		/* kernel 8k-indexed tsb size */

caddr_t	ktsb4m_base;		/* kernel 4m-indexed tsb base address */
uint64_t ktsb4m_pbase;		/* kernel 4m-indexed tsb phys address */
int	ktsb4m_szcode;		/* kernel 4m-indexed tsb size code */
int	ktsb4m_sz;		/* kernel 4m-indexed tsb size */

uint64_t kpm_tsbbase;		/* kernel seg_kpm 4M TSB base address */
int	kpm_tsbsz;		/* kernel seg_kpm 4M TSB size code */
uint64_t kpmsm_tsbbase;		/* kernel seg_kpm 8K TSB base address */
int	kpmsm_tsbsz;		/* kernel seg_kpm 8K TSB size code */

#ifndef sun4v
int	utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
int	utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
int	dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
caddr_t	utsb_vabase;		/* reserved kernel virtual memory */
caddr_t	utsb4m_vabase;		/* for trap handler TSB accesses */
#endif /* sun4v */
uint64_t tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
vmem_t *kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */
vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX];	/* dynamic 256M TSBs */

/*
 * Size to use for TSB slabs.  Future platforms that support page sizes
 * larger than 4M may wish to change these values, and provide their own
 * assembly macros for building and decoding the TSB base register contents.
 * Note disable_large_pages will override the value set here.
 */
static	uint_t	tsb_slab_ttesz = TTE4M;
size_t	tsb_slab_size = MMU_PAGESIZE4M;
uint_t	tsb_slab_shift = MMU_PAGESHIFT4M;
/* PFN mask for TTE */
size_t	tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;

/*
 * Size to use for TSB slabs.  These are used only when 256M tsb arenas
 * exist.
 */
static uint_t	bigtsb_slab_ttesz = TTE256M;
static size_t	bigtsb_slab_size = MMU_PAGESIZE256M;
static uint_t	bigtsb_slab_shift = MMU_PAGESHIFT256M;
/* 256M page alignment for 8K pfn */
static size_t	bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;

/* largest TSB size to grow to, will be smaller on smaller memory systems */
static int	tsb_max_growsize = 0;

/*
 * Tunable parameters dealing with TSB policies.
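 *
 * The knobs that follow are, briefly: tsb_forceheap (DEBUG only),
 * tsb_lgrp_affinity, enable_tsb_rss_sizing and tsb_rss_factor,
 * default_tsb_size, tsb_alloc_hiwater_factor, tsb_remap_ttes and
 * tsb_sectsb_threshold; each is described at its definition below.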
628 */ 629 630 /* 631 * This undocumented tunable forces all 8K TSBs to be allocated from 632 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 633 */ 634 #ifdef DEBUG 635 int tsb_forceheap = 0; 636 #endif /* DEBUG */ 637 638 /* 639 * Decide whether to use per-lgroup arenas, or one global set of 640 * TSB arenas. The default is not to break up per-lgroup, since 641 * most platforms don't recognize any tangible benefit from it. 642 */ 643 int tsb_lgrp_affinity = 0; 644 645 /* 646 * Used for growing the TSB based on the process RSS. 647 * tsb_rss_factor is based on the smallest TSB, and is 648 * shifted by the TSB size to determine if we need to grow. 649 * The default will grow the TSB if the number of TTEs for 650 * this page size exceeds 75% of the number of TSB entries, 651 * which should _almost_ eliminate all conflict misses 652 * (at the expense of using up lots and lots of memory). 653 */ 654 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 655 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 656 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 657 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 658 default_tsb_size) 659 #define TSB_OK_SHRINK() \ 660 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 661 #define TSB_OK_GROW() \ 662 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 663 664 int enable_tsb_rss_sizing = 1; 665 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 666 667 /* which TSB size code to use for new address spaces or if rss sizing off */ 668 int default_tsb_size = TSB_8K_SZCODE; 669 670 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 671 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 672 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 673 674 #ifdef DEBUG 675 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 676 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 677 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 678 static int tsb_alloc_fail_mtbf = 0; 679 static int tsb_alloc_count = 0; 680 #endif /* DEBUG */ 681 682 /* if set to 1, will remap valid TTEs when growing TSB. */ 683 int tsb_remap_ttes = 1; 684 685 /* 686 * If we have more than this many mappings, allocate a second TSB. 687 * This default is chosen because the I/D fully associative TLBs are 688 * assumed to have at least 8 available entries. Platforms with a 689 * larger fully-associative TLB could probably override the default. 
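 *
 * The second TSB is the one reached through tsb_next (see SFMMU_GET_TSBINFO
 * below); it holds the 4M and larger translations, so this threshold is
 * roughly the number of large mappings a process accumulates before a
 * dedicated large-page TSB is considered worthwhile.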
690 */ 691 692 #ifdef sun4v 693 int tsb_sectsb_threshold = 0; 694 #else 695 int tsb_sectsb_threshold = 8; 696 #endif 697 698 /* 699 * kstat data 700 */ 701 struct sfmmu_global_stat sfmmu_global_stat; 702 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 703 704 /* 705 * Global data 706 */ 707 sfmmu_t *ksfmmup; /* kernel's hat id */ 708 709 #ifdef DEBUG 710 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 711 #endif 712 713 /* sfmmu locking operations */ 714 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 715 static int sfmmu_mlspl_held(struct page *, int); 716 717 kmutex_t *sfmmu_page_enter(page_t *); 718 void sfmmu_page_exit(kmutex_t *); 719 int sfmmu_page_spl_held(struct page *); 720 721 /* sfmmu internal locking operations - accessed directly */ 722 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 723 kmutex_t **, kmutex_t **); 724 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 725 static hatlock_t * 726 sfmmu_hat_enter(sfmmu_t *); 727 static hatlock_t * 728 sfmmu_hat_tryenter(sfmmu_t *); 729 static void sfmmu_hat_exit(hatlock_t *); 730 static void sfmmu_hat_lock_all(void); 731 static void sfmmu_hat_unlock_all(void); 732 static void sfmmu_ismhat_enter(sfmmu_t *, int); 733 static void sfmmu_ismhat_exit(sfmmu_t *, int); 734 735 kpm_hlk_t *kpmp_table; 736 uint_t kpmp_table_sz; /* must be a power of 2 */ 737 uchar_t kpmp_shift; 738 739 kpm_shlk_t *kpmp_stable; 740 uint_t kpmp_stable_sz; /* must be a power of 2 */ 741 742 /* 743 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128. 744 * SPL_SHIFT is log2(SPL_TABLE_SIZE). 745 */ 746 #if ((2*NCPU_P2) > 128) 747 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1)) 748 #else 749 #define SPL_SHIFT 7U 750 #endif 751 #define SPL_TABLE_SIZE (1U << SPL_SHIFT) 752 #define SPL_MASK (SPL_TABLE_SIZE - 1) 753 754 /* 755 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t 756 * and by multiples of SPL_SHIFT to get as many varied bits as we can. 757 */ 758 #define SPL_INDEX(pp) \ 759 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \ 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \ 761 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \ 762 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \ 763 SPL_MASK) 764 765 #define SPL_HASH(pp) \ 766 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex) 767 768 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE]; 769 770 /* Array of mutexes protecting a page's mapping list and p_nrm field. */ 771 772 #define MML_TABLE_SIZE SPL_TABLE_SIZE 773 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex) 774 775 static pad_mutex_t mml_table[MML_TABLE_SIZE]; 776 777 /* 778 * hat_unload_callback() will group together callbacks in order 779 * to avoid xt_sync() calls. This is the maximum size of the group. 780 */ 781 #define MAX_CB_ADDR 32 782 783 tte_t hw_tte; 784 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 785 786 static char *mmu_ctx_kstat_names[] = { 787 "mmu_ctx_tsb_exceptions", 788 "mmu_ctx_tsb_raise_exception", 789 "mmu_ctx_wrap_around", 790 }; 791 792 /* 793 * Wrapper for vmem_xalloc since vmem_create only allows limited 794 * parameters for vm_source_alloc functions. This function allows us 795 * to specify alignment consistent with the size of the object being 796 * allocated. 797 */ 798 static void * 799 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 800 { 801 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 802 } 803 804 /* Common code for setting tsb_alloc_hiwater. 
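 * The macro evaluates ptob(pages) / tsb_alloc_hiwater_factor; with the
 * default factor of 32 the high-water mark works out to roughly 1/32
 * (about 3%) of physical memory, beyond which TSB allocations fall back
 * to the kernel heap (see hat_init() below).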
*/ 805 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 806 ptob(pages) / tsb_alloc_hiwater_factor 807 808 /* 809 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 810 * a single TSB. physmem is the number of physical pages so we need physmem 8K 811 * TTEs to represent all those physical pages. We round this up by using 812 * 1<<highbit(). To figure out which size code to use, remember that the size 813 * code is just an amount to shift the smallest TSB size to get the size of 814 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 815 * highbit() - 1) to get the size code for the smallest TSB that can represent 816 * all of physical memory, while erring on the side of too much. 817 * 818 * Restrict tsb_max_growsize to make sure that: 819 * 1) TSBs can't grow larger than the TSB slab size 820 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 821 */ 822 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 823 int _i, _szc, _slabszc, _tsbszc; \ 824 \ 825 _i = highbit(pages); \ 826 if ((1 << (_i - 1)) == (pages)) \ 827 _i--; /* 2^n case, round down */ \ 828 _szc = _i - TSB_START_SIZE; \ 829 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 830 _tsbszc = MIN(_szc, _slabszc); \ 831 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 832 } 833 834 /* 835 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 836 * tsb_info which handles that TTE size. 837 */ 838 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 839 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 840 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 841 sfmmu_hat_lock_held(sfmmup)); \ 842 if ((tte_szc) >= TTE4M) { \ 843 ASSERT((tsbinfop) != NULL); \ 844 (tsbinfop) = (tsbinfop)->tsb_next; \ 845 } \ 846 } 847 848 /* 849 * Macro to use to unload entries from the TSB. 850 * It has knowledge of which page sizes get replicated in the TSB 851 * and will call the appropriate unload routine for the appropriate size. 852 */ 853 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 854 { \ 855 int ttesz = get_hblk_ttesz(hmeblkp); \ 856 if (ttesz == TTE8K || ttesz == TTE4M) { \ 857 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 858 } else { \ 859 caddr_t sva = ismhat ? addr : \ 860 (caddr_t)get_hblk_base(hmeblkp); \ 861 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 862 ASSERT(addr >= sva && addr < eva); \ 863 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 864 } \ 865 } 866 867 868 /* Update tsb_alloc_hiwater after memory is configured. */ 869 /*ARGSUSED*/ 870 static void 871 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 872 { 873 /* Assumes physmem has already been updated. */ 874 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 875 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 876 } 877 878 /* 879 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here 880 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is 881 * deleted. 882 */ 883 /*ARGSUSED*/ 884 static int 885 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages) 886 { 887 return (0); 888 } 889 890 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */ 891 /*ARGSUSED*/ 892 static void 893 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 894 { 895 /* 896 * Whether the delete was cancelled or not, just go ahead and update 897 * tsb_alloc_hiwater and tsb_max_growsize. 
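 *
 * Together, sfmmu_update_post_add, sfmmu_update_pre_del and
 * sfmmu_update_post_del make up the sfmmu_update_vec callback vector,
 * which hat_init() registers with the memory DR framework via
 * kphysm_setup_func_register().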
 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_post_add,		/* post_add */
	sfmmu_update_pre_del,		/* pre_del */
	sfmmu_update_post_del		/* post_del */
};


/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size. For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else  {						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}

/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)				\
{								\
	int index;						\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)		\
}

/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
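 * For an 8K hmeblk the index is ((addr >> MMU_PAGESHIFT) & (NHMENTS - 1)),
 * i.e. which of the NHMENTS consecutive 8K pages covered by the block the
 * address falls in; hmeblks for larger page sizes carry a single hment at
 * index 0.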
1001 */ 1002 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \ 1003 { \ 1004 ASSERT(in_hblk_range((hmeblkp), (addr))); \ 1005 \ 1006 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \ 1007 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \ 1008 } else \ 1009 idx = 0; \ 1010 \ 1011 (hment) = &(hmeblkp)->hblk_hme[idx]; \ 1012 } 1013 1014 /* 1015 * Disable any page sizes not supported by the CPU 1016 */ 1017 void 1018 hat_init_pagesizes() 1019 { 1020 int i; 1021 1022 mmu_exported_page_sizes = 0; 1023 for (i = TTE8K; i < max_mmu_page_sizes; i++) { 1024 1025 szc_2_userszc[i] = (uint_t)-1; 1026 userszc_2_szc[i] = (uint_t)-1; 1027 1028 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) { 1029 disable_large_pages |= (1 << i); 1030 } else { 1031 szc_2_userszc[i] = mmu_exported_page_sizes; 1032 userszc_2_szc[mmu_exported_page_sizes] = i; 1033 mmu_exported_page_sizes++; 1034 } 1035 } 1036 1037 disable_ism_large_pages |= disable_large_pages; 1038 disable_auto_data_large_pages = disable_large_pages; 1039 disable_auto_text_large_pages = disable_large_pages; 1040 1041 /* 1042 * Initialize mmu-specific large page sizes. 1043 */ 1044 if (&mmu_large_pages_disabled) { 1045 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD); 1046 disable_ism_large_pages |= 1047 mmu_large_pages_disabled(HAT_LOAD_SHARE); 1048 disable_auto_data_large_pages |= 1049 mmu_large_pages_disabled(HAT_AUTO_DATA); 1050 disable_auto_text_large_pages |= 1051 mmu_large_pages_disabled(HAT_AUTO_TEXT); 1052 } 1053 } 1054 1055 /* 1056 * Initialize the hardware address translation structures. 1057 */ 1058 void 1059 hat_init(void) 1060 { 1061 int i; 1062 uint_t sz; 1063 size_t size; 1064 1065 hat_lock_init(); 1066 hat_kstat_init(); 1067 1068 /* 1069 * Hardware-only bits in a TTE 1070 */ 1071 MAKE_TTE_MASK(&hw_tte); 1072 1073 hat_init_pagesizes(); 1074 1075 /* Initialize the hash locks */ 1076 for (i = 0; i < khmehash_num; i++) { 1077 mutex_init(&khme_hash[i].hmehash_mutex, NULL, 1078 MUTEX_DEFAULT, NULL); 1079 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1080 } 1081 for (i = 0; i < uhmehash_num; i++) { 1082 mutex_init(&uhme_hash[i].hmehash_mutex, NULL, 1083 MUTEX_DEFAULT, NULL); 1084 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1085 } 1086 khmehash_num--; /* make sure counter starts from 0 */ 1087 uhmehash_num--; /* make sure counter starts from 0 */ 1088 1089 /* 1090 * Allocate context domain structures. 1091 * 1092 * A platform may choose to modify max_mmu_ctxdoms in 1093 * set_platform_defaults(). If a platform does not define 1094 * a set_platform_defaults() or does not choose to modify 1095 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU. 1096 * 1097 * For all platforms that have CPUs sharing MMUs, this 1098 * value must be defined. 1099 */ 1100 if (max_mmu_ctxdoms == 0) 1101 max_mmu_ctxdoms = max_ncpus; 1102 1103 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *); 1104 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP); 1105 1106 /* mmu_ctx_t is 64 bytes aligned */ 1107 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache", 1108 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 1109 /* 1110 * MMU context domain initialization for the Boot CPU. 1111 * This needs the context domains array allocated above. 1112 */ 1113 mutex_enter(&cpu_lock); 1114 sfmmu_cpu_init(CPU); 1115 mutex_exit(&cpu_lock); 1116 1117 /* 1118 * Intialize ism mapping list lock. 
1119 */ 1120 1121 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1122 1123 /* 1124 * Each sfmmu structure carries an array of MMU context info 1125 * structures, one per context domain. The size of this array depends 1126 * on the maximum number of context domains. So, the size of the 1127 * sfmmu structure varies per platform. 1128 * 1129 * sfmmu is allocated from static arena, because trap 1130 * handler at TL > 0 is not allowed to touch kernel relocatable 1131 * memory. sfmmu's alignment is changed to 64 bytes from 1132 * default 8 bytes, as the lower 6 bits will be used to pass 1133 * pgcnt to vtag_flush_pgcnt_tl1. 1134 */ 1135 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1136 1137 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1138 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1139 NULL, NULL, static_arena, 0); 1140 1141 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1142 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1143 1144 /* 1145 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1146 * from the heap when low on memory or when TSB_FORCEALLOC is 1147 * specified, don't use magazines to cache them--we want to return 1148 * them to the system as quickly as possible. 1149 */ 1150 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1151 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1152 static_arena, KMC_NOMAGAZINE); 1153 1154 /* 1155 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1156 * memory, which corresponds to the old static reserve for TSBs. 1157 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1158 * memory we'll allocate for TSB slabs; beyond this point TSB 1159 * allocations will be taken from the kernel heap (via 1160 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1161 * consumer. 1162 */ 1163 if (tsb_alloc_hiwater_factor == 0) { 1164 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1165 } 1166 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1167 1168 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1169 if (!(disable_large_pages & (1 << sz))) 1170 break; 1171 } 1172 1173 if (sz < tsb_slab_ttesz) { 1174 tsb_slab_ttesz = sz; 1175 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1176 tsb_slab_size = 1 << tsb_slab_shift; 1177 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1178 use_bigtsb_arena = 0; 1179 } else if (use_bigtsb_arena && 1180 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1181 use_bigtsb_arena = 0; 1182 } 1183 1184 if (!use_bigtsb_arena) { 1185 bigtsb_slab_shift = tsb_slab_shift; 1186 } 1187 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1188 1189 /* 1190 * On smaller memory systems, allocate TSB memory in smaller chunks 1191 * than the default 4M slab size. We also honor disable_large_pages 1192 * here. 1193 * 1194 * The trap handlers need to be patched with the final slab shift, 1195 * since they need to be able to construct the TSB pointer at runtime. 
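 *
 * Concretely, the block below switches to 512K TSB slabs when
 * tsb_max_growsize works out to TSB_512K_SZCODE or less and 512K pages
 * have not been disabled; otherwise the 4M (or 256M bigtsb) slab
 * parameters chosen above remain in effect.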
1196 */ 1197 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1198 !(disable_large_pages & (1 << TTE512K))) { 1199 tsb_slab_ttesz = TTE512K; 1200 tsb_slab_shift = MMU_PAGESHIFT512K; 1201 tsb_slab_size = MMU_PAGESIZE512K; 1202 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1203 use_bigtsb_arena = 0; 1204 } 1205 1206 if (!use_bigtsb_arena) { 1207 bigtsb_slab_ttesz = tsb_slab_ttesz; 1208 bigtsb_slab_shift = tsb_slab_shift; 1209 bigtsb_slab_size = tsb_slab_size; 1210 bigtsb_slab_mask = tsb_slab_mask; 1211 } 1212 1213 1214 /* 1215 * Set up memory callback to update tsb_alloc_hiwater and 1216 * tsb_max_growsize. 1217 */ 1218 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1219 ASSERT(i == 0); 1220 1221 /* 1222 * kmem_tsb_arena is the source from which large TSB slabs are 1223 * drawn. The quantum of this arena corresponds to the largest 1224 * TSB size we can dynamically allocate for user processes. 1225 * Currently it must also be a supported page size since we 1226 * use exactly one translation entry to map each slab page. 1227 * 1228 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1229 * which most TSBs are allocated. Since most TSB allocations are 1230 * typically 8K we have a kmem cache we stack on top of each 1231 * kmem_tsb_default_arena to speed up those allocations. 1232 * 1233 * Note the two-level scheme of arenas is required only 1234 * because vmem_create doesn't allow us to specify alignment 1235 * requirements. If this ever changes the code could be 1236 * simplified to use only one level of arenas. 1237 * 1238 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1239 * will be provided in addition to the 4M kmem_tsb_arena. 1240 */ 1241 if (use_bigtsb_arena) { 1242 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1243 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1244 vmem_xfree, heap_arena, 0, VM_SLEEP); 1245 } 1246 1247 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1248 sfmmu_vmem_xalloc_aligned_wrapper, 1249 vmem_xfree, heap_arena, 0, VM_SLEEP); 1250 1251 if (tsb_lgrp_affinity) { 1252 char s[50]; 1253 for (i = 0; i < NLGRPS_MAX; i++) { 1254 if (use_bigtsb_arena) { 1255 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1256 kmem_bigtsb_default_arena[i] = vmem_create(s, 1257 NULL, 0, 2 * tsb_slab_size, 1258 sfmmu_tsb_segkmem_alloc, 1259 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1260 0, VM_SLEEP | VM_BESTFIT); 1261 } 1262 1263 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1264 kmem_tsb_default_arena[i] = vmem_create(s, 1265 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1266 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1267 VM_SLEEP | VM_BESTFIT); 1268 1269 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1270 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1271 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1272 kmem_tsb_default_arena[i], 0); 1273 } 1274 } else { 1275 if (use_bigtsb_arena) { 1276 kmem_bigtsb_default_arena[0] = 1277 vmem_create("kmem_bigtsb_default", NULL, 0, 1278 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1279 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1280 VM_SLEEP | VM_BESTFIT); 1281 } 1282 1283 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1284 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1285 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1286 VM_SLEEP | VM_BESTFIT); 1287 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1288 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1289 kmem_tsb_default_arena[0], 0); 1290 } 1291 1292 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1293 
HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1294 sfmmu_hblkcache_destructor, 1295 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1296 hat_memload_arena, KMC_NOHASH); 1297 1298 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1299 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, 1300 VMC_DUMPSAFE | VM_SLEEP); 1301 1302 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1303 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1304 sfmmu_hblkcache_destructor, 1305 NULL, (void *)HME1BLK_SZ, 1306 hat_memload1_arena, KMC_NOHASH); 1307 1308 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1309 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1310 1311 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1312 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1313 NULL, NULL, static_arena, KMC_NOHASH); 1314 1315 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1316 sizeof (ism_ment_t), 0, NULL, NULL, 1317 NULL, NULL, NULL, 0); 1318 1319 /* 1320 * We grab the first hat for the kernel, 1321 */ 1322 AS_LOCK_ENTER(&kas, RW_WRITER); 1323 kas.a_hat = hat_alloc(&kas); 1324 AS_LOCK_EXIT(&kas); 1325 1326 /* 1327 * Initialize hblk_reserve. 1328 */ 1329 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1330 va_to_pa((caddr_t)hblk_reserve); 1331 1332 #ifndef UTSB_PHYS 1333 /* 1334 * Reserve some kernel virtual address space for the locked TTEs 1335 * that allow us to probe the TSB from TL>0. 1336 */ 1337 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1338 0, 0, NULL, NULL, VM_SLEEP); 1339 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1340 0, 0, NULL, NULL, VM_SLEEP); 1341 #endif 1342 1343 #ifdef VAC 1344 /* 1345 * The big page VAC handling code assumes VAC 1346 * will not be bigger than the smallest big 1347 * page- which is 64K. 1348 */ 1349 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1350 cmn_err(CE_PANIC, "VAC too big!"); 1351 } 1352 #endif 1353 1354 uhme_hash_pa = va_to_pa(uhme_hash); 1355 khme_hash_pa = va_to_pa(khme_hash); 1356 1357 /* 1358 * Initialize relocation locks. kpr_suspendlock is held 1359 * at PIL_MAX to prevent interrupts from pinning the holder 1360 * of a suspended TTE which may access it leading to a 1361 * deadlock condition. 1362 */ 1363 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1364 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1365 1366 /* 1367 * If Shared context support is disabled via /etc/system 1368 * set shctx_on to 0 here if it was set to 1 earlier in boot 1369 * sequence by cpu module initialization code. 1370 */ 1371 if (shctx_on && disable_shctx) { 1372 shctx_on = 0; 1373 } 1374 1375 if (shctx_on) { 1376 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1377 sizeof (srd_buckets[0]), KM_SLEEP); 1378 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1379 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1380 MUTEX_DEFAULT, NULL); 1381 } 1382 1383 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1384 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1385 NULL, NULL, NULL, 0); 1386 region_cache = kmem_cache_create("region_cache", 1387 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1388 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1389 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1390 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1391 NULL, NULL, NULL, 0); 1392 } 1393 1394 /* 1395 * Pre-allocate hrm_hashtab before enabling the collection of 1396 * refmod statistics. 
Allocating on the fly would mean us 1397 * running the risk of suffering recursive mutex enters or 1398 * deadlocks. 1399 */ 1400 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1401 KM_SLEEP); 1402 1403 /* Allocate per-cpu pending freelist of hmeblks */ 1404 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64, 1405 KM_SLEEP); 1406 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP( 1407 (uintptr_t)cpu_hme_pend, 64); 1408 1409 for (i = 0; i < NCPU; i++) { 1410 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT, 1411 NULL); 1412 } 1413 1414 if (cpu_hme_pend_thresh == 0) { 1415 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH; 1416 } 1417 } 1418 1419 /* 1420 * Initialize locking for the hat layer, called early during boot. 1421 */ 1422 static void 1423 hat_lock_init() 1424 { 1425 int i; 1426 1427 /* 1428 * initialize the array of mutexes protecting a page's mapping 1429 * list and p_nrm field. 1430 */ 1431 for (i = 0; i < MML_TABLE_SIZE; i++) 1432 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL); 1433 1434 if (kpm_enable) { 1435 for (i = 0; i < kpmp_table_sz; i++) { 1436 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1437 MUTEX_DEFAULT, NULL); 1438 } 1439 } 1440 1441 /* 1442 * Initialize array of mutex locks that protects sfmmu fields and 1443 * TSB lists. 1444 */ 1445 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1446 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1447 NULL); 1448 } 1449 1450 #define SFMMU_KERNEL_MAXVA \ 1451 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1452 1453 /* 1454 * Allocate a hat structure. 1455 * Called when an address space first uses a hat. 1456 */ 1457 struct hat * 1458 hat_alloc(struct as *as) 1459 { 1460 sfmmu_t *sfmmup; 1461 int i; 1462 uint64_t cnum; 1463 extern uint_t get_color_start(struct as *); 1464 1465 ASSERT(AS_WRITE_HELD(as)); 1466 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1467 sfmmup->sfmmu_as = as; 1468 sfmmup->sfmmu_flags = 0; 1469 sfmmup->sfmmu_tteflags = 0; 1470 sfmmup->sfmmu_rtteflags = 0; 1471 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1472 1473 if (as == &kas) { 1474 ksfmmup = sfmmup; 1475 sfmmup->sfmmu_cext = 0; 1476 cnum = KCONTEXT; 1477 1478 sfmmup->sfmmu_clrstart = 0; 1479 sfmmup->sfmmu_tsb = NULL; 1480 /* 1481 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1482 * to setup tsb_info for ksfmmup. 1483 */ 1484 } else { 1485 1486 /* 1487 * Just set to invalid ctx. When it faults, it will 1488 * get a valid ctx. This would avoid the situation 1489 * where we get a ctx, but it gets stolen and then 1490 * we fault when we try to run and so have to get 1491 * another ctx. 
1492 */ 1493 sfmmup->sfmmu_cext = 0; 1494 cnum = INVALID_CONTEXT; 1495 1496 /* initialize original physical page coloring bin */ 1497 sfmmup->sfmmu_clrstart = get_color_start(as); 1498 #ifdef DEBUG 1499 if (tsb_random_size) { 1500 uint32_t randval = (uint32_t)gettick() >> 4; 1501 int size = randval % (tsb_max_growsize + 1); 1502 1503 /* chose a random tsb size for stress testing */ 1504 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1505 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1506 } else 1507 #endif /* DEBUG */ 1508 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1509 default_tsb_size, 1510 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1511 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1512 ASSERT(sfmmup->sfmmu_tsb != NULL); 1513 } 1514 1515 ASSERT(max_mmu_ctxdoms > 0); 1516 for (i = 0; i < max_mmu_ctxdoms; i++) { 1517 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1518 sfmmup->sfmmu_ctxs[i].gnum = 0; 1519 } 1520 1521 for (i = 0; i < max_mmu_page_sizes; i++) { 1522 sfmmup->sfmmu_ttecnt[i] = 0; 1523 sfmmup->sfmmu_scdrttecnt[i] = 0; 1524 sfmmup->sfmmu_ismttecnt[i] = 0; 1525 sfmmup->sfmmu_scdismttecnt[i] = 0; 1526 sfmmup->sfmmu_pgsz[i] = TTE8K; 1527 } 1528 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1529 sfmmup->sfmmu_iblk = NULL; 1530 sfmmup->sfmmu_ismhat = 0; 1531 sfmmup->sfmmu_scdhat = 0; 1532 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1533 if (sfmmup == ksfmmup) { 1534 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1535 } else { 1536 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1537 } 1538 sfmmup->sfmmu_free = 0; 1539 sfmmup->sfmmu_rmstat = 0; 1540 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1541 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1542 sfmmup->sfmmu_srdp = NULL; 1543 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1544 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1545 sfmmup->sfmmu_scdp = NULL; 1546 sfmmup->sfmmu_scd_link.next = NULL; 1547 sfmmup->sfmmu_scd_link.prev = NULL; 1548 return (sfmmup); 1549 } 1550 1551 /* 1552 * Create per-MMU context domain kstats for a given MMU ctx. 1553 */ 1554 static void 1555 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1556 { 1557 mmu_ctx_stat_t stat; 1558 kstat_t *mmu_kstat; 1559 1560 ASSERT(MUTEX_HELD(&cpu_lock)); 1561 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1562 1563 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1564 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1565 1566 if (mmu_kstat == NULL) { 1567 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1568 mmu_ctxp->mmu_idx); 1569 } else { 1570 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1571 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1572 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1573 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1574 mmu_ctxp->mmu_kstat = mmu_kstat; 1575 kstat_install(mmu_kstat); 1576 } 1577 } 1578 1579 /* 1580 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1581 * context domain information for a given CPU. If a platform does not 1582 * specify that interface, then the function below is used instead to return 1583 * default information. The defaults are as follows: 1584 * 1585 * - The number of MMU context IDs supported on any CPU in the 1586 * system is 8K. 1587 * - There is one MMU context domain per CPU. 
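 *
 * These defaults match what sfmmu_cpuid_to_mmu_ctx_info() below returns:
 * mmu_nctxs comes from nctxs (DEFAULT_NUM_CTXS_PER_MMU, i.e. 8192) and
 * mmu_idx from the CPU's cpu_seqid.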
1588 */ 1589 /*ARGSUSED*/ 1590 static void 1591 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1592 { 1593 infop->mmu_nctxs = nctxs; 1594 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1595 } 1596 1597 /* 1598 * Called during CPU initialization to set the MMU context-related information 1599 * for a CPU. 1600 * 1601 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1602 */ 1603 void 1604 sfmmu_cpu_init(cpu_t *cp) 1605 { 1606 mmu_ctx_info_t info; 1607 mmu_ctx_t *mmu_ctxp; 1608 1609 ASSERT(MUTEX_HELD(&cpu_lock)); 1610 1611 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1612 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1613 else 1614 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1615 1616 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1617 1618 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1619 /* Each mmu_ctx is cacheline aligned. */ 1620 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1621 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1622 1623 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1624 (void *)ipltospl(DISP_LEVEL)); 1625 mmu_ctxp->mmu_idx = info.mmu_idx; 1626 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1627 /* 1628 * Globally for lifetime of a system, 1629 * gnum must always increase. 1630 * mmu_saved_gnum is protected by the cpu_lock. 1631 */ 1632 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1633 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1634 1635 sfmmu_mmu_kstat_create(mmu_ctxp); 1636 1637 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1638 } else { 1639 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1640 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs); 1641 } 1642 1643 /* 1644 * The mmu_lock is acquired here to prevent races with 1645 * the wrap-around code. 1646 */ 1647 mutex_enter(&mmu_ctxp->mmu_lock); 1648 1649 1650 mmu_ctxp->mmu_ncpus++; 1651 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1652 CPU_MMU_IDX(cp) = info.mmu_idx; 1653 CPU_MMU_CTXP(cp) = mmu_ctxp; 1654 1655 mutex_exit(&mmu_ctxp->mmu_lock); 1656 } 1657 1658 static void 1659 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp) 1660 { 1661 ASSERT(MUTEX_HELD(&cpu_lock)); 1662 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock)); 1663 1664 mutex_destroy(&mmu_ctxp->mmu_lock); 1665 1666 if (mmu_ctxp->mmu_kstat) 1667 kstat_delete(mmu_ctxp->mmu_kstat); 1668 1669 /* mmu_saved_gnum is protected by the cpu_lock. */ 1670 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1671 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1672 1673 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1674 } 1675 1676 /* 1677 * Called to perform MMU context-related cleanup for a CPU. 1678 */ 1679 void 1680 sfmmu_cpu_cleanup(cpu_t *cp) 1681 { 1682 mmu_ctx_t *mmu_ctxp; 1683 1684 ASSERT(MUTEX_HELD(&cpu_lock)); 1685 1686 mmu_ctxp = CPU_MMU_CTXP(cp); 1687 ASSERT(mmu_ctxp != NULL); 1688 1689 /* 1690 * The mmu_lock is acquired here to prevent races with 1691 * the wrap-around code. 1692 */ 1693 mutex_enter(&mmu_ctxp->mmu_lock); 1694 1695 CPU_MMU_CTXP(cp) = NULL; 1696 1697 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1698 if (--mmu_ctxp->mmu_ncpus == 0) { 1699 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1700 mutex_exit(&mmu_ctxp->mmu_lock); 1701 sfmmu_ctxdom_free(mmu_ctxp); 1702 return; 1703 } 1704 1705 mutex_exit(&mmu_ctxp->mmu_lock); 1706 } 1707 1708 uint_t 1709 sfmmu_ctxdom_nctxs(int idx) 1710 { 1711 return (mmu_ctxs_tbl[idx]->mmu_nctxs); 1712 } 1713 1714 #ifdef sun4v 1715 /* 1716 * sfmmu_ctxdoms_* is an interface provided to help keep context domains 1717 * consistant after suspend/resume on system that can resume on a different 1718 * hardware than it was suspended. 
1719 * 1720 * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts 1721 * from being allocated. It acquires all hat_locks, which blocks most access to 1722 * context data, except for a few cases that are handled separately or are 1723 * harmless. It wraps each domain to increment gnum and invalidate on-CPU 1724 * contexts, and forces cnum to its max. As a result of this call all user 1725 * threads that are running on CPUs trap and try to perform wrap around but 1726 * can't because hat_locks are taken. Threads that were not on CPUs but started 1727 * by the scheduler go to sfmmu_alloc_ctx() to acquire a context without checking 1728 * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block 1729 * on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before CPUs 1730 * are paused, else it could deadlock acquiring locks held by paused CPUs. 1731 * 1732 * sfmmu_ctxdoms_remove() removes context domains from every CPU and records 1733 * the CPUs that had them. It must be called after CPUs have been paused. This 1734 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data, 1735 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx 1736 * runs with interrupts disabled. When CPUs are later resumed, they may enter 1737 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately 1738 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus 1739 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is 1740 * accessing the old context domains. 1741 * 1742 * sfmmu_ctxdoms_update(void) frees space used by old context domains and 1743 * allocates new context domains based on hardware layout. It initializes 1744 * every CPU that had a context domain before migration to have one again. 1745 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it 1746 * could deadlock acquiring locks held by paused CPUs. 1747 * 1748 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads 1749 * acquire new context ids and continue execution. 1750 * 1751 * Therefore these functions should be called in the following order: 1752 * suspend_routine() 1753 * sfmmu_ctxdoms_lock() 1754 * pause_cpus() 1755 * suspend() 1756 * if (suspend failed) 1757 * sfmmu_ctxdoms_unlock() 1758 * ... 1759 * sfmmu_ctxdoms_remove() 1760 * resume_cpus() 1761 * sfmmu_ctxdoms_update() 1762 * sfmmu_ctxdoms_unlock() 1763 */ 1764 static cpuset_t sfmmu_ctxdoms_pset; 1765 1766 void 1767 sfmmu_ctxdoms_remove() 1768 { 1769 processorid_t id; 1770 cpu_t *cp; 1771 1772 /* 1773 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can 1774 * be restored post-migration. A CPU may be powered off and not have a 1775 * domain, for example. 1776 */ 1777 CPUSET_ZERO(sfmmu_ctxdoms_pset); 1778 1779 for (id = 0; id < NCPU; id++) { 1780 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) { 1781 CPUSET_ADD(sfmmu_ctxdoms_pset, id); 1782 CPU_MMU_CTXP(cp) = NULL; 1783 } 1784 } 1785 } 1786 1787 void 1788 sfmmu_ctxdoms_lock(void) 1789 { 1790 int idx; 1791 mmu_ctx_t *mmu_ctxp; 1792 1793 sfmmu_hat_lock_all(); 1794 1795 /* 1796 * At this point, no thread can be in sfmmu_ctx_wrap_around, because 1797 * hat_lock is always taken before calling it. 1798 * 1799 * For each domain, set mmu_cnum to max so no more contexts can be 1800 * allocated, and wrap to flush on-CPU contexts and force threads to 1801 * acquire a new context when we later drop hat_lock after migration.
1802 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum, 1803 * but the latter uses CAS and will miscompare and not overwrite it. 1804 */ 1805 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */ 1806 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1807 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) { 1808 mutex_enter(&mmu_ctxp->mmu_lock); 1809 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs; 1810 /* make sure updated cnum visible */ 1811 membar_enter(); 1812 mutex_exit(&mmu_ctxp->mmu_lock); 1813 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE); 1814 } 1815 } 1816 kpreempt_enable(); 1817 } 1818 1819 void 1820 sfmmu_ctxdoms_unlock(void) 1821 { 1822 sfmmu_hat_unlock_all(); 1823 } 1824 1825 void 1826 sfmmu_ctxdoms_update(void) 1827 { 1828 processorid_t id; 1829 cpu_t *cp; 1830 uint_t idx; 1831 mmu_ctx_t *mmu_ctxp; 1832 1833 /* 1834 * Free all context domains. As side effect, this increases 1835 * mmu_saved_gnum to the maximum gnum over all domains, which is used to 1836 * init gnum in the new domains, which therefore will be larger than the 1837 * sfmmu gnum for any process, guaranteeing that every process will see 1838 * a new generation and allocate a new context regardless of what new 1839 * domain it runs in. 1840 */ 1841 mutex_enter(&cpu_lock); 1842 1843 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1844 if (mmu_ctxs_tbl[idx] != NULL) { 1845 mmu_ctxp = mmu_ctxs_tbl[idx]; 1846 mmu_ctxs_tbl[idx] = NULL; 1847 sfmmu_ctxdom_free(mmu_ctxp); 1848 } 1849 } 1850 1851 for (id = 0; id < NCPU; id++) { 1852 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) && 1853 (cp = cpu[id]) != NULL) 1854 sfmmu_cpu_init(cp); 1855 } 1856 mutex_exit(&cpu_lock); 1857 } 1858 #endif 1859 1860 /* 1861 * Hat_setup, makes an address space context the current active one. 1862 * In sfmmu this translates to setting the secondary context with the 1863 * corresponding context. 1864 */ 1865 void 1866 hat_setup(struct hat *sfmmup, int allocflag) 1867 { 1868 hatlock_t *hatlockp; 1869 1870 /* Init needs some special treatment. */ 1871 if (allocflag == HAT_INIT) { 1872 /* 1873 * Make sure that we have 1874 * 1. a TSB 1875 * 2. a valid ctx that doesn't get stolen after this point. 1876 */ 1877 hatlockp = sfmmu_hat_enter(sfmmup); 1878 1879 /* 1880 * Swap in the TSB. hat_init() allocates tsbinfos without 1881 * TSBs, but we need one for init, since the kernel does some 1882 * special things to set up its stack and needs the TSB to 1883 * resolve page faults. 1884 */ 1885 sfmmu_tsb_swapin(sfmmup, hatlockp); 1886 1887 sfmmu_get_ctx(sfmmup); 1888 1889 sfmmu_hat_exit(hatlockp); 1890 } else { 1891 ASSERT(allocflag == HAT_ALLOC); 1892 1893 hatlockp = sfmmu_hat_enter(sfmmup); 1894 kpreempt_disable(); 1895 1896 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1897 /* 1898 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1899 * pagesize bits don't matter in this case since we are passing 1900 * INVALID_CONTEXT to it. 1901 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1902 */ 1903 sfmmu_setctx_sec(INVALID_CONTEXT); 1904 sfmmu_clear_utsbinfo(); 1905 1906 kpreempt_enable(); 1907 sfmmu_hat_exit(hatlockp); 1908 } 1909 } 1910 1911 /* 1912 * Free all the translation resources for the specified address space. 1913 * Called from as_free when an address space is being destroyed. 
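 * * Roughly (a sketch of the expected caller, not a specification of it), as_free() drives the teardown in this order: * *	hat_free_start(as->a_hat); *	... unmap and free all of the address space's segments ... *	hat_free_end(as->a_hat); * * hat_free_start() only marks the hat as being freed and detaches it from any SCD; the actual resources are released in hat_free_end().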
1914 */ 1915 void 1916 hat_free_start(struct hat *sfmmup) 1917 { 1918 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 1919 ASSERT(sfmmup != ksfmmup); 1920 1921 sfmmup->sfmmu_free = 1; 1922 if (sfmmup->sfmmu_scdp != NULL) { 1923 sfmmu_leave_scd(sfmmup, 0); 1924 } 1925 1926 ASSERT(sfmmup->sfmmu_scdp == NULL); 1927 } 1928 1929 void 1930 hat_free_end(struct hat *sfmmup) 1931 { 1932 int i; 1933 1934 ASSERT(sfmmup->sfmmu_free == 1); 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1939 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1940 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1941 1942 if (sfmmup->sfmmu_rmstat) { 1943 hat_freestat(sfmmup->sfmmu_as, 0); 1944 } 1945 1946 while (sfmmup->sfmmu_tsb != NULL) { 1947 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1948 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1949 sfmmup->sfmmu_tsb = next; 1950 } 1951 1952 if (sfmmup->sfmmu_srdp != NULL) { 1953 sfmmu_leave_srd(sfmmup); 1954 ASSERT(sfmmup->sfmmu_srdp == NULL); 1955 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1956 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1957 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1958 SFMMU_L2_HMERLINKS_SIZE); 1959 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1960 } 1961 } 1962 } 1963 sfmmu_free_sfmmu(sfmmup); 1964 1965 #ifdef DEBUG 1966 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1967 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1968 } 1969 #endif 1970 1971 kmem_cache_free(sfmmuid_cache, sfmmup); 1972 } 1973 1974 /* 1975 * Set up any translation structures, for the specified address space, 1976 * that are needed or preferred when the process is being swapped in. 1977 */ 1978 /* ARGSUSED */ 1979 void 1980 hat_swapin(struct hat *hat) 1981 { 1982 } 1983 1984 /* 1985 * Free all of the translation resources, for the specified address space, 1986 * that can be freed while the process is swapped out. Called from as_swapout. 1987 * Also, free up the ctx that this process was using. 1988 */ 1989 void 1990 hat_swapout(struct hat *sfmmup) 1991 { 1992 struct hmehash_bucket *hmebp; 1993 struct hme_blk *hmeblkp; 1994 struct hme_blk *pr_hblk = NULL; 1995 struct hme_blk *nx_hblk; 1996 int i; 1997 struct hme_blk *list = NULL; 1998 hatlock_t *hatlockp; 1999 struct tsb_info *tsbinfop; 2000 struct free_tsb { 2001 struct free_tsb *next; 2002 struct tsb_info *tsbinfop; 2003 }; /* free list of TSBs */ 2004 struct free_tsb *freelist, *last, *next; 2005 2006 SFMMU_STAT(sf_swapout); 2007 2008 /* 2009 * There is no way to go from an as to all its translations in sfmmu. 2010 * Here is one of the times when we take the big hit and traverse 2011 * the hash looking for hme_blks to free up. Not only do we free up 2012 * this as hme_blks but all those that are free. We are obviously 2013 * swapping because we need memory so let's free up as much 2014 * as we can. 2015 * 2016 * Note that we don't flush TLB/TSB here -- it's not necessary 2017 * because: 2018 * 1) we free the ctx we're using and throw away the TSB(s); 2019 * 2) processes aren't runnable while being swapped out. 
2020 */ 2021 ASSERT(sfmmup != KHATID); 2022 for (i = 0; i <= UHMEHASH_SZ; i++) { 2023 hmebp = &uhme_hash[i]; 2024 SFMMU_HASH_LOCK(hmebp); 2025 hmeblkp = hmebp->hmeblkp; 2026 pr_hblk = NULL; 2027 while (hmeblkp) { 2028 2029 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 2030 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 2031 ASSERT(!hmeblkp->hblk_shared); 2032 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 2033 (caddr_t)get_hblk_base(hmeblkp), 2034 get_hblk_endaddr(hmeblkp), 2035 NULL, HAT_UNLOAD); 2036 } 2037 nx_hblk = hmeblkp->hblk_next; 2038 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 2039 ASSERT(!hmeblkp->hblk_lckcnt); 2040 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2041 &list, 0); 2042 } else { 2043 pr_hblk = hmeblkp; 2044 } 2045 hmeblkp = nx_hblk; 2046 } 2047 SFMMU_HASH_UNLOCK(hmebp); 2048 } 2049 2050 sfmmu_hblks_list_purge(&list, 0); 2051 2052 /* 2053 * Now free up the ctx so that others can reuse it. 2054 */ 2055 hatlockp = sfmmu_hat_enter(sfmmup); 2056 2057 sfmmu_invalidate_ctx(sfmmup); 2058 2059 /* 2060 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 2061 * If TSBs were never swapped in, just return. 2062 * This implies that we don't support partial swapping 2063 * of TSBs -- either all are swapped out, or none are. 2064 * 2065 * We must hold the HAT lock here to prevent racing with another 2066 * thread trying to unmap TTEs from the TSB or running the post- 2067 * relocator after relocating the TSB's memory. Unfortunately, we 2068 * can't free memory while holding the HAT lock or we could 2069 * deadlock, so we build a list of TSBs to be freed after marking 2070 * the tsbinfos as swapped out and free them after dropping the 2071 * lock. 2072 */ 2073 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 2074 sfmmu_hat_exit(hatlockp); 2075 return; 2076 } 2077 2078 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 2079 last = freelist = NULL; 2080 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 2081 tsbinfop = tsbinfop->tsb_next) { 2082 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 2083 2084 /* 2085 * Cast the TSB into a struct free_tsb and put it on the free 2086 * list. 2087 */ 2088 if (freelist == NULL) { 2089 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 2090 } else { 2091 last->next = (struct free_tsb *)tsbinfop->tsb_va; 2092 last = last->next; 2093 } 2094 last->next = NULL; 2095 last->tsbinfop = tsbinfop; 2096 tsbinfop->tsb_flags |= TSB_SWAPPED; 2097 /* 2098 * Zero out the TTE to clear the valid bit. 2099 * Note we can't use a value like 0xbad because we want to 2100 * ensure diagnostic bits are NEVER set on TTEs that might 2101 * be loaded. The intent is to catch any invalid access 2102 * to the swapped TSB, such as a thread running with a valid 2103 * context without first calling sfmmu_tsb_swapin() to 2104 * allocate TSB memory. 2105 */ 2106 tsbinfop->tsb_tte.ll = 0; 2107 } 2108 2109 /* Now we can drop the lock and free the TSB memory. 
*/ 2110 sfmmu_hat_exit(hatlockp); 2111 for (; freelist != NULL; freelist = next) { 2112 next = freelist->next; 2113 sfmmu_tsb_free(freelist->tsbinfop); 2114 } 2115 } 2116 2117 /* 2118 * Duplicate the translations of an as into another newas 2119 */ 2120 /* ARGSUSED */ 2121 int 2122 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 2123 uint_t flag) 2124 { 2125 sf_srd_t *srdp; 2126 sf_scd_t *scdp; 2127 int i; 2128 extern uint_t get_color_start(struct as *); 2129 2130 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 2131 (flag == HAT_DUP_SRD)); 2132 ASSERT(hat != ksfmmup); 2133 ASSERT(newhat != ksfmmup); 2134 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 2135 2136 if (flag == HAT_DUP_COW) { 2137 panic("hat_dup: HAT_DUP_COW not supported"); 2138 } 2139 2140 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2141 ASSERT(srdp->srd_evp != NULL); 2142 VN_HOLD(srdp->srd_evp); 2143 ASSERT(srdp->srd_refcnt > 0); 2144 newhat->sfmmu_srdp = srdp; 2145 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt); 2146 } 2147 2148 /* 2149 * HAT_DUP_ALL flag is used after as duplication is done. 2150 */ 2151 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2152 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2153 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2154 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2155 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2156 } 2157 2158 /* check if need to join scd */ 2159 if ((scdp = hat->sfmmu_scdp) != NULL && 2160 newhat->sfmmu_scdp != scdp) { 2161 int ret; 2162 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2163 &scdp->scd_region_map, ret); 2164 ASSERT(ret); 2165 sfmmu_join_scd(scdp, newhat); 2166 ASSERT(newhat->sfmmu_scdp == scdp && 2167 scdp->scd_refcnt >= 2); 2168 for (i = 0; i < max_mmu_page_sizes; i++) { 2169 newhat->sfmmu_ismttecnt[i] = 2170 hat->sfmmu_ismttecnt[i]; 2171 newhat->sfmmu_scdismttecnt[i] = 2172 hat->sfmmu_scdismttecnt[i]; 2173 } 2174 } 2175 2176 sfmmu_check_page_sizes(newhat, 1); 2177 } 2178 2179 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2180 update_proc_pgcolorbase_after_fork != 0) { 2181 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2182 } 2183 return (0); 2184 } 2185 2186 void 2187 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2188 uint_t attr, uint_t flags) 2189 { 2190 hat_do_memload(hat, addr, pp, attr, flags, 2191 SFMMU_INVALID_SHMERID); 2192 } 2193 2194 void 2195 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2196 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2197 { 2198 uint_t rid; 2199 if (rcookie == HAT_INVALID_REGION_COOKIE) { 2200 hat_do_memload(hat, addr, pp, attr, flags, 2201 SFMMU_INVALID_SHMERID); 2202 return; 2203 } 2204 rid = (uint_t)((uint64_t)rcookie); 2205 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2206 hat_do_memload(hat, addr, pp, attr, flags, rid); 2207 } 2208 2209 /* 2210 * Set up addr to map to page pp with protection prot. 2211 * As an optimization we also load the TSB with the 2212 * corresponding tte but it is no big deal if the tte gets kicked out. 
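 * * A typical caller (illustrative only) is a segment driver resolving a fault on a page it has already looked up and locked, e.g.: * *	pp = page_lookup(vp, off, SE_SHARED); *	hat_memload(seg->s_as->a_hat, addr, pp, prot, HAT_LOAD); * * where vp, off, addr and prot are whatever the fault handler computed; the page must remain locked and addr must be page aligned, per the ASSERTs below.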
2213 */ 2214 static void 2215 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2216 uint_t attr, uint_t flags, uint_t rid) 2217 { 2218 tte_t tte; 2219 2220 2221 ASSERT(hat != NULL); 2222 ASSERT(PAGE_LOCKED(pp)); 2223 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2224 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2225 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2226 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2227 2228 if (PP_ISFREE(pp)) { 2229 panic("hat_memload: loading a mapping to free page %p", 2230 (void *)pp); 2231 } 2232 2233 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as)); 2234 2235 if (flags & ~SFMMU_LOAD_ALLFLAG) 2236 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2237 flags & ~SFMMU_LOAD_ALLFLAG); 2238 2239 if (hat->sfmmu_rmstat) 2240 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2241 2242 #if defined(SF_ERRATA_57) 2243 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2244 (addr < errata57_limit) && (attr & PROT_EXEC) && 2245 !(flags & HAT_LOAD_SHARE)) { 2246 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2247 " page executable"); 2248 attr &= ~PROT_EXEC; 2249 } 2250 #endif 2251 2252 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2253 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2254 2255 /* 2256 * Check TSB and TLB page sizes. 2257 */ 2258 if ((flags & HAT_LOAD_SHARE) == 0) { 2259 sfmmu_check_page_sizes(hat, 1); 2260 } 2261 } 2262 2263 /* 2264 * hat_devload can be called to map real memory (e.g. 2265 * /dev/kmem) and even though hat_devload will determine the pfn is 2266 * for memory, it will be unable to get a shared lock on the 2267 * page (because someone else has it exclusively) and will 2268 * pass pp = NULL. If tteload doesn't get a non-NULL 2269 * page pointer it can't cache memory.
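 * * An illustrative call (sketch only, with va and pfn standing for values the caller already has) mapping one page of device registers uncached into the kernel might look like: * *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn, *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER, *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST); * * For a non-memory pfn the code below additionally sets the side-effect bit (for the strict/unordered cases) and disables caching.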
2270 */ 2271 void 2272 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2273 uint_t attr, int flags) 2274 { 2275 tte_t tte; 2276 struct page *pp = NULL; 2277 int use_lgpg = 0; 2278 2279 ASSERT(hat != NULL); 2280 2281 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2282 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2283 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as)); 2284 if (len == 0) 2285 panic("hat_devload: zero len"); 2286 if (flags & ~SFMMU_LOAD_ALLFLAG) 2287 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2288 flags & ~SFMMU_LOAD_ALLFLAG); 2289 2290 #if defined(SF_ERRATA_57) 2291 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2292 (addr < errata57_limit) && (attr & PROT_EXEC) && 2293 !(flags & HAT_LOAD_SHARE)) { 2294 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2295 " page executable"); 2296 attr &= ~PROT_EXEC; 2297 } 2298 #endif 2299 2300 /* 2301 * If it's a memory page find its pp 2302 */ 2303 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2304 pp = page_numtopp_nolock(pfn); 2305 if (pp == NULL) { 2306 flags |= HAT_LOAD_NOCONSIST; 2307 } else { 2308 if (PP_ISFREE(pp)) { 2309 panic("hat_memload: loading " 2310 "a mapping to free page %p", 2311 (void *)pp); 2312 } 2313 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2314 panic("hat_memload: loading a mapping " 2315 "to unlocked relocatable page %p", 2316 (void *)pp); 2317 } 2318 ASSERT(len == MMU_PAGESIZE); 2319 } 2320 } 2321 2322 if (hat->sfmmu_rmstat) 2323 hat_resvstat(len, hat->sfmmu_as, addr); 2324 2325 if (flags & HAT_LOAD_NOCONSIST) { 2326 attr |= SFMMU_UNCACHEVTTE; 2327 use_lgpg = 1; 2328 } 2329 if (!pf_is_memory(pfn)) { 2330 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2331 use_lgpg = 1; 2332 switch (attr & HAT_ORDER_MASK) { 2333 case HAT_STRICTORDER: 2334 case HAT_UNORDERED_OK: 2335 /* 2336 * we set the side effect bit for all non 2337 * memory mappings unless merging is ok 2338 */ 2339 attr |= SFMMU_SIDEFFECT; 2340 break; 2341 case HAT_MERGING_OK: 2342 case HAT_LOADCACHING_OK: 2343 case HAT_STORECACHING_OK: 2344 break; 2345 default: 2346 panic("hat_devload: bad attr"); 2347 break; 2348 } 2349 } 2350 while (len) { 2351 if (!use_lgpg) { 2352 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2353 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2354 flags, SFMMU_INVALID_SHMERID); 2355 len -= MMU_PAGESIZE; 2356 addr += MMU_PAGESIZE; 2357 pfn++; 2358 continue; 2359 } 2360 /* 2361 * try to use large pages, check va/pa alignments 2362 * Note that 32M/256M page sizes are not (yet) supported. 
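 * For example (illustrative numbers, assuming the larger sizes are not disabled): a 6MB request whose virtual and physical addresses are both 4MB aligned is carved into one 4MB mapping followed by four 512KB mappings; whenever the alignment or the remaining length rules a larger size out, we fall back toward 8K pages.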
2363 */ 2364 if ((len >= MMU_PAGESIZE4M) && 2365 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 2366 !(disable_large_pages & (1 << TTE4M)) && 2367 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 2368 sfmmu_memtte(&tte, pfn, attr, TTE4M); 2369 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2370 flags, SFMMU_INVALID_SHMERID); 2371 len -= MMU_PAGESIZE4M; 2372 addr += MMU_PAGESIZE4M; 2373 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 2374 } else if ((len >= MMU_PAGESIZE512K) && 2375 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 2376 !(disable_large_pages & (1 << TTE512K)) && 2377 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 2378 sfmmu_memtte(&tte, pfn, attr, TTE512K); 2379 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2380 flags, SFMMU_INVALID_SHMERID); 2381 len -= MMU_PAGESIZE512K; 2382 addr += MMU_PAGESIZE512K; 2383 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 2384 } else if ((len >= MMU_PAGESIZE64K) && 2385 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 2386 !(disable_large_pages & (1 << TTE64K)) && 2387 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 2388 sfmmu_memtte(&tte, pfn, attr, TTE64K); 2389 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2390 flags, SFMMU_INVALID_SHMERID); 2391 len -= MMU_PAGESIZE64K; 2392 addr += MMU_PAGESIZE64K; 2393 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 2394 } else { 2395 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2396 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2397 flags, SFMMU_INVALID_SHMERID); 2398 len -= MMU_PAGESIZE; 2399 addr += MMU_PAGESIZE; 2400 pfn++; 2401 } 2402 } 2403 2404 /* 2405 * Check TSB and TLB page sizes. 2406 */ 2407 if ((flags & HAT_LOAD_SHARE) == 0) { 2408 sfmmu_check_page_sizes(hat, 1); 2409 } 2410 } 2411 2412 void 2413 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2414 struct page **pps, uint_t attr, uint_t flags) 2415 { 2416 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2417 SFMMU_INVALID_SHMERID); 2418 } 2419 2420 void 2421 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 2422 struct page **pps, uint_t attr, uint_t flags, 2423 hat_region_cookie_t rcookie) 2424 { 2425 uint_t rid; 2426 if (rcookie == HAT_INVALID_REGION_COOKIE) { 2427 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2428 SFMMU_INVALID_SHMERID); 2429 return; 2430 } 2431 rid = (uint_t)((uint64_t)rcookie); 2432 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2433 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid); 2434 } 2435 2436 /* 2437 * Map the largest extent possible out of the page array. The array may NOT 2438 * be in order. The largest possible mapping a page can have 2439 * is specified in the p_szc field. The p_szc field 2440 * cannot change as long as there are any mappings (large or small) 2441 * to any of the pages that make up the large page. (i.e. any 2442 * promotion/demotion of page size is not up to the hat but up to 2443 * the page free list manager). The array 2444 * should consist of properly aligned contiguous pages that are 2445 * part of a big page for a large mapping to be created.
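 * * As a concrete (illustrative) example: to get a single 4MB mapping out of the array, the caller must pass 512 contiguous 8KB pages (4MB / 8KB), the pages' p_szc must indicate a 4MB (or larger) page, the virtual address must be 4MB aligned and the first pfn aligned to 512 pages; otherwise the code below falls back to batching 8KB translations.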
2446 */ 2447 static void 2448 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len, 2449 struct page **pps, uint_t attr, uint_t flags, uint_t rid) 2450 { 2451 int ttesz; 2452 size_t mapsz; 2453 pgcnt_t numpg, npgs; 2454 tte_t tte; 2455 page_t *pp; 2456 uint_t large_pages_disable; 2457 2458 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2459 SFMMU_VALIDATE_HMERID(hat, rid, addr, len); 2460 2461 if (hat->sfmmu_rmstat) 2462 hat_resvstat(len, hat->sfmmu_as, addr); 2463 2464 #if defined(SF_ERRATA_57) 2465 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2466 (addr < errata57_limit) && (attr & PROT_EXEC) && 2467 !(flags & HAT_LOAD_SHARE)) { 2468 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2469 "user page executable"); 2470 attr &= ~PROT_EXEC; 2471 } 2472 #endif 2473 2474 /* Get number of pages */ 2475 npgs = len >> MMU_PAGESHIFT; 2476 2477 if (flags & HAT_LOAD_SHARE) { 2478 large_pages_disable = disable_ism_large_pages; 2479 } else { 2480 large_pages_disable = disable_large_pages; 2481 } 2482 2483 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2484 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2485 rid); 2486 return; 2487 } 2488 2489 while (npgs >= NHMENTS) { 2490 pp = *pps; 2491 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2492 /* 2493 * Check if this page size is disabled. 2494 */ 2495 if (large_pages_disable & (1 << ttesz)) 2496 continue; 2497 2498 numpg = TTEPAGES(ttesz); 2499 mapsz = numpg << MMU_PAGESHIFT; 2500 if ((npgs >= numpg) && 2501 IS_P2ALIGNED(addr, mapsz) && 2502 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2503 /* 2504 * At this point we have enough pages and 2505 * we know the virtual address and the pfn 2506 * are properly aligned. We still need 2507 * to check for physical contiguity but since 2508 * it is very likely that this is the case 2509 * we will assume they are so and undo 2510 * the request if necessary. It would 2511 * be great if we could get a hint flag 2512 * like HAT_CONTIG which would tell us 2513 * the pages are contigous for sure. 2514 */ 2515 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2516 attr, ttesz); 2517 if (!sfmmu_tteload_array(hat, &tte, addr, 2518 pps, flags, rid)) { 2519 break; 2520 } 2521 } 2522 } 2523 if (ttesz == TTE8K) { 2524 /* 2525 * We were not able to map array using a large page 2526 * batch a hmeblk or fraction at a time. 2527 */ 2528 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2529 & (NHMENTS-1); 2530 numpg = NHMENTS - numpg; 2531 ASSERT(numpg <= npgs); 2532 mapsz = numpg * MMU_PAGESIZE; 2533 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2534 numpg, rid); 2535 } 2536 addr += mapsz; 2537 npgs -= numpg; 2538 pps += numpg; 2539 } 2540 2541 if (npgs) { 2542 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2543 rid); 2544 } 2545 2546 /* 2547 * Check TSB and TLB page sizes. 2548 */ 2549 if ((flags & HAT_LOAD_SHARE) == 0) { 2550 sfmmu_check_page_sizes(hat, 1); 2551 } 2552 } 2553 2554 /* 2555 * Function tries to batch 8K pages into the same hme blk. 2556 */ 2557 static void 2558 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2559 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid) 2560 { 2561 tte_t tte; 2562 page_t *pp; 2563 struct hmehash_bucket *hmebp; 2564 struct hme_blk *hmeblkp; 2565 int index; 2566 2567 while (npgs) { 2568 /* 2569 * Acquire the hash bucket. 2570 */ 2571 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2572 rid); 2573 ASSERT(hmebp); 2574 2575 /* 2576 * Find the hment block. 
2577 */ 2578 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2579 TTE8K, flags, rid); 2580 ASSERT(hmeblkp); 2581 2582 do { 2583 /* 2584 * Make the tte. 2585 */ 2586 pp = *pps; 2587 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2588 2589 /* 2590 * Add the translation. 2591 */ 2592 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2593 vaddr, pps, flags, rid); 2594 2595 /* 2596 * Goto next page. 2597 */ 2598 pps++; 2599 npgs--; 2600 2601 /* 2602 * Goto next address. 2603 */ 2604 vaddr += MMU_PAGESIZE; 2605 2606 /* 2607 * Don't crossover into a different hmentblk. 2608 */ 2609 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2610 (NHMENTS-1)); 2611 2612 } while (index != 0 && npgs != 0); 2613 2614 /* 2615 * Release the hash bucket. 2616 */ 2617 2618 sfmmu_tteload_release_hashbucket(hmebp); 2619 } 2620 } 2621 2622 /* 2623 * Construct a tte for a page: 2624 * 2625 * tte_valid = 1 2626 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2627 * tte_size = size 2628 * tte_nfo = attr & HAT_NOFAULT 2629 * tte_ie = attr & HAT_STRUCTURE_LE 2630 * tte_hmenum = hmenum 2631 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2632 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2633 * tte_ref = 1 (optimization) 2634 * tte_wr_perm = attr & PROT_WRITE; 2635 * tte_no_sync = attr & HAT_NOSYNC 2636 * tte_lock = attr & SFMMU_LOCKTTE 2637 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2638 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2639 * tte_e = attr & SFMMU_SIDEFFECT 2640 * tte_priv = !(attr & PROT_USER) 2641 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2642 * tte_glb = 0 2643 */ 2644 void 2645 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2646 { 2647 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2648 2649 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2650 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2651 2652 if (TTE_IS_NOSYNC(ttep)) { 2653 TTE_SET_REF(ttep); 2654 if (TTE_IS_WRITABLE(ttep)) { 2655 TTE_SET_MOD(ttep); 2656 } 2657 } 2658 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2659 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2660 } 2661 } 2662 2663 /* 2664 * This function will add a translation to the hme_blk and allocate the 2665 * hme_blk if one does not exist. 2666 * If a page structure is specified then it will add the 2667 * corresponding hment to the mapping list. 2668 * It will also update the hmenum field for the tte. 2669 * 2670 * Currently this function is only used for kernel mappings. 2671 * So pass invalid region to sfmmu_tteload_array(). 2672 */ 2673 void 2674 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2675 uint_t flags) 2676 { 2677 ASSERT(sfmmup == ksfmmup); 2678 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2679 SFMMU_INVALID_SHMERID); 2680 } 2681 2682 /* 2683 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2684 * Assumes that a particular page size may only be resident in one TSB. 2685 */ 2686 static void 2687 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2688 { 2689 struct tsb_info *tsbinfop = NULL; 2690 uint64_t tag; 2691 struct tsbe *tsbe_addr; 2692 uint64_t tsb_base; 2693 uint_t tsb_size; 2694 int vpshift = MMU_PAGESHIFT; 2695 int phys = 0; 2696 2697 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2698 phys = ktsb_phys; 2699 if (ttesz >= TTE4M) { 2700 #ifndef sun4v 2701 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2702 #endif 2703 tsb_base = (phys)? 
ktsb4m_pbase : (uint64_t)ktsb4m_base; 2704 tsb_size = ktsb4m_szcode; 2705 } else { 2706 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2707 tsb_size = ktsb_szcode; 2708 } 2709 } else { 2710 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2711 2712 /* 2713 * If there isn't a TSB for this page size, or the TSB is 2714 * swapped out, there is nothing to do. Note that the latter 2715 * case seems impossible but can occur if hat_pageunload() 2716 * is called on an ISM mapping while the process is swapped 2717 * out. 2718 */ 2719 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2720 return; 2721 2722 /* 2723 * If another thread is in the middle of relocating a TSB 2724 * we can't unload the entry so set a flag so that the 2725 * TSB will be flushed before it can be accessed by the 2726 * process. 2727 */ 2728 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2729 if (ttep == NULL) 2730 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2731 return; 2732 } 2733 #if defined(UTSB_PHYS) 2734 phys = 1; 2735 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2736 #else 2737 tsb_base = (uint64_t)tsbinfop->tsb_va; 2738 #endif 2739 tsb_size = tsbinfop->tsb_szc; 2740 } 2741 if (ttesz >= TTE4M) 2742 vpshift = MMU_PAGESHIFT4M; 2743 2744 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2745 tag = sfmmu_make_tsbtag(vaddr); 2746 2747 if (ttep == NULL) { 2748 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2749 } else { 2750 if (ttesz >= TTE4M) { 2751 SFMMU_STAT(sf_tsb_load4m); 2752 } else { 2753 SFMMU_STAT(sf_tsb_load8k); 2754 } 2755 2756 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2757 } 2758 } 2759 2760 /* 2761 * Unmap all entries from [start, end) matching the given page size. 2762 * 2763 * This function is used primarily to unmap replicated 64K or 512K entries 2764 * from the TSB that are inserted using the base page size TSB pointer, but 2765 * it may also be called to unmap a range of addresses from the TSB. 2766 */ 2767 void 2768 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2769 { 2770 struct tsb_info *tsbinfop; 2771 uint64_t tag; 2772 struct tsbe *tsbe_addr; 2773 caddr_t vaddr; 2774 uint64_t tsb_base; 2775 int vpshift, vpgsz; 2776 uint_t tsb_size; 2777 int phys = 0; 2778 2779 /* 2780 * Assumptions: 2781 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2782 * at a time shooting down any valid entries we encounter. 2783 * 2784 * If ttesz >= 4M we walk the range 4M at a time shooting 2785 * down any valid mappings we find. 2786 */ 2787 if (sfmmup == ksfmmup) { 2788 phys = ktsb_phys; 2789 if (ttesz >= TTE4M) { 2790 #ifndef sun4v 2791 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2792 #endif 2793 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2794 tsb_size = ktsb4m_szcode; 2795 } else { 2796 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2797 tsb_size = ktsb_szcode; 2798 } 2799 } else { 2800 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2801 2802 /* 2803 * If there isn't a TSB for this page size, or the TSB is 2804 * swapped out, there is nothing to do. Note that the latter 2805 * case seems impossible but can occur if hat_pageunload() 2806 * is called on an ISM mapping while the process is swapped 2807 * out. 2808 */ 2809 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2810 return; 2811 2812 /* 2813 * If another thread is in the middle of relocating a TSB 2814 * we can't unload the entry so set a flag so that the 2815 * TSB will be flushed before it can be accessed by the 2816 * process. 
2817 */ 2818 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2819 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2820 return; 2821 } 2822 #if defined(UTSB_PHYS) 2823 phys = 1; 2824 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2825 #else 2826 tsb_base = (uint64_t)tsbinfop->tsb_va; 2827 #endif 2828 tsb_size = tsbinfop->tsb_szc; 2829 } 2830 if (ttesz >= TTE4M) { 2831 vpshift = MMU_PAGESHIFT4M; 2832 vpgsz = MMU_PAGESIZE4M; 2833 } else { 2834 vpshift = MMU_PAGESHIFT; 2835 vpgsz = MMU_PAGESIZE; 2836 } 2837 2838 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2839 tag = sfmmu_make_tsbtag(vaddr); 2840 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2841 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2842 } 2843 } 2844 2845 /* 2846 * Select the optimum TSB size given the number of mappings 2847 * that need to be cached. 2848 */ 2849 static int 2850 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2851 { 2852 int szc = 0; 2853 2854 #ifdef DEBUG 2855 if (tsb_grow_stress) { 2856 uint32_t randval = (uint32_t)gettick() >> 4; 2857 return (randval % (tsb_max_growsize + 1)); 2858 } 2859 #endif /* DEBUG */ 2860 2861 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2862 szc++; 2863 return (szc); 2864 } 2865 2866 /* 2867 * This function will add a translation to the hme_blk and allocate the 2868 * hme_blk if one does not exist. 2869 * If a page structure is specified then it will add the 2870 * corresponding hment to the mapping list. 2871 * It will also update the hmenum field for the tte. 2872 * Furthermore, it attempts to create a large page translation 2873 * for <addr,hat> at page array pps. It assumes addr and first 2874 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2875 */ 2876 static int 2877 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2878 page_t **pps, uint_t flags, uint_t rid) 2879 { 2880 struct hmehash_bucket *hmebp; 2881 struct hme_blk *hmeblkp; 2882 int ret; 2883 uint_t size; 2884 2885 /* 2886 * Get mapping size. 2887 */ 2888 size = TTE_CSZ(ttep); 2889 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2890 2891 /* 2892 * Acquire the hash bucket. 2893 */ 2894 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2895 ASSERT(hmebp); 2896 2897 /* 2898 * Find the hment block. 2899 */ 2900 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2901 rid); 2902 ASSERT(hmeblkp); 2903 2904 /* 2905 * Add the translation. 2906 */ 2907 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2908 rid); 2909 2910 /* 2911 * Release the hash bucket. 2912 */ 2913 sfmmu_tteload_release_hashbucket(hmebp); 2914 2915 return (ret); 2916 } 2917 2918 /* 2919 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2920 */ 2921 static struct hmehash_bucket * 2922 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size, 2923 uint_t rid) 2924 { 2925 struct hmehash_bucket *hmebp; 2926 int hmeshift; 2927 void *htagid = sfmmutohtagid(sfmmup, rid); 2928 2929 ASSERT(htagid != NULL); 2930 2931 hmeshift = HME_HASH_SHIFT(size); 2932 2933 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift); 2934 2935 SFMMU_HASH_LOCK(hmebp); 2936 2937 return (hmebp); 2938 } 2939 2940 /* 2941 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2942 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2943 * allocated. 
2944 */ 2945 static struct hme_blk * 2946 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2947 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid) 2948 { 2949 hmeblk_tag hblktag; 2950 int hmeshift; 2951 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2952 2953 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2954 2955 hblktag.htag_id = sfmmutohtagid(sfmmup, rid); 2956 ASSERT(hblktag.htag_id != NULL); 2957 hmeshift = HME_HASH_SHIFT(size); 2958 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2959 hblktag.htag_rehash = HME_HASH_REHASH(size); 2960 hblktag.htag_rid = rid; 2961 2962 ttearray_realloc: 2963 2964 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 2965 2966 /* 2967 * We block until hblk_reserve_lock is released; it's held by 2968 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2969 * replaced by a hblk from sfmmu8_cache. 2970 */ 2971 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2972 hblk_reserve_thread != curthread) { 2973 SFMMU_HASH_UNLOCK(hmebp); 2974 mutex_enter(&hblk_reserve_lock); 2975 mutex_exit(&hblk_reserve_lock); 2976 SFMMU_STAT(sf_hblk_reserve_hit); 2977 SFMMU_HASH_LOCK(hmebp); 2978 goto ttearray_realloc; 2979 } 2980 2981 if (hmeblkp == NULL) { 2982 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2983 hblktag, flags, rid); 2984 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2985 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2986 } else { 2987 /* 2988 * It is possible for 8k and 64k hblks to collide since they 2989 * have the same rehash value. This is because we 2990 * lazily free hblks and 8K/64K blks could be lingering. 2991 * If we find size mismatch we free the block and & try again. 2992 */ 2993 if (get_hblk_ttesz(hmeblkp) != size) { 2994 ASSERT(!hmeblkp->hblk_vcnt); 2995 ASSERT(!hmeblkp->hblk_hmecnt); 2996 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2997 &list, 0); 2998 goto ttearray_realloc; 2999 } 3000 if (hmeblkp->hblk_shw_bit) { 3001 /* 3002 * if the hblk was previously used as a shadow hblk then 3003 * we will change it to a normal hblk 3004 */ 3005 ASSERT(!hmeblkp->hblk_shared); 3006 if (hmeblkp->hblk_shw_mask) { 3007 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 3008 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3009 goto ttearray_realloc; 3010 } else { 3011 hmeblkp->hblk_shw_bit = 0; 3012 } 3013 } 3014 SFMMU_STAT(sf_hblk_hit); 3015 } 3016 3017 /* 3018 * hat_memload() should never call kmem_cache_free() for kernel hmeblks; 3019 * see block comment showing the stacktrace in sfmmu_hblk_alloc(); 3020 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will 3021 * just add these hmeblks to the per-cpu pending queue. 3022 */ 3023 sfmmu_hblks_list_purge(&list, 1); 3024 3025 ASSERT(get_hblk_ttesz(hmeblkp) == size); 3026 ASSERT(!hmeblkp->hblk_shw_bit); 3027 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3028 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3029 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 3030 3031 return (hmeblkp); 3032 } 3033 3034 /* 3035 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 3036 * otherwise. 
3037 */ 3038 static int 3039 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 3040 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 3041 { 3042 page_t *pp = *pps; 3043 int hmenum, size, remap; 3044 tte_t tteold, flush_tte; 3045 #ifdef DEBUG 3046 tte_t orig_old; 3047 #endif /* DEBUG */ 3048 struct sf_hment *sfhme; 3049 kmutex_t *pml, *pmtx; 3050 hatlock_t *hatlockp; 3051 int myflt; 3052 3053 /* 3054 * remove this panic when we decide to let user virtual address 3055 * space be >= USERLIMIT. 3056 */ 3057 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 3058 panic("user addr %p in kernel space", (void *)vaddr); 3059 #if defined(TTE_IS_GLOBAL) 3060 if (TTE_IS_GLOBAL(ttep)) 3061 panic("sfmmu_tteload: creating global tte"); 3062 #endif 3063 3064 #ifdef DEBUG 3065 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 3066 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 3067 panic("sfmmu_tteload: non cacheable memory tte"); 3068 #endif /* DEBUG */ 3069 3070 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 3071 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 3072 TTE_SET_REF(ttep); 3073 TTE_SET_MOD(ttep); 3074 } 3075 3076 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 3077 !TTE_IS_MOD(ttep)) { 3078 /* 3079 * Don't load TSB for dummy as in ISM. Also don't preload 3080 * the TSB if the TTE isn't writable since we're likely to 3081 * fault on it again -- preloading can be fairly expensive. 3082 */ 3083 flags |= SFMMU_NO_TSBLOAD; 3084 } 3085 3086 size = TTE_CSZ(ttep); 3087 switch (size) { 3088 case TTE8K: 3089 SFMMU_STAT(sf_tteload8k); 3090 break; 3091 case TTE64K: 3092 SFMMU_STAT(sf_tteload64k); 3093 break; 3094 case TTE512K: 3095 SFMMU_STAT(sf_tteload512k); 3096 break; 3097 case TTE4M: 3098 SFMMU_STAT(sf_tteload4m); 3099 break; 3100 case (TTE32M): 3101 SFMMU_STAT(sf_tteload32m); 3102 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3103 break; 3104 case (TTE256M): 3105 SFMMU_STAT(sf_tteload256m); 3106 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3107 break; 3108 } 3109 3110 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 3111 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 3112 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3113 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3114 3115 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3116 3117 /* 3118 * Need to grab mlist lock here so that pageunload 3119 * will not change tte behind us. 3120 */ 3121 if (pp) { 3122 pml = sfmmu_mlist_enter(pp); 3123 } 3124 3125 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3126 /* 3127 * Look for corresponding hment and if valid verify 3128 * pfns are equal. 
3129 */ 3130 remap = TTE_IS_VALID(&tteold); 3131 if (remap) { 3132 pfn_t new_pfn, old_pfn; 3133 3134 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3135 new_pfn = TTE_TO_PFN(vaddr, ttep); 3136 3137 if (flags & HAT_LOAD_REMAP) { 3138 /* make sure we are remapping same type of pages */ 3139 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3140 panic("sfmmu_tteload - tte remap io<->memory"); 3141 } 3142 if (old_pfn != new_pfn && 3143 (pp != NULL || sfhme->hme_page != NULL)) { 3144 panic("sfmmu_tteload - tte remap pp != NULL"); 3145 } 3146 } else if (old_pfn != new_pfn) { 3147 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3148 (void *)hmeblkp); 3149 } 3150 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3151 } 3152 3153 if (pp) { 3154 if (size == TTE8K) { 3155 #ifdef VAC 3156 /* 3157 * Handle VAC consistency 3158 */ 3159 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3160 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3161 } 3162 #endif 3163 3164 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3165 pmtx = sfmmu_page_enter(pp); 3166 PP_CLRRO(pp); 3167 sfmmu_page_exit(pmtx); 3168 } else if (!PP_ISMAPPED(pp) && 3169 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3170 pmtx = sfmmu_page_enter(pp); 3171 if (!(PP_ISMOD(pp))) { 3172 PP_SETRO(pp); 3173 } 3174 sfmmu_page_exit(pmtx); 3175 } 3176 3177 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3178 /* 3179 * sfmmu_pagearray_setup failed so return 3180 */ 3181 sfmmu_mlist_exit(pml); 3182 return (1); 3183 } 3184 } 3185 3186 /* 3187 * Make sure hment is not on a mapping list. 3188 */ 3189 ASSERT(remap || (sfhme->hme_page == NULL)); 3190 3191 /* if it is not a remap then hme->next better be NULL */ 3192 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3193 3194 if (flags & HAT_LOAD_LOCK) { 3195 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3196 panic("too high lckcnt-hmeblk %p", 3197 (void *)hmeblkp); 3198 } 3199 atomic_inc_32(&hmeblkp->hblk_lckcnt); 3200 3201 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3202 } 3203 3204 #ifdef VAC 3205 if (pp && PP_ISNC(pp)) { 3206 /* 3207 * If the physical page is marked to be uncacheable, like 3208 * by a vac conflict, make sure the new mapping is also 3209 * uncacheable. 3210 */ 3211 TTE_CLR_VCACHEABLE(ttep); 3212 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3213 } 3214 #endif 3215 ttep->tte_hmenum = hmenum; 3216 3217 #ifdef DEBUG 3218 orig_old = tteold; 3219 #endif /* DEBUG */ 3220 3221 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3222 if ((sfmmup == KHATID) && 3223 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3224 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3225 } 3226 #ifdef DEBUG 3227 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3228 #endif /* DEBUG */ 3229 } 3230 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3231 3232 if (!TTE_IS_VALID(&tteold)) { 3233 3234 atomic_inc_16(&hmeblkp->hblk_vcnt); 3235 if (rid == SFMMU_INVALID_SHMERID) { 3236 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]); 3237 } else { 3238 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3239 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3240 /* 3241 * We already accounted for region ttecnt's in sfmmu 3242 * during hat_join_region() processing. Here we 3243 * only update ttecnt's in region struture. 
3244 */ 3245 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]); 3246 } 3247 } 3248 3249 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3250 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3251 sfmmup != ksfmmup) { 3252 uchar_t tteflag = 1 << size; 3253 if (rid == SFMMU_INVALID_SHMERID) { 3254 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3255 hatlockp = sfmmu_hat_enter(sfmmup); 3256 sfmmup->sfmmu_tteflags |= tteflag; 3257 sfmmu_hat_exit(hatlockp); 3258 } 3259 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3260 hatlockp = sfmmu_hat_enter(sfmmup); 3261 sfmmup->sfmmu_rtteflags |= tteflag; 3262 sfmmu_hat_exit(hatlockp); 3263 } 3264 /* 3265 * Update the current CPU tsbmiss area, so the current thread 3266 * won't need to take the tsbmiss for the new pagesize. 3267 * The other threads in the process will update their tsb 3268 * miss area lazily in sfmmu_tsbmiss_exception() when they 3269 * fail to find the translation for a newly added pagesize. 3270 */ 3271 if (size > TTE64K && myflt) { 3272 struct tsbmiss *tsbmp; 3273 kpreempt_disable(); 3274 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3275 if (rid == SFMMU_INVALID_SHMERID) { 3276 if (!(tsbmp->uhat_tteflags & tteflag)) { 3277 tsbmp->uhat_tteflags |= tteflag; 3278 } 3279 } else { 3280 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3281 tsbmp->uhat_rtteflags |= tteflag; 3282 } 3283 } 3284 kpreempt_enable(); 3285 } 3286 } 3287 3288 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3289 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3290 hatlockp = sfmmu_hat_enter(sfmmup); 3291 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3292 sfmmu_hat_exit(hatlockp); 3293 } 3294 3295 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3296 hw_tte.tte_intlo; 3297 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3298 hw_tte.tte_inthi; 3299 3300 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3301 /* 3302 * If remap and new tte differs from old tte we need 3303 * to sync the mod bit and flush TLB/TSB. We don't 3304 * need to sync ref bit because we currently always set 3305 * ref bit in tteload. 3306 */ 3307 ASSERT(TTE_IS_REF(ttep)); 3308 if (TTE_IS_MOD(&tteold)) { 3309 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3310 } 3311 /* 3312 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3313 * hmes are only used for read only text. Adding this code for 3314 * completeness and future use of shared hmeblks with writable 3315 * mappings of VMODSORT vnodes. 3316 */ 3317 if (hmeblkp->hblk_shared) { 3318 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3319 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3320 xt_sync(cpuset); 3321 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3322 } else { 3323 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3324 xt_sync(sfmmup->sfmmu_cpusran); 3325 } 3326 } 3327 3328 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3329 /* 3330 * We only preload 8K and 4M mappings into the TSB, since 3331 * 64K and 512K mappings are replicated and hence don't 3332 * have a single, unique TSB entry. Ditto for 32M/256M. 3333 */ 3334 if (size == TTE8K || size == TTE4M) { 3335 sf_scd_t *scdp; 3336 hatlockp = sfmmu_hat_enter(sfmmup); 3337 /* 3338 * Don't preload private TSB if the mapping is used 3339 * by the shctx in the SCD. 
3340 */ 3341 scdp = sfmmup->sfmmu_scdp; 3342 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3343 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3344 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3345 size); 3346 } 3347 sfmmu_hat_exit(hatlockp); 3348 } 3349 } 3350 if (pp) { 3351 if (!remap) { 3352 HME_ADD(sfhme, pp); 3353 atomic_inc_16(&hmeblkp->hblk_hmecnt); 3354 ASSERT(hmeblkp->hblk_hmecnt > 0); 3355 3356 /* 3357 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3358 * see pageunload() for comment. 3359 */ 3360 } 3361 sfmmu_mlist_exit(pml); 3362 } 3363 3364 return (0); 3365 } 3366 /* 3367 * Function unlocks hash bucket. 3368 */ 3369 static void 3370 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3371 { 3372 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3373 SFMMU_HASH_UNLOCK(hmebp); 3374 } 3375 3376 /* 3377 * function which checks and sets up page array for a large 3378 * translation. Will set p_vcolor, p_index, p_ro fields. 3379 * Assumes addr and pfnum of first page are properly aligned. 3380 * Will check for physical contiguity. If check fails it return 3381 * non null. 3382 */ 3383 static int 3384 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3385 { 3386 int i, index, ttesz; 3387 pfn_t pfnum; 3388 pgcnt_t npgs; 3389 page_t *pp, *pp1; 3390 kmutex_t *pmtx; 3391 #ifdef VAC 3392 int osz; 3393 int cflags = 0; 3394 int vac_err = 0; 3395 #endif 3396 int newidx = 0; 3397 3398 ttesz = TTE_CSZ(ttep); 3399 3400 ASSERT(ttesz > TTE8K); 3401 3402 npgs = TTEPAGES(ttesz); 3403 index = PAGESZ_TO_INDEX(ttesz); 3404 3405 pfnum = (*pps)->p_pagenum; 3406 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3407 3408 /* 3409 * Save the first pp so we can do HAT_TMPNC at the end. 3410 */ 3411 pp1 = *pps; 3412 #ifdef VAC 3413 osz = fnd_mapping_sz(pp1); 3414 #endif 3415 3416 for (i = 0; i < npgs; i++, pps++) { 3417 pp = *pps; 3418 ASSERT(PAGE_LOCKED(pp)); 3419 ASSERT(pp->p_szc >= ttesz); 3420 ASSERT(pp->p_szc == pp1->p_szc); 3421 ASSERT(sfmmu_mlist_held(pp)); 3422 3423 /* 3424 * XXX is it possible to maintain P_RO on the root only? 3425 */ 3426 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3427 pmtx = sfmmu_page_enter(pp); 3428 PP_CLRRO(pp); 3429 sfmmu_page_exit(pmtx); 3430 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 3431 !PP_ISMOD(pp)) { 3432 pmtx = sfmmu_page_enter(pp); 3433 if (!(PP_ISMOD(pp))) { 3434 PP_SETRO(pp); 3435 } 3436 sfmmu_page_exit(pmtx); 3437 } 3438 3439 /* 3440 * If this is a remap we skip vac & contiguity checks. 3441 */ 3442 if (remap) 3443 continue; 3444 3445 /* 3446 * set p_vcolor and detect any vac conflicts. 3447 */ 3448 #ifdef VAC 3449 if (vac_err == 0) { 3450 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 3451 3452 } 3453 #endif 3454 3455 /* 3456 * Save current index in case we need to undo it. 3457 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 3458 * "SFMMU_INDEX_SHIFT 6" 3459 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 3460 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 3461 * 3462 * So: index = PAGESZ_TO_INDEX(ttesz); 3463 * if ttesz == 1 then index = 0x2 3464 * 2 then index = 0x4 3465 * 3 then index = 0x8 3466 * 4 then index = 0x10 3467 * 5 then index = 0x20 3468 * The code below checks if it's a new pagesize (ie, newidx) 3469 * in case we need to take it back out of p_index, 3470 * and then or's the new index into the existing index. 
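 * For instance (illustrative), a page that already has a 64K mapping (so p_index contains 0x2) and is now also being mapped at 4M ends up with PP_MAPINDEX(pp) == (0x2 | 0x8) == 0xa.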
3471 */ 3472 if ((PP_MAPINDEX(pp) & index) == 0) 3473 newidx = 1; 3474 pp->p_index = (PP_MAPINDEX(pp) | index); 3475 3476 /* 3477 * contiguity check 3478 */ 3479 if (pp->p_pagenum != pfnum) { 3480 /* 3481 * If we fail the contiguity test then 3482 * the only thing we need to fix is the p_index field. 3483 * We might get a few extra flushes but since this 3484 * path is rare that is ok. The p_ro field will 3485 * get automatically fixed on the next tteload to 3486 * the page. NO TNC bit is set yet. 3487 */ 3488 while (i >= 0) { 3489 pp = *pps; 3490 if (newidx) 3491 pp->p_index = (PP_MAPINDEX(pp) & 3492 ~index); 3493 pps--; 3494 i--; 3495 } 3496 return (1); 3497 } 3498 pfnum++; 3499 addr += MMU_PAGESIZE; 3500 } 3501 3502 #ifdef VAC 3503 if (vac_err) { 3504 if (ttesz > osz) { 3505 /* 3506 * There are some smaller mappings that causes vac 3507 * conflicts. Convert all existing small mappings to 3508 * TNC. 3509 */ 3510 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3511 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3512 npgs); 3513 } else { 3514 /* EMPTY */ 3515 /* 3516 * If there exists an big page mapping, 3517 * that means the whole existing big page 3518 * has TNC setting already. No need to covert to 3519 * TNC again. 3520 */ 3521 ASSERT(PP_ISTNC(pp1)); 3522 } 3523 } 3524 #endif /* VAC */ 3525 3526 return (0); 3527 } 3528 3529 #ifdef VAC 3530 /* 3531 * Routine that detects vac consistency for a large page. It also 3532 * sets virtual color for all pp's for this big mapping. 3533 */ 3534 static int 3535 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3536 { 3537 int vcolor, ocolor; 3538 3539 ASSERT(sfmmu_mlist_held(pp)); 3540 3541 if (PP_ISNC(pp)) { 3542 return (HAT_TMPNC); 3543 } 3544 3545 vcolor = addr_to_vcolor(addr); 3546 if (PP_NEWPAGE(pp)) { 3547 PP_SET_VCOLOR(pp, vcolor); 3548 return (0); 3549 } 3550 3551 ocolor = PP_GET_VCOLOR(pp); 3552 if (ocolor == vcolor) { 3553 return (0); 3554 } 3555 3556 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 3557 /* 3558 * Previous user of page had a differnet color 3559 * but since there are no current users 3560 * we just flush the cache and change the color. 3561 * As an optimization for large pages we flush the 3562 * entire cache of that color and set a flag. 3563 */ 3564 SFMMU_STAT(sf_pgcolor_conflict); 3565 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3566 CacheColor_SetFlushed(*cflags, ocolor); 3567 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3568 } 3569 PP_SET_VCOLOR(pp, vcolor); 3570 return (0); 3571 } 3572 3573 /* 3574 * We got a real conflict with a current mapping. 3575 * set flags to start unencaching all mappings 3576 * and return failure so we restart looping 3577 * the pp array from the beginning. 3578 */ 3579 return (HAT_TMPNC); 3580 } 3581 #endif /* VAC */ 3582 3583 /* 3584 * creates a large page shadow hmeblk for a tte. 3585 * The purpose of this routine is to allow us to do quick unloads because 3586 * the vm layer can easily pass a very large but sparsely populated range. 
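 * * To make the bookkeeping concrete (an informal sketch of existing behaviour, not new rules): a load of an 8K tte gets a 512K shadow hblk covering its range, and each larger page size gets a shadow one size up (64K -> 512K, 512K -> 4M, 4M -> 32M where supported). The shadow's hblk_shw_mask records, one bit per child slot (hence the vshift < 8 assert below), which sub-ranges may still hold smaller hblks, so a later large-page unload can find and free them without walking the entire range.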
3587 */ 3588 static struct hme_blk * 3589 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3590 { 3591 struct hmehash_bucket *hmebp; 3592 hmeblk_tag hblktag; 3593 int hmeshift, size, vshift; 3594 uint_t shw_mask, newshw_mask; 3595 struct hme_blk *hmeblkp; 3596 3597 ASSERT(sfmmup != KHATID); 3598 if (mmu_page_sizes == max_mmu_page_sizes) { 3599 ASSERT(ttesz < TTE256M); 3600 } else { 3601 ASSERT(ttesz < TTE4M); 3602 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3603 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3604 } 3605 3606 if (ttesz == TTE8K) { 3607 size = TTE512K; 3608 } else { 3609 size = ++ttesz; 3610 } 3611 3612 hblktag.htag_id = sfmmup; 3613 hmeshift = HME_HASH_SHIFT(size); 3614 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3615 hblktag.htag_rehash = HME_HASH_REHASH(size); 3616 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3617 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3618 3619 SFMMU_HASH_LOCK(hmebp); 3620 3621 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3622 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3623 if (hmeblkp == NULL) { 3624 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3625 hblktag, flags, SFMMU_INVALID_SHMERID); 3626 } 3627 ASSERT(hmeblkp); 3628 if (!hmeblkp->hblk_shw_mask) { 3629 /* 3630 * if this is an unused hblk it was just allocated or could 3631 * potentially be a previous large page hblk so we need to 3632 * set the shadow bit. 3633 */ 3634 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3635 hmeblkp->hblk_shw_bit = 1; 3636 } else if (hmeblkp->hblk_shw_bit == 0) { 3637 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3638 (void *)hmeblkp); 3639 } 3640 ASSERT(hmeblkp->hblk_shw_bit == 1); 3641 ASSERT(!hmeblkp->hblk_shared); 3642 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3643 ASSERT(vshift < 8); 3644 /* 3645 * Atomically set shw mask bit 3646 */ 3647 do { 3648 shw_mask = hmeblkp->hblk_shw_mask; 3649 newshw_mask = shw_mask | (1 << vshift); 3650 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask, 3651 newshw_mask); 3652 } while (newshw_mask != shw_mask); 3653 3654 SFMMU_HASH_UNLOCK(hmebp); 3655 3656 return (hmeblkp); 3657 } 3658 3659 /* 3660 * This routine cleans up a previous shadow hmeblk and changes it to 3661 * a regular hblk. This happens rarely but it is possible 3662 * when a process wants to use large pages and there are hblks still 3663 * lying around from the previous address space that used these hmeblks. 3664 * The alternative was to clean up the shadow hblks at unload time 3665 * but since so few user processes actually use large pages, it is 3666 * better to be lazy and clean up at this time.
3667 */ 3668 static void 3669 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3670 struct hmehash_bucket *hmebp) 3671 { 3672 caddr_t addr, endaddr; 3673 int hashno, size; 3674 3675 ASSERT(hmeblkp->hblk_shw_bit); 3676 ASSERT(!hmeblkp->hblk_shared); 3677 3678 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3679 3680 if (!hmeblkp->hblk_shw_mask) { 3681 hmeblkp->hblk_shw_bit = 0; 3682 return; 3683 } 3684 addr = (caddr_t)get_hblk_base(hmeblkp); 3685 endaddr = get_hblk_endaddr(hmeblkp); 3686 size = get_hblk_ttesz(hmeblkp); 3687 hashno = size - 1; 3688 ASSERT(hashno > 0); 3689 SFMMU_HASH_UNLOCK(hmebp); 3690 3691 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3692 3693 SFMMU_HASH_LOCK(hmebp); 3694 } 3695 3696 static void 3697 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3698 int hashno) 3699 { 3700 int hmeshift, shadow = 0; 3701 hmeblk_tag hblktag; 3702 struct hmehash_bucket *hmebp; 3703 struct hme_blk *hmeblkp; 3704 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3705 3706 ASSERT(hashno > 0); 3707 hblktag.htag_id = sfmmup; 3708 hblktag.htag_rehash = hashno; 3709 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3710 3711 hmeshift = HME_HASH_SHIFT(hashno); 3712 3713 while (addr < endaddr) { 3714 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3715 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3716 SFMMU_HASH_LOCK(hmebp); 3717 /* inline HME_HASH_SEARCH */ 3718 hmeblkp = hmebp->hmeblkp; 3719 pr_hblk = NULL; 3720 while (hmeblkp) { 3721 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3722 /* found hme_blk */ 3723 ASSERT(!hmeblkp->hblk_shared); 3724 if (hmeblkp->hblk_shw_bit) { 3725 if (hmeblkp->hblk_shw_mask) { 3726 shadow = 1; 3727 sfmmu_shadow_hcleanup(sfmmup, 3728 hmeblkp, hmebp); 3729 break; 3730 } else { 3731 hmeblkp->hblk_shw_bit = 0; 3732 } 3733 } 3734 3735 /* 3736 * Hblk_hmecnt and hblk_vcnt could be non-zero 3737 * since hblk_unload() does not guarantee that. 3738 * 3739 * XXX - this could cause tteload() to spin 3740 * where sfmmu_shadow_hcleanup() is called. 3741 */ 3742 } 3743 3744 nx_hblk = hmeblkp->hblk_next; 3745 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3746 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3747 &list, 0); 3748 } else { 3749 pr_hblk = hmeblkp; 3750 } 3751 hmeblkp = nx_hblk; 3752 } 3753 3754 SFMMU_HASH_UNLOCK(hmebp); 3755 3756 if (shadow) { 3757 /* 3758 * We found another shadow hblk, so we cleaned up its 3759 * children. We need to go back and clean up 3760 * the original hblk, so we don't change the 3761 * addr. 3762 */ 3763 shadow = 0; 3764 } else { 3765 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3766 (1 << hmeshift)); 3767 } 3768 } 3769 sfmmu_hblks_list_purge(&list, 0); 3770 } 3771 3772 /* 3773 * This routine's job is to delete stale invalid shared hmeregion hmeblks that 3774 * may still linger on after pageunload.
3775 */ 3776 static void 3777 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3778 { 3779 int hmeshift; 3780 hmeblk_tag hblktag; 3781 struct hmehash_bucket *hmebp; 3782 struct hme_blk *hmeblkp; 3783 struct hme_blk *pr_hblk; 3784 struct hme_blk *list = NULL; 3785 3786 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3787 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3788 3789 hmeshift = HME_HASH_SHIFT(ttesz); 3790 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3791 hblktag.htag_rehash = ttesz; 3792 hblktag.htag_rid = rid; 3793 hblktag.htag_id = srdp; 3794 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3795 3796 SFMMU_HASH_LOCK(hmebp); 3797 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3798 if (hmeblkp != NULL) { 3799 ASSERT(hmeblkp->hblk_shared); 3800 ASSERT(!hmeblkp->hblk_shw_bit); 3801 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3802 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3803 } 3804 ASSERT(!hmeblkp->hblk_lckcnt); 3805 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3806 &list, 0); 3807 } 3808 SFMMU_HASH_UNLOCK(hmebp); 3809 sfmmu_hblks_list_purge(&list, 0); 3810 } 3811 3812 /* ARGSUSED */ 3813 static void 3814 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3815 size_t r_size, void *r_obj, u_offset_t r_objoff) 3816 { 3817 } 3818 3819 /* 3820 * Searches for an hmeblk which maps addr, then unloads this mapping 3821 * and updates *eaddrp, if the hmeblk is found. 3822 */ 3823 static void 3824 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3825 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3826 { 3827 int hmeshift; 3828 hmeblk_tag hblktag; 3829 struct hmehash_bucket *hmebp; 3830 struct hme_blk *hmeblkp; 3831 struct hme_blk *pr_hblk; 3832 struct hme_blk *list = NULL; 3833 3834 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3835 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3836 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3837 3838 hmeshift = HME_HASH_SHIFT(ttesz); 3839 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3840 hblktag.htag_rehash = ttesz; 3841 hblktag.htag_rid = rid; 3842 hblktag.htag_id = srdp; 3843 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3844 3845 SFMMU_HASH_LOCK(hmebp); 3846 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3847 if (hmeblkp != NULL) { 3848 ASSERT(hmeblkp->hblk_shared); 3849 ASSERT(!hmeblkp->hblk_lckcnt); 3850 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3851 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3852 eaddr, NULL, HAT_UNLOAD); 3853 ASSERT(*eaddrp > addr); 3854 } 3855 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3856 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3857 &list, 0); 3858 } 3859 SFMMU_HASH_UNLOCK(hmebp); 3860 sfmmu_hblks_list_purge(&list, 0); 3861 } 3862 3863 static void 3864 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3865 { 3866 int ttesz = rgnp->rgn_pgszc; 3867 size_t rsz = rgnp->rgn_size; 3868 caddr_t rsaddr = rgnp->rgn_saddr; 3869 caddr_t readdr = rsaddr + rsz; 3870 caddr_t rhsaddr; 3871 caddr_t va; 3872 uint_t rid = rgnp->rgn_id; 3873 caddr_t cbsaddr; 3874 caddr_t cbeaddr; 3875 hat_rgn_cb_func_t rcbfunc; 3876 ulong_t cnt; 3877 3878 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3879 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3880 3881 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3882 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3883 if (ttesz < HBLK_MIN_TTESZ) { 3884 ttesz = HBLK_MIN_TTESZ; 3885 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3886 } else { 3887 rhsaddr = rsaddr; 3888 } 3889 3890 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) 
{ 3891 rcbfunc = sfmmu_rgn_cb_noop; 3892 } 3893 3894 while (ttesz >= HBLK_MIN_TTESZ) { 3895 cbsaddr = rsaddr; 3896 cbeaddr = rsaddr; 3897 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3898 ttesz--; 3899 continue; 3900 } 3901 cnt = 0; 3902 va = rsaddr; 3903 while (va < readdr) { 3904 ASSERT(va >= rhsaddr); 3905 if (va != cbeaddr) { 3906 if (cbeaddr != cbsaddr) { 3907 ASSERT(cbeaddr > cbsaddr); 3908 (*rcbfunc)(cbsaddr, cbeaddr, 3909 rsaddr, rsz, rgnp->rgn_obj, 3910 rgnp->rgn_objoff); 3911 } 3912 cbsaddr = va; 3913 cbeaddr = va; 3914 } 3915 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3916 ttesz, &cbeaddr); 3917 cnt++; 3918 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3919 } 3920 if (cbeaddr != cbsaddr) { 3921 ASSERT(cbeaddr > cbsaddr); 3922 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3923 rsz, rgnp->rgn_obj, 3924 rgnp->rgn_objoff); 3925 } 3926 ttesz--; 3927 } 3928 } 3929 3930 /* 3931 * Release one hardware address translation lock on the given address range. 3932 */ 3933 void 3934 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3935 { 3936 struct hmehash_bucket *hmebp; 3937 hmeblk_tag hblktag; 3938 int hmeshift, hashno = 1; 3939 struct hme_blk *hmeblkp, *list = NULL; 3940 caddr_t endaddr; 3941 3942 ASSERT(sfmmup != NULL); 3943 3944 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 3945 ASSERT((len & MMU_PAGEOFFSET) == 0); 3946 endaddr = addr + len; 3947 hblktag.htag_id = sfmmup; 3948 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3949 3950 /* 3951 * Spitfire supports 4 page sizes. 3952 * Most pages are expected to be of the smallest page size (8K) and 3953 * these will not need to be rehashed. 64K pages also don't need to be 3954 * rehashed because an hmeblk spans 64K of address space. 512K pages 3955 * might need 1 rehash and and 4M pages might need 2 rehashes. 3956 */ 3957 while (addr < endaddr) { 3958 hmeshift = HME_HASH_SHIFT(hashno); 3959 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3960 hblktag.htag_rehash = hashno; 3961 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3962 3963 SFMMU_HASH_LOCK(hmebp); 3964 3965 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3966 if (hmeblkp != NULL) { 3967 ASSERT(!hmeblkp->hblk_shared); 3968 /* 3969 * If we encounter a shadow hmeblk then 3970 * we know there are no valid hmeblks mapping 3971 * this address at this size or larger. 3972 * Just increment address by the smallest 3973 * page size. 3974 */ 3975 if (hmeblkp->hblk_shw_bit) { 3976 addr += MMU_PAGESIZE; 3977 } else { 3978 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3979 endaddr); 3980 } 3981 SFMMU_HASH_UNLOCK(hmebp); 3982 hashno = 1; 3983 continue; 3984 } 3985 SFMMU_HASH_UNLOCK(hmebp); 3986 3987 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3988 /* 3989 * We have traversed the whole list and rehashed 3990 * if necessary without finding the address to unlock 3991 * which should never happen. 3992 */ 3993 panic("sfmmu_unlock: addr not found. 
" 3994 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3995 } else { 3996 hashno++; 3997 } 3998 } 3999 4000 sfmmu_hblks_list_purge(&list, 0); 4001 } 4002 4003 void 4004 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 4005 hat_region_cookie_t rcookie) 4006 { 4007 sf_srd_t *srdp; 4008 sf_region_t *rgnp; 4009 int ttesz; 4010 uint_t rid; 4011 caddr_t eaddr; 4012 caddr_t va; 4013 int hmeshift; 4014 hmeblk_tag hblktag; 4015 struct hmehash_bucket *hmebp; 4016 struct hme_blk *hmeblkp; 4017 struct hme_blk *pr_hblk; 4018 struct hme_blk *list; 4019 4020 if (rcookie == HAT_INVALID_REGION_COOKIE) { 4021 hat_unlock(sfmmup, addr, len); 4022 return; 4023 } 4024 4025 ASSERT(sfmmup != NULL); 4026 ASSERT(sfmmup != ksfmmup); 4027 4028 srdp = sfmmup->sfmmu_srdp; 4029 rid = (uint_t)((uint64_t)rcookie); 4030 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS); 4031 eaddr = addr + len; 4032 va = addr; 4033 list = NULL; 4034 rgnp = srdp->srd_hmergnp[rid]; 4035 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 4036 4037 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 4038 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 4039 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 4040 ttesz = HBLK_MIN_TTESZ; 4041 } else { 4042 ttesz = rgnp->rgn_pgszc; 4043 } 4044 while (va < eaddr) { 4045 while (ttesz < rgnp->rgn_pgszc && 4046 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 4047 ttesz++; 4048 } 4049 while (ttesz >= HBLK_MIN_TTESZ) { 4050 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 4051 ttesz--; 4052 continue; 4053 } 4054 hmeshift = HME_HASH_SHIFT(ttesz); 4055 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 4056 hblktag.htag_rehash = ttesz; 4057 hblktag.htag_rid = rid; 4058 hblktag.htag_id = srdp; 4059 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 4060 SFMMU_HASH_LOCK(hmebp); 4061 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, 4062 &list); 4063 if (hmeblkp == NULL) { 4064 SFMMU_HASH_UNLOCK(hmebp); 4065 ttesz--; 4066 continue; 4067 } 4068 ASSERT(hmeblkp->hblk_shared); 4069 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 4070 ASSERT(va >= eaddr || 4071 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 4072 SFMMU_HASH_UNLOCK(hmebp); 4073 break; 4074 } 4075 if (ttesz < HBLK_MIN_TTESZ) { 4076 panic("hat_unlock_region: addr not found " 4077 "addr %p hat %p", (void *)va, (void *)sfmmup); 4078 } 4079 } 4080 sfmmu_hblks_list_purge(&list, 0); 4081 } 4082 4083 /* 4084 * Function to unlock a range of addresses in an hmeblk. It returns the 4085 * next address that needs to be unlocked. 4086 * Should be called with the hash lock held. 
4087 */ 4088 static caddr_t 4089 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 4090 { 4091 struct sf_hment *sfhme; 4092 tte_t tteold, ttemod; 4093 int ttesz, ret; 4094 4095 ASSERT(in_hblk_range(hmeblkp, addr)); 4096 ASSERT(hmeblkp->hblk_shw_bit == 0); 4097 4098 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4099 ttesz = get_hblk_ttesz(hmeblkp); 4100 4101 HBLKTOHME(sfhme, hmeblkp, addr); 4102 while (addr < endaddr) { 4103 readtte: 4104 sfmmu_copytte(&sfhme->hme_tte, &tteold); 4105 if (TTE_IS_VALID(&tteold)) { 4106 4107 ttemod = tteold; 4108 4109 ret = sfmmu_modifytte_try(&tteold, &ttemod, 4110 &sfhme->hme_tte); 4111 4112 if (ret < 0) 4113 goto readtte; 4114 4115 if (hmeblkp->hblk_lckcnt == 0) 4116 panic("zero hblk lckcnt"); 4117 4118 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4119 (uintptr_t)endaddr) 4120 panic("can't unlock large tte"); 4121 4122 ASSERT(hmeblkp->hblk_lckcnt > 0); 4123 atomic_dec_32(&hmeblkp->hblk_lckcnt); 4124 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4125 } else { 4126 panic("sfmmu_hblk_unlock: invalid tte"); 4127 } 4128 addr += TTEBYTES(ttesz); 4129 sfhme++; 4130 } 4131 return (addr); 4132 } 4133 4134 /* 4135 * Physical Address Mapping Framework 4136 * 4137 * General rules: 4138 * 4139 * (1) Applies only to seg_kmem memory pages. To make things easier, 4140 * seg_kpm addresses are also accepted by the routines, but nothing 4141 * is done with them since by definition their PA mappings are static. 4142 * (2) hat_add_callback() may only be called while holding the page lock 4143 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4144 * or passing HAC_PAGELOCK flag. 4145 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4146 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4147 * callbacks may not sleep or acquire adaptive mutex locks. 4148 * (4) Either prehandler() or posthandler() (but not both) may be specified 4149 * as being NULL. Specifying an errhandler() is optional. 4150 * 4151 * Details of using the framework: 4152 * 4153 * registering a callback (hat_register_callback()) 4154 * 4155 * Pass prehandler, posthandler, errhandler addresses 4156 * as described below. If capture_cpus argument is nonzero, 4157 * suspend callback to the prehandler will occur with CPUs 4158 * captured and executing xc_loop() and CPUs will remain 4159 * captured until after the posthandler suspend callback 4160 * occurs. 4161 * 4162 * adding a callback (hat_add_callback()) 4163 * 4164 * as_pagelock(); 4165 * hat_add_callback(); 4166 * save returned pfn in private data structures or program registers; 4167 * as_pageunlock(); 4168 * 4169 * prehandler() 4170 * 4171 * Stop all accesses by physical address to this memory page. 4172 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4173 * adaptive locks. The second, SUSPEND, is called at high PIL with 4174 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4175 * locks must be XCALL_PIL or higher locks). 4176 * 4177 * May return the following errors: 4178 * EIO: A fatal error has occurred. This will result in panic. 4179 * EAGAIN: The page cannot be suspended. This will fail the 4180 * relocation. 4181 * 0: Success. 4182 * 4183 * posthandler() 4184 * 4185 * Save new pfn in private data structures or program registers; 4186 * not allowed to fail (non-zero return values will result in panic). 4187 * 4188 * errhandler() 4189 * 4190 * called when an error occurs related to the callback. 
Currently 4191 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4192 * a page is being freed, but there are still outstanding callback(s) 4193 * registered on the page. 4194 * 4195 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4196 * 4197 * stop using physical address 4198 * hat_delete_callback(); 4199 * 4200 */ 4201 4202 /* 4203 * Register a callback class. Each subsystem should do this once and 4204 * cache the id_t returned for use in setting up and tearing down callbacks. 4205 * 4206 * There is no facility for removing callback IDs once they are created; 4207 * the "key" should be unique for each module, so in case a module is unloaded 4208 * and subsequently re-loaded, we can recycle the module's previous entry. 4209 */ 4210 id_t 4211 hat_register_callback(int key, 4212 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4213 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4214 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4215 int capture_cpus) 4216 { 4217 id_t id; 4218 4219 /* 4220 * Search the table for a pre-existing callback associated with 4221 * the identifier "key". If one exists, we re-use that entry in 4222 * the table for this instance, otherwise we assign the next 4223 * available table slot. 4224 */ 4225 for (id = 0; id < sfmmu_max_cb_id; id++) { 4226 if (sfmmu_cb_table[id].key == key) 4227 break; 4228 } 4229 4230 if (id == sfmmu_max_cb_id) { 4231 id = sfmmu_cb_nextid++; 4232 if (id >= sfmmu_max_cb_id) 4233 panic("hat_register_callback: out of callback IDs"); 4234 } 4235 4236 ASSERT(prehandler != NULL || posthandler != NULL); 4237 4238 sfmmu_cb_table[id].key = key; 4239 sfmmu_cb_table[id].prehandler = prehandler; 4240 sfmmu_cb_table[id].posthandler = posthandler; 4241 sfmmu_cb_table[id].errhandler = errhandler; 4242 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4243 4244 return (id); 4245 } 4246 4247 #define HAC_COOKIE_NONE (void *)-1 4248 4249 /* 4250 * Add relocation callbacks to the specified addr/len which will be called 4251 * when relocating the associated page. See the description of pre and 4252 * posthandler above for more details. 4253 * 4254 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4255 * locked internally so the caller must be able to deal with the callback 4256 * running even before this function has returned. If HAC_PAGELOCK is not 4257 * set, it is assumed that the underlying memory pages are locked. 4258 * 4259 * Since the caller must track the individual page boundaries anyway, 4260 * we only allow a callback to be added to a single page (large 4261 * or small). Thus [addr, addr + len) MUST be contained within a single 4262 * page. 4263 * 4264 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4265 * _provided_that_ a unique parameter is specified for each callback. 4266 * If multiple callbacks are registered on the same range the callback will 4267 * be invoked with each unique parameter. Registering the same callback with 4268 * the same argument more than once will result in corrupted kernel state. 4269 * 4270 * Returns the pfn of the underlying kernel page in *rpfn 4271 * on success, or PFN_INVALID on failure. 4272 * 4273 * cookiep (if passed) provides storage space for an opaque cookie 4274 * to return later to hat_delete_callback(). This cookie makes the callback 4275 * deletion significantly quicker by avoiding a potentially lengthy hash 4276 * search. 
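 *
 * As a rough usage sketch only (my_cb_id and my_pvt stand in for a real
 * callback id and private argument; they are not names defined in this
 * file):
 *
 *	pfn_t pfn;
 *	void *cookie;
 *
 *	if (hat_add_callback(my_cb_id, vaddr, MMU_PAGESIZE,
 *	    HAC_SLEEP | HAC_PAGELOCK, my_pvt, &pfn, &cookie) == 0) {
 *		... do physical I/O using pfn ...
 *		hat_delete_callback(vaddr, MMU_PAGESIZE, my_pvt,
 *		    HAC_PAGELOCK, cookie);
 *	}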
4277 * 4278 * Returns values: 4279 * 0: success 4280 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4281 * EINVAL: callback ID is not valid 4282 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4283 * space 4284 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4285 */ 4286 int 4287 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4288 void *pvt, pfn_t *rpfn, void **cookiep) 4289 { 4290 struct hmehash_bucket *hmebp; 4291 hmeblk_tag hblktag; 4292 struct hme_blk *hmeblkp; 4293 int hmeshift, hashno; 4294 caddr_t saddr, eaddr, baseaddr; 4295 struct pa_hment *pahmep; 4296 struct sf_hment *sfhmep, *osfhmep; 4297 kmutex_t *pml; 4298 tte_t tte; 4299 page_t *pp; 4300 vnode_t *vp; 4301 u_offset_t off; 4302 pfn_t pfn; 4303 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4304 int locked = 0; 4305 4306 /* 4307 * For KPM mappings, just return the physical address since we 4308 * don't need to register any callbacks. 4309 */ 4310 if (IS_KPM_ADDR(vaddr)) { 4311 uint64_t paddr; 4312 SFMMU_KPM_VTOP(vaddr, paddr); 4313 *rpfn = btop(paddr); 4314 if (cookiep != NULL) 4315 *cookiep = HAC_COOKIE_NONE; 4316 return (0); 4317 } 4318 4319 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4320 *rpfn = PFN_INVALID; 4321 return (EINVAL); 4322 } 4323 4324 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4325 *rpfn = PFN_INVALID; 4326 return (ENOMEM); 4327 } 4328 4329 sfhmep = &pahmep->sfment; 4330 4331 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4332 eaddr = saddr + len; 4333 4334 rehash: 4335 /* Find the mapping(s) for this page */ 4336 for (hashno = TTE64K, hmeblkp = NULL; 4337 hmeblkp == NULL && hashno <= mmu_hashcnt; 4338 hashno++) { 4339 hmeshift = HME_HASH_SHIFT(hashno); 4340 hblktag.htag_id = ksfmmup; 4341 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4342 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4343 hblktag.htag_rehash = hashno; 4344 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4345 4346 SFMMU_HASH_LOCK(hmebp); 4347 4348 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4349 4350 if (hmeblkp == NULL) 4351 SFMMU_HASH_UNLOCK(hmebp); 4352 } 4353 4354 if (hmeblkp == NULL) { 4355 kmem_cache_free(pa_hment_cache, pahmep); 4356 *rpfn = PFN_INVALID; 4357 return (ENXIO); 4358 } 4359 4360 ASSERT(!hmeblkp->hblk_shared); 4361 4362 HBLKTOHME(osfhmep, hmeblkp, saddr); 4363 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4364 4365 if (!TTE_IS_VALID(&tte)) { 4366 SFMMU_HASH_UNLOCK(hmebp); 4367 kmem_cache_free(pa_hment_cache, pahmep); 4368 *rpfn = PFN_INVALID; 4369 return (ENXIO); 4370 } 4371 4372 /* 4373 * Make sure the boundaries for the callback fall within this 4374 * single mapping. 4375 */ 4376 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4377 ASSERT(saddr >= baseaddr); 4378 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4379 SFMMU_HASH_UNLOCK(hmebp); 4380 kmem_cache_free(pa_hment_cache, pahmep); 4381 *rpfn = PFN_INVALID; 4382 return (ERANGE); 4383 } 4384 4385 pfn = sfmmu_ttetopfn(&tte, vaddr); 4386 4387 /* 4388 * The pfn may not have a page_t underneath in which case we 4389 * just return it. This can happen if we are doing I/O to a 4390 * static portion of the kernel's address space, for instance. 
4391 */ 4392 pp = osfhmep->hme_page; 4393 if (pp == NULL) { 4394 SFMMU_HASH_UNLOCK(hmebp); 4395 kmem_cache_free(pa_hment_cache, pahmep); 4396 *rpfn = pfn; 4397 if (cookiep) 4398 *cookiep = HAC_COOKIE_NONE; 4399 return (0); 4400 } 4401 ASSERT(pp == PP_PAGEROOT(pp)); 4402 4403 vp = pp->p_vnode; 4404 off = pp->p_offset; 4405 4406 pml = sfmmu_mlist_enter(pp); 4407 4408 if (flags & HAC_PAGELOCK) { 4409 if (!page_trylock(pp, SE_SHARED)) { 4410 /* 4411 * Somebody is holding SE_EXCL lock. Might even be 4412 * hat_page_relocate(). 4413 * Drop all our locks, lookup the page in &kvp, and 4414 * retry. 4415 * If it doesn't exist in &kvp and &kvps[KV_ZVP], 4416 * then we must be dealing with a kernel mapped 4417 * page which doesn't actually belong to 4418 * segkmem so we punt. 4419 */ 4420 sfmmu_mlist_exit(pml); 4421 SFMMU_HASH_UNLOCK(hmebp); 4422 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4423 4424 /* check &kvps[KV_ZVP] before giving up */ 4425 if (pp == NULL) 4426 pp = page_lookup(&kvps[KV_ZVP], 4427 (u_offset_t)saddr, SE_SHARED); 4428 4429 /* Okay, we didn't find it, give up */ 4430 if (pp == NULL) { 4431 kmem_cache_free(pa_hment_cache, pahmep); 4432 *rpfn = pfn; 4433 if (cookiep) 4434 *cookiep = HAC_COOKIE_NONE; 4435 return (0); 4436 } 4437 page_unlock(pp); 4438 goto rehash; 4439 } 4440 locked = 1; 4441 } 4442 4443 if (!PAGE_LOCKED(pp) && !panicstr) 4444 panic("hat_add_callback: page 0x%p not locked", (void *)pp); 4445 4446 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4447 pp->p_offset != off) { 4448 /* 4449 * The page moved before we got our hands on it. Drop 4450 * all the locks and try again. 4451 */ 4452 ASSERT((flags & HAC_PAGELOCK) != 0); 4453 sfmmu_mlist_exit(pml); 4454 SFMMU_HASH_UNLOCK(hmebp); 4455 page_unlock(pp); 4456 locked = 0; 4457 goto rehash; 4458 } 4459 4460 if (!VN_ISKAS(vp)) { 4461 /* 4462 * This is not a segkmem page but another page which 4463 * has been kernel mapped. It had better have at least 4464 * a share lock on it. Return the pfn. 4465 */ 4466 sfmmu_mlist_exit(pml); 4467 SFMMU_HASH_UNLOCK(hmebp); 4468 if (locked) 4469 page_unlock(pp); 4470 kmem_cache_free(pa_hment_cache, pahmep); 4471 ASSERT(PAGE_LOCKED(pp)); 4472 *rpfn = pfn; 4473 if (cookiep) 4474 *cookiep = HAC_COOKIE_NONE; 4475 return (0); 4476 } 4477 4478 /* 4479 * Setup this pa_hment and link its embedded dummy sf_hment into 4480 * the mapping list. 4481 */ 4482 pp->p_share++; 4483 pahmep->cb_id = callback_id; 4484 pahmep->addr = vaddr; 4485 pahmep->len = len; 4486 pahmep->refcnt = 1; 4487 pahmep->flags = 0; 4488 pahmep->pvt = pvt; 4489 4490 sfhmep->hme_tte.ll = 0; 4491 sfhmep->hme_data = pahmep; 4492 sfhmep->hme_prev = osfhmep; 4493 sfhmep->hme_next = osfhmep->hme_next; 4494 4495 if (osfhmep->hme_next) 4496 osfhmep->hme_next->hme_prev = sfhmep; 4497 4498 osfhmep->hme_next = sfhmep; 4499 4500 sfmmu_mlist_exit(pml); 4501 SFMMU_HASH_UNLOCK(hmebp); 4502 4503 if (locked) 4504 page_unlock(pp); 4505 4506 *rpfn = pfn; 4507 if (cookiep) 4508 *cookiep = (void *)pahmep; 4509 4510 return (0); 4511 } 4512 4513 /* 4514 * Remove the relocation callbacks from the specified addr/len. 
4515 */ 4516 void 4517 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4518 void *cookie) 4519 { 4520 struct hmehash_bucket *hmebp; 4521 hmeblk_tag hblktag; 4522 struct hme_blk *hmeblkp; 4523 int hmeshift, hashno; 4524 caddr_t saddr; 4525 struct pa_hment *pahmep; 4526 struct sf_hment *sfhmep, *osfhmep; 4527 kmutex_t *pml; 4528 tte_t tte; 4529 page_t *pp; 4530 vnode_t *vp; 4531 u_offset_t off; 4532 int locked = 0; 4533 4534 /* 4535 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4536 * remove so just return. 4537 */ 4538 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4539 return; 4540 4541 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4542 4543 rehash: 4544 /* Find the mapping(s) for this page */ 4545 for (hashno = TTE64K, hmeblkp = NULL; 4546 hmeblkp == NULL && hashno <= mmu_hashcnt; 4547 hashno++) { 4548 hmeshift = HME_HASH_SHIFT(hashno); 4549 hblktag.htag_id = ksfmmup; 4550 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4551 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4552 hblktag.htag_rehash = hashno; 4553 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4554 4555 SFMMU_HASH_LOCK(hmebp); 4556 4557 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4558 4559 if (hmeblkp == NULL) 4560 SFMMU_HASH_UNLOCK(hmebp); 4561 } 4562 4563 if (hmeblkp == NULL) 4564 return; 4565 4566 ASSERT(!hmeblkp->hblk_shared); 4567 4568 HBLKTOHME(osfhmep, hmeblkp, saddr); 4569 4570 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4571 if (!TTE_IS_VALID(&tte)) { 4572 SFMMU_HASH_UNLOCK(hmebp); 4573 return; 4574 } 4575 4576 pp = osfhmep->hme_page; 4577 if (pp == NULL) { 4578 SFMMU_HASH_UNLOCK(hmebp); 4579 ASSERT(cookie == NULL); 4580 return; 4581 } 4582 4583 vp = pp->p_vnode; 4584 off = pp->p_offset; 4585 4586 pml = sfmmu_mlist_enter(pp); 4587 4588 if (flags & HAC_PAGELOCK) { 4589 if (!page_trylock(pp, SE_SHARED)) { 4590 /* 4591 * Somebody is holding SE_EXCL lock. Might even be 4592 * hat_page_relocate(). 4593 * Drop all our locks, lookup the page in &kvp, and 4594 * retry. 4595 * If it doesn't exist in &kvp and &kvps[KV_ZVP], 4596 * then we must be dealing with a kernel mapped 4597 * page which doesn't actually belong to 4598 * segkmem so we punt. 4599 */ 4600 sfmmu_mlist_exit(pml); 4601 SFMMU_HASH_UNLOCK(hmebp); 4602 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4603 4604 /* check &kvps[KV_ZVP] before giving up */ 4605 if (pp == NULL) 4606 pp = page_lookup(&kvps[KV_ZVP], 4607 (u_offset_t)saddr, SE_SHARED); 4608 4609 if (pp == NULL) { 4610 ASSERT(cookie == NULL); 4611 return; 4612 } 4613 page_unlock(pp); 4614 goto rehash; 4615 } 4616 locked = 1; 4617 } 4618 4619 ASSERT(PAGE_LOCKED(pp)); 4620 4621 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4622 pp->p_offset != off) { 4623 /* 4624 * The page moved before we got our hands on it. Drop 4625 * all the locks and try again. 4626 */ 4627 ASSERT((flags & HAC_PAGELOCK) != 0); 4628 sfmmu_mlist_exit(pml); 4629 SFMMU_HASH_UNLOCK(hmebp); 4630 page_unlock(pp); 4631 locked = 0; 4632 goto rehash; 4633 } 4634 4635 if (!VN_ISKAS(vp)) { 4636 /* 4637 * This is not a segkmem page but another page which 4638 * has been kernel mapped. 
4639 */ 4640 sfmmu_mlist_exit(pml); 4641 SFMMU_HASH_UNLOCK(hmebp); 4642 if (locked) 4643 page_unlock(pp); 4644 ASSERT(cookie == NULL); 4645 return; 4646 } 4647 4648 if (cookie != NULL) { 4649 pahmep = (struct pa_hment *)cookie; 4650 sfhmep = &pahmep->sfment; 4651 } else { 4652 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4653 sfhmep = sfhmep->hme_next) { 4654 4655 /* 4656 * skip va<->pa mappings 4657 */ 4658 if (!IS_PAHME(sfhmep)) 4659 continue; 4660 4661 pahmep = sfhmep->hme_data; 4662 ASSERT(pahmep != NULL); 4663 4664 /* 4665 * if pa_hment matches, remove it 4666 */ 4667 if ((pahmep->pvt == pvt) && 4668 (pahmep->addr == vaddr) && 4669 (pahmep->len == len)) { 4670 break; 4671 } 4672 } 4673 } 4674 4675 if (sfhmep == NULL) { 4676 if (!panicstr) { 4677 panic("hat_delete_callback: pa_hment not found, pp %p", 4678 (void *)pp); 4679 } 4680 return; 4681 } 4682 4683 /* 4684 * Note: at this point a valid kernel mapping must still be 4685 * present on this page. 4686 */ 4687 pp->p_share--; 4688 if (pp->p_share <= 0) 4689 panic("hat_delete_callback: zero p_share"); 4690 4691 if (--pahmep->refcnt == 0) { 4692 if (pahmep->flags != 0) 4693 panic("hat_delete_callback: pa_hment is busy"); 4694 4695 /* 4696 * Remove sfhmep from the mapping list for the page. 4697 */ 4698 if (sfhmep->hme_prev) { 4699 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4700 } else { 4701 pp->p_mapping = sfhmep->hme_next; 4702 } 4703 4704 if (sfhmep->hme_next) 4705 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4706 4707 sfmmu_mlist_exit(pml); 4708 SFMMU_HASH_UNLOCK(hmebp); 4709 4710 if (locked) 4711 page_unlock(pp); 4712 4713 kmem_cache_free(pa_hment_cache, pahmep); 4714 return; 4715 } 4716 4717 sfmmu_mlist_exit(pml); 4718 SFMMU_HASH_UNLOCK(hmebp); 4719 if (locked) 4720 page_unlock(pp); 4721 } 4722 4723 /* 4724 * hat_probe returns 1 if the translation for the address 'addr' is 4725 * loaded, zero otherwise. 4726 * 4727 * hat_probe should be used only for advisory purposes because it may 4728 * occasionally return the wrong value. The implementation must guarantee that 4729 * returning the wrong value is a very rare event. hat_probe is used 4730 * to implement optimizations in the segment drivers.
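 * (For example, a segment driver may skip redundant load work for an address that hat_probe() already reports as mapped.)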
4731 * 4732 */ 4733 int 4734 hat_probe(struct hat *sfmmup, caddr_t addr) 4735 { 4736 pfn_t pfn; 4737 tte_t tte; 4738 4739 ASSERT(sfmmup != NULL); 4740 4741 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 4742 4743 if (sfmmup == ksfmmup) { 4744 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4745 == PFN_SUSPENDED) { 4746 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4747 } 4748 } else { 4749 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4750 } 4751 4752 if (pfn != PFN_INVALID) 4753 return (1); 4754 else 4755 return (0); 4756 } 4757 4758 ssize_t 4759 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4760 { 4761 tte_t tte; 4762 4763 if (sfmmup == ksfmmup) { 4764 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4765 return (-1); 4766 } 4767 } else { 4768 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4769 return (-1); 4770 } 4771 } 4772 4773 ASSERT(TTE_IS_VALID(&tte)); 4774 return (TTEBYTES(TTE_CSZ(&tte))); 4775 } 4776 4777 uint_t 4778 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4779 { 4780 tte_t tte; 4781 4782 if (sfmmup == ksfmmup) { 4783 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4784 tte.ll = 0; 4785 } 4786 } else { 4787 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4788 tte.ll = 0; 4789 } 4790 } 4791 if (TTE_IS_VALID(&tte)) { 4792 *attr = sfmmu_ptov_attr(&tte); 4793 return (0); 4794 } 4795 *attr = 0; 4796 return ((uint_t)0xffffffff); 4797 } 4798 4799 /* 4800 * Enables more attributes on the specified address range (i.e. logical OR) 4801 */ 4802 void 4803 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4804 { 4805 ASSERT(hat->sfmmu_as != NULL); 4806 4807 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4808 } 4809 4810 /* 4811 * Assigns attributes to the specified address range. All the attributes 4812 * are specified. 4813 */ 4814 void 4815 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4816 { 4817 ASSERT(hat->sfmmu_as != NULL); 4818 4819 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4820 } 4821 4822 /* 4823 * Remove attributes on the specified address range (i.e. logical NAND) 4824 */ 4825 void 4826 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4827 { 4828 ASSERT(hat->sfmmu_as != NULL); 4829 4830 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4831 } 4832 4833 /* 4834 * Change attributes on an address range to those specified by attr and mode.
4835 */ 4836 static void 4837 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4838 int mode) 4839 { 4840 struct hmehash_bucket *hmebp; 4841 hmeblk_tag hblktag; 4842 int hmeshift, hashno = 1; 4843 struct hme_blk *hmeblkp, *list = NULL; 4844 caddr_t endaddr; 4845 cpuset_t cpuset; 4846 demap_range_t dmr; 4847 4848 CPUSET_ZERO(cpuset); 4849 4850 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 4851 ASSERT((len & MMU_PAGEOFFSET) == 0); 4852 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4853 4854 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4855 ((addr + len) > (caddr_t)USERLIMIT)) { 4856 panic("user addr %p in kernel space", 4857 (void *)addr); 4858 } 4859 4860 endaddr = addr + len; 4861 hblktag.htag_id = sfmmup; 4862 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4863 DEMAP_RANGE_INIT(sfmmup, &dmr); 4864 4865 while (addr < endaddr) { 4866 hmeshift = HME_HASH_SHIFT(hashno); 4867 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4868 hblktag.htag_rehash = hashno; 4869 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4870 4871 SFMMU_HASH_LOCK(hmebp); 4872 4873 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4874 if (hmeblkp != NULL) { 4875 ASSERT(!hmeblkp->hblk_shared); 4876 /* 4877 * We've encountered a shadow hmeblk so skip the range 4878 * of the next smaller mapping size. 4879 */ 4880 if (hmeblkp->hblk_shw_bit) { 4881 ASSERT(sfmmup != ksfmmup); 4882 ASSERT(hashno > 1); 4883 addr = (caddr_t)P2END((uintptr_t)addr, 4884 TTEBYTES(hashno - 1)); 4885 } else { 4886 addr = sfmmu_hblk_chgattr(sfmmup, 4887 hmeblkp, addr, endaddr, &dmr, attr, mode); 4888 } 4889 SFMMU_HASH_UNLOCK(hmebp); 4890 hashno = 1; 4891 continue; 4892 } 4893 SFMMU_HASH_UNLOCK(hmebp); 4894 4895 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4896 /* 4897 * We have traversed the whole list and rehashed 4898 * if necessary without finding the address to chgattr. 4899 * This is ok, so we increment the address by the 4900 * smallest hmeblk range for kernel mappings or for 4901 * user mappings with no large pages, and the largest 4902 * hmeblk range, to account for shadow hmeblks, for 4903 * user mappings with large pages and continue. 4904 */ 4905 if (sfmmup == ksfmmup) 4906 addr = (caddr_t)P2END((uintptr_t)addr, 4907 TTEBYTES(1)); 4908 else 4909 addr = (caddr_t)P2END((uintptr_t)addr, 4910 TTEBYTES(hashno)); 4911 hashno = 1; 4912 } else { 4913 hashno++; 4914 } 4915 } 4916 4917 sfmmu_hblks_list_purge(&list, 0); 4918 DEMAP_RANGE_FLUSH(&dmr); 4919 cpuset = sfmmup->sfmmu_cpusran; 4920 xt_sync(cpuset); 4921 } 4922 4923 /* 4924 * This function chgattr on a range of addresses in an hmeblk. It returns the 4925 * next addres that needs to be chgattr. 4926 * It should be called with the hash lock held. 4927 * XXX It should be possible to optimize chgattr by not flushing every time but 4928 * on the other hand: 4929 * 1. do one flush crosscall. 4930 * 2. 
only flush if we are increasing permissions (make sure this will work) 4931 */ 4932 static caddr_t 4933 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4934 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4935 { 4936 tte_t tte, tteattr, tteflags, ttemod; 4937 struct sf_hment *sfhmep; 4938 int ttesz; 4939 struct page *pp = NULL; 4940 kmutex_t *pml, *pmtx; 4941 int ret; 4942 int use_demap_range; 4943 #if defined(SF_ERRATA_57) 4944 int check_exec; 4945 #endif 4946 4947 ASSERT(in_hblk_range(hmeblkp, addr)); 4948 ASSERT(hmeblkp->hblk_shw_bit == 0); 4949 ASSERT(!hmeblkp->hblk_shared); 4950 4951 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4952 ttesz = get_hblk_ttesz(hmeblkp); 4953 4954 /* 4955 * Flush the current demap region if addresses have been 4956 * skipped or the page size doesn't match. 4957 */ 4958 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4959 if (use_demap_range) { 4960 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4961 } else if (dmrp != NULL) { 4962 DEMAP_RANGE_FLUSH(dmrp); 4963 } 4964 4965 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4966 #if defined(SF_ERRATA_57) 4967 check_exec = (sfmmup != ksfmmup) && 4968 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4969 TTE_IS_EXECUTABLE(&tteattr); 4970 #endif 4971 HBLKTOHME(sfhmep, hmeblkp, addr); 4972 while (addr < endaddr) { 4973 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4974 if (TTE_IS_VALID(&tte)) { 4975 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4976 /* 4977 * if the new attr is the same as old 4978 * continue 4979 */ 4980 goto next_addr; 4981 } 4982 if (!TTE_IS_WRITABLE(&tteattr)) { 4983 /* 4984 * make sure we clear hw modify bit if we 4985 * removing write protections 4986 */ 4987 tteflags.tte_intlo |= TTE_HWWR_INT; 4988 } 4989 4990 pml = NULL; 4991 pp = sfhmep->hme_page; 4992 if (pp) { 4993 pml = sfmmu_mlist_enter(pp); 4994 } 4995 4996 if (pp != sfhmep->hme_page) { 4997 /* 4998 * tte must have been unloaded. 4999 */ 5000 ASSERT(pml); 5001 sfmmu_mlist_exit(pml); 5002 continue; 5003 } 5004 5005 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5006 5007 ttemod = tte; 5008 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 5009 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 5010 5011 #if defined(SF_ERRATA_57) 5012 if (check_exec && addr < errata57_limit) 5013 ttemod.tte_exec_perm = 0; 5014 #endif 5015 ret = sfmmu_modifytte_try(&tte, &ttemod, 5016 &sfhmep->hme_tte); 5017 5018 if (ret < 0) { 5019 /* tte changed underneath us */ 5020 if (pml) { 5021 sfmmu_mlist_exit(pml); 5022 } 5023 continue; 5024 } 5025 5026 if (tteflags.tte_intlo & TTE_HWWR_INT) { 5027 /* 5028 * need to sync if we are clearing modify bit. 5029 */ 5030 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5031 } 5032 5033 if (pp && PP_ISRO(pp)) { 5034 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 5035 pmtx = sfmmu_page_enter(pp); 5036 PP_CLRRO(pp); 5037 sfmmu_page_exit(pmtx); 5038 } 5039 } 5040 5041 if (ret > 0 && use_demap_range) { 5042 DEMAP_RANGE_MARKPG(dmrp, addr); 5043 } else if (ret > 0) { 5044 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5045 } 5046 5047 if (pml) { 5048 sfmmu_mlist_exit(pml); 5049 } 5050 } 5051 next_addr: 5052 addr += TTEBYTES(ttesz); 5053 sfhmep++; 5054 DEMAP_RANGE_NEXTPG(dmrp); 5055 } 5056 return (addr); 5057 } 5058 5059 /* 5060 * This routine converts virtual attributes to physical ones. It will 5061 * update the tteflags field with the tte mask corresponding to the attributes 5062 * affected and it returns the new attributes. 
It will also clear the modify 5063 * bit if we are taking away write permission. This is necessary since the 5064 * modify bit is the hardware permission bit and we need to clear it in order 5065 * to detect write faults. 5066 */ 5067 static uint64_t 5068 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 5069 { 5070 tte_t ttevalue; 5071 5072 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5073 5074 switch (mode) { 5075 case SFMMU_CHGATTR: 5076 /* all attributes specified */ 5077 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5078 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5079 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5080 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5081 break; 5082 case SFMMU_SETATTR: 5083 ASSERT(!(attr & ~HAT_PROT_MASK)); 5084 ttemaskp->ll = 0; 5085 ttevalue.ll = 0; 5086 /* 5087 * a valid tte implies exec and read for sfmmu 5088 * so no need to do anything about them. 5089 * since priviledged access implies user access 5090 * PROT_USER doesn't make sense either. 5091 */ 5092 if (attr & PROT_WRITE) { 5093 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5094 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5095 } 5096 break; 5097 case SFMMU_CLRATTR: 5098 /* attributes will be nand with current ones */ 5099 if (attr & ~(PROT_WRITE | PROT_USER)) { 5100 panic("sfmmu: attr %x not supported", attr); 5101 } 5102 ttemaskp->ll = 0; 5103 ttevalue.ll = 0; 5104 if (attr & PROT_WRITE) { 5105 /* clear both writable and modify bit */ 5106 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5107 } 5108 if (attr & PROT_USER) { 5109 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5110 ttevalue.tte_intlo |= TTE_PRIV_INT; 5111 } 5112 break; 5113 default: 5114 panic("sfmmu_vtop_attr: bad mode %x", mode); 5115 } 5116 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5117 return (ttevalue.ll); 5118 } 5119 5120 static uint_t 5121 sfmmu_ptov_attr(tte_t *ttep) 5122 { 5123 uint_t attr; 5124 5125 ASSERT(TTE_IS_VALID(ttep)); 5126 5127 attr = PROT_READ; 5128 5129 if (TTE_IS_WRITABLE(ttep)) { 5130 attr |= PROT_WRITE; 5131 } 5132 if (TTE_IS_EXECUTABLE(ttep)) { 5133 attr |= PROT_EXEC; 5134 } 5135 if (!TTE_IS_PRIVILEGED(ttep)) { 5136 attr |= PROT_USER; 5137 } 5138 if (TTE_IS_NFO(ttep)) { 5139 attr |= HAT_NOFAULT; 5140 } 5141 if (TTE_IS_NOSYNC(ttep)) { 5142 attr |= HAT_NOSYNC; 5143 } 5144 if (TTE_IS_SIDEFFECT(ttep)) { 5145 attr |= SFMMU_SIDEFFECT; 5146 } 5147 if (!TTE_IS_VCACHEABLE(ttep)) { 5148 attr |= SFMMU_UNCACHEVTTE; 5149 } 5150 if (!TTE_IS_PCACHEABLE(ttep)) { 5151 attr |= SFMMU_UNCACHEPTTE; 5152 } 5153 return (attr); 5154 } 5155 5156 /* 5157 * hat_chgprot is a deprecated hat call. New segment drivers 5158 * should store all attributes and use hat_*attr calls. 5159 * 5160 * Change the protections in the virtual address range 5161 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5162 * then remove write permission, leaving the other 5163 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 
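 * For example, hat_chgprot(hat, addr, len, (uint_t)~PROT_WRITE) write-protects the range while leaving the remaining permissions as they are.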
5164 * 5165 */ 5166 void 5167 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5168 { 5169 struct hmehash_bucket *hmebp; 5170 hmeblk_tag hblktag; 5171 int hmeshift, hashno = 1; 5172 struct hme_blk *hmeblkp, *list = NULL; 5173 caddr_t endaddr; 5174 cpuset_t cpuset; 5175 demap_range_t dmr; 5176 5177 ASSERT((len & MMU_PAGEOFFSET) == 0); 5178 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5179 5180 ASSERT(sfmmup->sfmmu_as != NULL); 5181 5182 CPUSET_ZERO(cpuset); 5183 5184 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 5185 ((addr + len) > (caddr_t)USERLIMIT)) { 5186 panic("user addr %p vprot %x in kernel space", 5187 (void *)addr, vprot); 5188 } 5189 endaddr = addr + len; 5190 hblktag.htag_id = sfmmup; 5191 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5192 DEMAP_RANGE_INIT(sfmmup, &dmr); 5193 5194 while (addr < endaddr) { 5195 hmeshift = HME_HASH_SHIFT(hashno); 5196 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5197 hblktag.htag_rehash = hashno; 5198 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5199 5200 SFMMU_HASH_LOCK(hmebp); 5201 5202 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5203 if (hmeblkp != NULL) { 5204 ASSERT(!hmeblkp->hblk_shared); 5205 /* 5206 * We've encountered a shadow hmeblk so skip the range 5207 * of the next smaller mapping size. 5208 */ 5209 if (hmeblkp->hblk_shw_bit) { 5210 ASSERT(sfmmup != ksfmmup); 5211 ASSERT(hashno > 1); 5212 addr = (caddr_t)P2END((uintptr_t)addr, 5213 TTEBYTES(hashno - 1)); 5214 } else { 5215 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 5216 addr, endaddr, &dmr, vprot); 5217 } 5218 SFMMU_HASH_UNLOCK(hmebp); 5219 hashno = 1; 5220 continue; 5221 } 5222 SFMMU_HASH_UNLOCK(hmebp); 5223 5224 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5225 /* 5226 * We have traversed the whole list and rehashed 5227 * if necessary without finding the address to chgprot. 5228 * This is ok, so we increment the address by the 5229 * smallest hmeblk range for kernel mappings and the 5230 * largest hmeblk range, to account for shadow hmeblks, 5231 * for user mappings and continue. 5232 */ 5233 if (sfmmup == ksfmmup) 5234 addr = (caddr_t)P2END((uintptr_t)addr, 5235 TTEBYTES(1)); 5236 else 5237 addr = (caddr_t)P2END((uintptr_t)addr, 5238 TTEBYTES(hashno)); 5239 hashno = 1; 5240 } else { 5241 hashno++; 5242 } 5243 } 5244 5245 sfmmu_hblks_list_purge(&list, 0); 5246 DEMAP_RANGE_FLUSH(&dmr); 5247 cpuset = sfmmup->sfmmu_cpusran; 5248 xt_sync(cpuset); 5249 } 5250 5251 /* 5252 * This function chgprots a range of addresses in an hmeblk. It returns the 5253 * next address that needs to be chgprot. 5254 * It should be called with the hash lock held. 5255 * XXX It should be possible to optimize chgprot by not flushing every time but 5256 * on the other hand: 5257 * 1. do one flush crosscall. 5258 * 2.
only flush if we are increasing permissions (make sure this will work) 5259 */ 5260 static caddr_t 5261 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5262 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5263 { 5264 uint_t pprot; 5265 tte_t tte, ttemod; 5266 struct sf_hment *sfhmep; 5267 uint_t tteflags; 5268 int ttesz; 5269 struct page *pp = NULL; 5270 kmutex_t *pml, *pmtx; 5271 int ret; 5272 int use_demap_range; 5273 #if defined(SF_ERRATA_57) 5274 int check_exec; 5275 #endif 5276 5277 ASSERT(in_hblk_range(hmeblkp, addr)); 5278 ASSERT(hmeblkp->hblk_shw_bit == 0); 5279 ASSERT(!hmeblkp->hblk_shared); 5280 5281 #ifdef DEBUG 5282 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5283 (endaddr < get_hblk_endaddr(hmeblkp))) { 5284 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5285 } 5286 #endif /* DEBUG */ 5287 5288 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5289 ttesz = get_hblk_ttesz(hmeblkp); 5290 5291 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5292 #if defined(SF_ERRATA_57) 5293 check_exec = (sfmmup != ksfmmup) && 5294 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5295 ((vprot & PROT_EXEC) == PROT_EXEC); 5296 #endif 5297 HBLKTOHME(sfhmep, hmeblkp, addr); 5298 5299 /* 5300 * Flush the current demap region if addresses have been 5301 * skipped or the page size doesn't match. 5302 */ 5303 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5304 if (use_demap_range) { 5305 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5306 } else if (dmrp != NULL) { 5307 DEMAP_RANGE_FLUSH(dmrp); 5308 } 5309 5310 while (addr < endaddr) { 5311 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5312 if (TTE_IS_VALID(&tte)) { 5313 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5314 /* 5315 * if the new protection is the same as old 5316 * continue 5317 */ 5318 goto next_addr; 5319 } 5320 pml = NULL; 5321 pp = sfhmep->hme_page; 5322 if (pp) { 5323 pml = sfmmu_mlist_enter(pp); 5324 } 5325 if (pp != sfhmep->hme_page) { 5326 /* 5327 * tte most have been unloaded 5328 * underneath us. Recheck 5329 */ 5330 ASSERT(pml); 5331 sfmmu_mlist_exit(pml); 5332 continue; 5333 } 5334 5335 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5336 5337 ttemod = tte; 5338 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5339 #if defined(SF_ERRATA_57) 5340 if (check_exec && addr < errata57_limit) 5341 ttemod.tte_exec_perm = 0; 5342 #endif 5343 ret = sfmmu_modifytte_try(&tte, &ttemod, 5344 &sfhmep->hme_tte); 5345 5346 if (ret < 0) { 5347 /* tte changed underneath us */ 5348 if (pml) { 5349 sfmmu_mlist_exit(pml); 5350 } 5351 continue; 5352 } 5353 5354 if (tteflags & TTE_HWWR_INT) { 5355 /* 5356 * need to sync if we are clearing modify bit. 5357 */ 5358 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5359 } 5360 5361 if (pp && PP_ISRO(pp)) { 5362 if (pprot & TTE_WRPRM_INT) { 5363 pmtx = sfmmu_page_enter(pp); 5364 PP_CLRRO(pp); 5365 sfmmu_page_exit(pmtx); 5366 } 5367 } 5368 5369 if (ret > 0 && use_demap_range) { 5370 DEMAP_RANGE_MARKPG(dmrp, addr); 5371 } else if (ret > 0) { 5372 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5373 } 5374 5375 if (pml) { 5376 sfmmu_mlist_exit(pml); 5377 } 5378 } 5379 next_addr: 5380 addr += TTEBYTES(ttesz); 5381 sfhmep++; 5382 DEMAP_RANGE_NEXTPG(dmrp); 5383 } 5384 return (addr); 5385 } 5386 5387 /* 5388 * This routine is deprecated and should only be used by hat_chgprot. 5389 * The correct routine is sfmmu_vtop_attr. 5390 * This routine converts virtual page protections to physical ones. 
It will 5391 * update the tteflags field with the tte mask corresponding to the protections 5392 * affected and it returns the new protections. It will also clear the modify 5393 * bit if we are taking away write permission. This is necessary since the 5394 * modify bit is the hardware permission bit and we need to clear it in order 5395 * to detect write faults. 5396 * It accepts the following special protections: 5397 * ~PROT_WRITE = remove write permissions. 5398 * ~PROT_USER = remove user permissions. 5399 */ 5400 static uint_t 5401 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5402 { 5403 if (vprot == (uint_t)~PROT_WRITE) { 5404 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5405 return (0); /* will cause wrprm to be cleared */ 5406 } 5407 if (vprot == (uint_t)~PROT_USER) { 5408 *tteflagsp = TTE_PRIV_INT; 5409 return (0); /* will cause privprm to be cleared */ 5410 } 5411 if ((vprot == 0) || (vprot == PROT_USER) || 5412 ((vprot & PROT_ALL) != vprot)) { 5413 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5414 } 5415 5416 switch (vprot) { 5417 case (PROT_READ): 5418 case (PROT_EXEC): 5419 case (PROT_EXEC | PROT_READ): 5420 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5421 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5422 case (PROT_WRITE): 5423 case (PROT_WRITE | PROT_READ): 5424 case (PROT_EXEC | PROT_WRITE): 5425 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5426 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5427 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5428 case (PROT_USER | PROT_READ): 5429 case (PROT_USER | PROT_EXEC): 5430 case (PROT_USER | PROT_EXEC | PROT_READ): 5431 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5432 return (0); /* clr prv and wrt */ 5433 case (PROT_USER | PROT_WRITE): 5434 case (PROT_USER | PROT_WRITE | PROT_READ): 5435 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5436 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5437 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5438 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5439 default: 5440 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5441 } 5442 return (0); 5443 } 5444 5445 /* 5446 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5447 * the normal algorithm would take too long for a very large VA range with 5448 * few real mappings. This routine just walks thru all HMEs in the global 5449 * hash table to find and remove mappings. 5450 */ 5451 static void 5452 hat_unload_large_virtual(struct hat *sfmmup, caddr_t startaddr, size_t len, 5453 uint_t flags, hat_callback_t *callback) 5454 { 5455 struct hmehash_bucket *hmebp; 5456 struct hme_blk *hmeblkp; 5457 struct hme_blk *pr_hblk = NULL; 5458 struct hme_blk *nx_hblk; 5459 struct hme_blk *list = NULL; 5460 int i; 5461 demap_range_t dmr, *dmrp; 5462 cpuset_t cpuset; 5463 caddr_t endaddr = startaddr + len; 5464 caddr_t sa; 5465 caddr_t ea; 5466 caddr_t cb_sa[MAX_CB_ADDR]; 5467 caddr_t cb_ea[MAX_CB_ADDR]; 5468 int addr_cnt = 0; 5469 int a = 0; 5470 5471 if (sfmmup->sfmmu_free) { 5472 dmrp = NULL; 5473 } else { 5474 dmrp = &dmr; 5475 DEMAP_RANGE_INIT(sfmmup, dmrp); 5476 } 5477 5478 /* 5479 * Loop through all the hash buckets of HME blocks looking for matches. 
5480 */ 5481 for (i = 0; i <= UHMEHASH_SZ; i++) { 5482 hmebp = &uhme_hash[i]; 5483 SFMMU_HASH_LOCK(hmebp); 5484 hmeblkp = hmebp->hmeblkp; 5485 pr_hblk = NULL; 5486 while (hmeblkp) { 5487 nx_hblk = hmeblkp->hblk_next; 5488 5489 /* 5490 * skip if not this context, if a shadow block or 5491 * if the mapping is not in the requested range 5492 */ 5493 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5494 hmeblkp->hblk_shw_bit || 5495 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5496 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5497 pr_hblk = hmeblkp; 5498 goto next_block; 5499 } 5500 5501 ASSERT(!hmeblkp->hblk_shared); 5502 /* 5503 * unload if there are any current valid mappings 5504 */ 5505 if (hmeblkp->hblk_vcnt != 0 || 5506 hmeblkp->hblk_hmecnt != 0) 5507 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5508 sa, ea, dmrp, flags); 5509 5510 /* 5511 * on unmap we also release the HME block itself, once 5512 * all mappings are gone. 5513 */ 5514 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5515 !hmeblkp->hblk_vcnt && 5516 !hmeblkp->hblk_hmecnt) { 5517 ASSERT(!hmeblkp->hblk_lckcnt); 5518 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5519 &list, 0); 5520 } else { 5521 pr_hblk = hmeblkp; 5522 } 5523 5524 if (callback == NULL) 5525 goto next_block; 5526 5527 /* 5528 * HME blocks may span more than one page, but we may be 5529 * unmapping only one page, so check for a smaller range 5530 * for the callback 5531 */ 5532 if (sa < startaddr) 5533 sa = startaddr; 5534 if (--ea > endaddr) 5535 ea = endaddr - 1; 5536 5537 cb_sa[addr_cnt] = sa; 5538 cb_ea[addr_cnt] = ea; 5539 if (++addr_cnt == MAX_CB_ADDR) { 5540 if (dmrp != NULL) { 5541 DEMAP_RANGE_FLUSH(dmrp); 5542 cpuset = sfmmup->sfmmu_cpusran; 5543 xt_sync(cpuset); 5544 } 5545 5546 for (a = 0; a < MAX_CB_ADDR; ++a) { 5547 callback->hcb_start_addr = cb_sa[a]; 5548 callback->hcb_end_addr = cb_ea[a]; 5549 callback->hcb_function(callback); 5550 } 5551 addr_cnt = 0; 5552 } 5553 5554 next_block: 5555 hmeblkp = nx_hblk; 5556 } 5557 SFMMU_HASH_UNLOCK(hmebp); 5558 } 5559 5560 sfmmu_hblks_list_purge(&list, 0); 5561 if (dmrp != NULL) { 5562 DEMAP_RANGE_FLUSH(dmrp); 5563 cpuset = sfmmup->sfmmu_cpusran; 5564 xt_sync(cpuset); 5565 } 5566 5567 for (a = 0; a < addr_cnt; ++a) { 5568 callback->hcb_start_addr = cb_sa[a]; 5569 callback->hcb_end_addr = cb_ea[a]; 5570 callback->hcb_function(callback); 5571 } 5572 5573 /* 5574 * Check TSB and TLB page sizes if the process isn't exiting. 5575 */ 5576 if (!sfmmup->sfmmu_free) 5577 sfmmu_check_page_sizes(sfmmup, 0); 5578 } 5579 5580 /* 5581 * Unload all the mappings in the range [addr..addr+len). addr and len must 5582 * be MMU_PAGESIZE aligned. 
5583 */ 5584 5585 extern struct seg *segkmap; 5586 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5587 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5588 5589 5590 void 5591 hat_unload_callback(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags, 5592 hat_callback_t *callback) 5593 { 5594 struct hmehash_bucket *hmebp; 5595 hmeblk_tag hblktag; 5596 int hmeshift, hashno, iskernel; 5597 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5598 caddr_t endaddr; 5599 cpuset_t cpuset; 5600 int addr_count = 0; 5601 int a; 5602 caddr_t cb_start_addr[MAX_CB_ADDR]; 5603 caddr_t cb_end_addr[MAX_CB_ADDR]; 5604 int issegkmap = ISSEGKMAP(sfmmup, addr); 5605 demap_range_t dmr, *dmrp; 5606 5607 ASSERT(sfmmup->sfmmu_as != NULL); 5608 5609 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5610 AS_LOCK_HELD(sfmmup->sfmmu_as)); 5611 5612 ASSERT(sfmmup != NULL); 5613 ASSERT((len & MMU_PAGEOFFSET) == 0); 5614 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5615 5616 /* 5617 * Probing through a large VA range (say 63 bits) will be slow, even 5618 * at 4 Meg steps between the probes. So, when the virtual address range 5619 * is very large, search the HME entries for what to unload. 5620 * 5621 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5622 * 5623 * UHMEHASH_SZ is the number of hash buckets to examine 5624 * 5625 */ 5626 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5627 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5628 return; 5629 } 5630 5631 CPUSET_ZERO(cpuset); 5632 5633 /* 5634 * If the process is exiting, we can save a lot of fuss since 5635 * we'll flush the TLB when we free the ctx anyway. 5636 */ 5637 if (sfmmup->sfmmu_free) { 5638 dmrp = NULL; 5639 } else { 5640 dmrp = &dmr; 5641 DEMAP_RANGE_INIT(sfmmup, dmrp); 5642 } 5643 5644 endaddr = addr + len; 5645 hblktag.htag_id = sfmmup; 5646 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5647 5648 /* 5649 * It is likely for the vm to call unload over a wide range of 5650 * addresses that are actually very sparsely populated by 5651 * translations. In order to speed this up the sfmmu hat supports 5652 * the concept of shadow hmeblks. Dummy large page hmeblks that 5653 * correspond to actual small translations are allocated at tteload 5654 * time and are referred to as shadow hmeblks. Now, during unload 5655 * time, we first check if we have a shadow hmeblk for that 5656 * translation. The absence of one means the corresponding address 5657 * range is empty and can be skipped. 5658 * 5659 * The kernel is an exception to the above statement and that is why 5660 * we don't use shadow hmeblks and hash starting from the smallest 5661 * page size. 5662 */ 5663 if (sfmmup == KHATID) { 5664 iskernel = 1; 5665 hashno = TTE64K; 5666 } else { 5667 iskernel = 0; 5668 if (mmu_page_sizes == max_mmu_page_sizes) { 5669 hashno = TTE256M; 5670 } else { 5671 hashno = TTE4M; 5672 } 5673 } 5674 while (addr < endaddr) { 5675 hmeshift = HME_HASH_SHIFT(hashno); 5676 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5677 hblktag.htag_rehash = hashno; 5678 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5679 5680 SFMMU_HASH_LOCK(hmebp); 5681 5682 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 5683 if (hmeblkp == NULL) { 5684 /* 5685 * didn't find an hmeblk. skip the appropriate 5686 * address range.
5687 */ 5688 SFMMU_HASH_UNLOCK(hmebp); 5689 if (iskernel) { 5690 if (hashno < mmu_hashcnt) { 5691 hashno++; 5692 continue; 5693 } else { 5694 hashno = TTE64K; 5695 addr = (caddr_t)roundup((uintptr_t)addr 5696 + 1, MMU_PAGESIZE64K); 5697 continue; 5698 } 5699 } 5700 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5701 (1 << hmeshift)); 5702 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5703 ASSERT(hashno == TTE64K); 5704 continue; 5705 } 5706 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5707 hashno = TTE512K; 5708 continue; 5709 } 5710 if (mmu_page_sizes == max_mmu_page_sizes) { 5711 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5712 hashno = TTE4M; 5713 continue; 5714 } 5715 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5716 hashno = TTE32M; 5717 continue; 5718 } 5719 hashno = TTE256M; 5720 continue; 5721 } else { 5722 hashno = TTE4M; 5723 continue; 5724 } 5725 } 5726 ASSERT(hmeblkp); 5727 ASSERT(!hmeblkp->hblk_shared); 5728 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5729 /* 5730 * If the valid count is zero we can skip the range 5731 * mapped by this hmeblk. 5732 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5733 * is used by segment drivers as a hint 5734 * that the mapping resource won't be used any longer. 5735 * The best example of this is during exit(). 5736 */ 5737 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5738 get_hblk_span(hmeblkp)); 5739 if ((flags & HAT_UNLOAD_UNMAP) || 5740 (iskernel && !issegkmap)) { 5741 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5742 &list, 0); 5743 } 5744 SFMMU_HASH_UNLOCK(hmebp); 5745 5746 if (iskernel) { 5747 hashno = TTE64K; 5748 continue; 5749 } 5750 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5751 ASSERT(hashno == TTE64K); 5752 continue; 5753 } 5754 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5755 hashno = TTE512K; 5756 continue; 5757 } 5758 if (mmu_page_sizes == max_mmu_page_sizes) { 5759 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5760 hashno = TTE4M; 5761 continue; 5762 } 5763 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5764 hashno = TTE32M; 5765 continue; 5766 } 5767 hashno = TTE256M; 5768 continue; 5769 } else { 5770 hashno = TTE4M; 5771 continue; 5772 } 5773 } 5774 if (hmeblkp->hblk_shw_bit) { 5775 /* 5776 * If we encounter a shadow hmeblk we know there is 5777 * smaller sized hmeblks mapping the same address space. 5778 * Decrement the hash size and rehash. 5779 */ 5780 ASSERT(sfmmup != KHATID); 5781 hashno--; 5782 SFMMU_HASH_UNLOCK(hmebp); 5783 continue; 5784 } 5785 5786 /* 5787 * track callback address ranges. 5788 * only start a new range when it's not contiguous 5789 */ 5790 if (callback != NULL) { 5791 if (addr_count > 0 && 5792 addr == cb_end_addr[addr_count - 1]) 5793 --addr_count; 5794 else 5795 cb_start_addr[addr_count] = addr; 5796 } 5797 5798 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5799 dmrp, flags); 5800 5801 if (callback != NULL) 5802 cb_end_addr[addr_count++] = addr; 5803 5804 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5805 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5806 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0); 5807 } 5808 SFMMU_HASH_UNLOCK(hmebp); 5809 5810 /* 5811 * Notify our caller as to exactly which pages 5812 * have been unloaded. We do these in clumps, 5813 * to minimize the number of xt_sync()s that need to occur. 
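		 *
		 * Before the callbacks run, any pending demap range is
		 * flushed and xt_sync()ed so the caller sees each range with
		 * its TLB entries already shot down; MAX_CB_ADDR bounds how
		 * many ranges are batched per flush.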
5814 */ 5815 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5816 if (dmrp != NULL) { 5817 DEMAP_RANGE_FLUSH(dmrp); 5818 cpuset = sfmmup->sfmmu_cpusran; 5819 xt_sync(cpuset); 5820 } 5821 5822 for (a = 0; a < MAX_CB_ADDR; ++a) { 5823 callback->hcb_start_addr = cb_start_addr[a]; 5824 callback->hcb_end_addr = cb_end_addr[a]; 5825 callback->hcb_function(callback); 5826 } 5827 addr_count = 0; 5828 } 5829 if (iskernel) { 5830 hashno = TTE64K; 5831 continue; 5832 } 5833 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5834 ASSERT(hashno == TTE64K); 5835 continue; 5836 } 5837 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5838 hashno = TTE512K; 5839 continue; 5840 } 5841 if (mmu_page_sizes == max_mmu_page_sizes) { 5842 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5843 hashno = TTE4M; 5844 continue; 5845 } 5846 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5847 hashno = TTE32M; 5848 continue; 5849 } 5850 hashno = TTE256M; 5851 } else { 5852 hashno = TTE4M; 5853 } 5854 } 5855 5856 sfmmu_hblks_list_purge(&list, 0); 5857 if (dmrp != NULL) { 5858 DEMAP_RANGE_FLUSH(dmrp); 5859 cpuset = sfmmup->sfmmu_cpusran; 5860 xt_sync(cpuset); 5861 } 5862 if (callback && addr_count != 0) { 5863 for (a = 0; a < addr_count; ++a) { 5864 callback->hcb_start_addr = cb_start_addr[a]; 5865 callback->hcb_end_addr = cb_end_addr[a]; 5866 callback->hcb_function(callback); 5867 } 5868 } 5869 5870 /* 5871 * Check TSB and TLB page sizes if the process isn't exiting. 5872 */ 5873 if (!sfmmup->sfmmu_free) 5874 sfmmu_check_page_sizes(sfmmup, 0); 5875 } 5876 5877 /* 5878 * Unload all the mappings in the range [addr..addr+len). addr and len must 5879 * be MMU_PAGESIZE aligned. 5880 */ 5881 void 5882 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5883 { 5884 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5885 } 5886 5887 5888 /* 5889 * Find the largest mapping size for this page. 5890 */ 5891 int 5892 fnd_mapping_sz(page_t *pp) 5893 { 5894 int sz; 5895 int p_index; 5896 5897 p_index = PP_MAPINDEX(pp); 5898 5899 sz = 0; 5900 p_index >>= 1; /* don't care about 8K bit */ 5901 for (; p_index; p_index >>= 1) { 5902 sz++; 5903 } 5904 5905 return (sz); 5906 } 5907 5908 /* 5909 * This function unloads a range of addresses for an hmeblk. 5910 * It returns the next address to be unloaded. 5911 * It should be called with the hash lock held. 
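 *
 * The caller advances its scan with the return value, as in
 * hat_unload_callback():
 *
 *	addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
 *	    dmrp, flags);
 *
 * which unloads the portion of [addr, endaddr) covered by hmeblkp and
 * returns the first address past the range it processed.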
5912 */ 5913 static caddr_t 5914 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5915 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5916 { 5917 tte_t tte, ttemod; 5918 struct sf_hment *sfhmep; 5919 int ttesz; 5920 long ttecnt; 5921 page_t *pp; 5922 kmutex_t *pml; 5923 int ret; 5924 int use_demap_range; 5925 5926 ASSERT(in_hblk_range(hmeblkp, addr)); 5927 ASSERT(!hmeblkp->hblk_shw_bit); 5928 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 5929 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 5930 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 5931 5932 #ifdef DEBUG 5933 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5934 (endaddr < get_hblk_endaddr(hmeblkp))) { 5935 panic("sfmmu_hblk_unload: partial unload of large page"); 5936 } 5937 #endif /* DEBUG */ 5938 5939 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5940 ttesz = get_hblk_ttesz(hmeblkp); 5941 5942 use_demap_range = ((dmrp == NULL) || 5943 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5944 5945 if (use_demap_range) { 5946 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5947 } else if (dmrp != NULL) { 5948 DEMAP_RANGE_FLUSH(dmrp); 5949 } 5950 ttecnt = 0; 5951 HBLKTOHME(sfhmep, hmeblkp, addr); 5952 5953 while (addr < endaddr) { 5954 pml = NULL; 5955 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5956 if (TTE_IS_VALID(&tte)) { 5957 pp = sfhmep->hme_page; 5958 if (pp != NULL) { 5959 pml = sfmmu_mlist_enter(pp); 5960 } 5961 5962 /* 5963 * Verify if hme still points to 'pp' now that 5964 * we have p_mapping lock. 5965 */ 5966 if (sfhmep->hme_page != pp) { 5967 if (pp != NULL && sfhmep->hme_page != NULL) { 5968 ASSERT(pml != NULL); 5969 sfmmu_mlist_exit(pml); 5970 /* Re-start this iteration. */ 5971 continue; 5972 } 5973 ASSERT((pp != NULL) && 5974 (sfhmep->hme_page == NULL)); 5975 goto tte_unloaded; 5976 } 5977 5978 /* 5979 * This point on we have both HASH and p_mapping 5980 * lock. 5981 */ 5982 ASSERT(pp == sfhmep->hme_page); 5983 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5984 5985 /* 5986 * We need to loop on modify tte because it is 5987 * possible for pagesync to come along and 5988 * change the software bits beneath us. 5989 * 5990 * Page_unload can also invalidate the tte after 5991 * we read tte outside of p_mapping lock. 5992 */ 5993 again: 5994 ttemod = tte; 5995 5996 TTE_SET_INVALID(&ttemod); 5997 ret = sfmmu_modifytte_try(&tte, &ttemod, 5998 &sfhmep->hme_tte); 5999 6000 if (ret <= 0) { 6001 if (TTE_IS_VALID(&tte)) { 6002 ASSERT(ret < 0); 6003 goto again; 6004 } 6005 if (pp != NULL) { 6006 panic("sfmmu_hblk_unload: pp = 0x%p " 6007 "tte became invalid under mlist" 6008 " lock = 0x%p", (void *)pp, 6009 (void *)pml); 6010 } 6011 continue; 6012 } 6013 6014 if (!(flags & HAT_UNLOAD_NOSYNC)) { 6015 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6016 } 6017 6018 /* 6019 * Ok- we invalidated the tte. Do the rest of the job. 6020 */ 6021 ttecnt++; 6022 6023 if (flags & HAT_UNLOAD_UNLOCK) { 6024 ASSERT(hmeblkp->hblk_lckcnt > 0); 6025 atomic_dec_32(&hmeblkp->hblk_lckcnt); 6026 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 6027 } 6028 6029 /* 6030 * Normally we would need to flush the page 6031 * from the virtual cache at this point in 6032 * order to prevent a potential cache alias 6033 * inconsistency. 6034 * The particular scenario we need to worry 6035 * about is: 6036 * Given: va1 and va2 are two virtual address 6037 * that alias and map the same physical 6038 * address. 6039 * 1. mapping exists from va1 to pa and data 6040 * has been read into the cache. 6041 * 2. unload va1. 6042 * 3. load va2 and modify data using va2. 
6043 * 4 unload va2. 6044 * 5. load va1 and reference data. Unless we 6045 * flush the data cache when we unload we will 6046 * get stale data. 6047 * Fortunately, page coloring eliminates the 6048 * above scenario by remembering the color a 6049 * physical page was last or is currently 6050 * mapped to. Now, we delay the flush until 6051 * the loading of translations. Only when the 6052 * new translation is of a different color 6053 * are we forced to flush. 6054 */ 6055 if (use_demap_range) { 6056 /* 6057 * Mark this page as needing a demap. 6058 */ 6059 DEMAP_RANGE_MARKPG(dmrp, addr); 6060 } else { 6061 ASSERT(sfmmup != NULL); 6062 ASSERT(!hmeblkp->hblk_shared); 6063 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6064 sfmmup->sfmmu_free, 0); 6065 } 6066 6067 if (pp) { 6068 /* 6069 * Remove the hment from the mapping list 6070 */ 6071 ASSERT(hmeblkp->hblk_hmecnt > 0); 6072 6073 /* 6074 * Again, we cannot 6075 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6076 */ 6077 HME_SUB(sfhmep, pp); 6078 membar_stst(); 6079 atomic_dec_16(&hmeblkp->hblk_hmecnt); 6080 } 6081 6082 ASSERT(hmeblkp->hblk_vcnt > 0); 6083 atomic_dec_16(&hmeblkp->hblk_vcnt); 6084 6085 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6086 !hmeblkp->hblk_lckcnt); 6087 6088 #ifdef VAC 6089 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6090 if (PP_ISTNC(pp)) { 6091 /* 6092 * If page was temporary 6093 * uncached, try to recache 6094 * it. Note that HME_SUB() was 6095 * called above so p_index and 6096 * mlist had been updated. 6097 */ 6098 conv_tnc(pp, ttesz); 6099 } else if (pp->p_mapping == NULL) { 6100 ASSERT(kpm_enable); 6101 /* 6102 * Page is marked to be in VAC conflict 6103 * to an existing kpm mapping and/or is 6104 * kpm mapped using only the regular 6105 * pagesize. 6106 */ 6107 sfmmu_kpm_hme_unload(pp); 6108 } 6109 } 6110 #endif /* VAC */ 6111 } else if ((pp = sfhmep->hme_page) != NULL) { 6112 /* 6113 * TTE is invalid but the hme 6114 * still exists. let pageunload 6115 * complete its job. 6116 */ 6117 ASSERT(pml == NULL); 6118 pml = sfmmu_mlist_enter(pp); 6119 if (sfhmep->hme_page != NULL) { 6120 sfmmu_mlist_exit(pml); 6121 continue; 6122 } 6123 ASSERT(sfhmep->hme_page == NULL); 6124 } else if (hmeblkp->hblk_hmecnt != 0) { 6125 /* 6126 * pageunload may have not finished decrementing 6127 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6128 * wait for pageunload to finish. Rely on pageunload 6129 * to decrement hblk_hmecnt after hblk_vcnt. 6130 */ 6131 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6132 ASSERT(pml == NULL); 6133 if (pf_is_memory(pfn)) { 6134 pp = page_numtopp_nolock(pfn); 6135 if (pp != NULL) { 6136 pml = sfmmu_mlist_enter(pp); 6137 sfmmu_mlist_exit(pml); 6138 pml = NULL; 6139 } 6140 } 6141 } 6142 6143 tte_unloaded: 6144 /* 6145 * At this point, the tte we are looking at 6146 * should be unloaded, and hme has been unlinked 6147 * from page too. This is important because in 6148 * pageunload, it does ttesync() then HME_SUB. 6149 * We need to make sure HME_SUB has been completed 6150 * so we know ttesync() has been completed. Otherwise, 6151 * at exit time, after return from hat layer, VM will 6152 * release as structure which hat_setstat() (called 6153 * by ttesync()) needs. 
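		 * (The "as" being released here is the process's address
		 * space structure, struct as, torn down at exit.)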
6154 */ 6155 #ifdef DEBUG 6156 { 6157 tte_t dtte; 6158 6159 ASSERT(sfhmep->hme_page == NULL); 6160 6161 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6162 ASSERT(!TTE_IS_VALID(&dtte)); 6163 } 6164 #endif 6165 6166 if (pml) { 6167 sfmmu_mlist_exit(pml); 6168 } 6169 6170 addr += TTEBYTES(ttesz); 6171 sfhmep++; 6172 DEMAP_RANGE_NEXTPG(dmrp); 6173 } 6174 /* 6175 * For shared hmeblks this routine is only called when region is freed 6176 * and no longer referenced. So no need to decrement ttecnt 6177 * in the region structure here. 6178 */ 6179 if (ttecnt > 0 && sfmmup != NULL) { 6180 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6181 } 6182 return (addr); 6183 } 6184 6185 /* 6186 * Invalidate a virtual address range for the local CPU. 6187 * For best performance ensure that the va range is completely 6188 * mapped, otherwise the entire TLB will be flushed. 6189 */ 6190 void 6191 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size) 6192 { 6193 ssize_t sz; 6194 caddr_t endva = va + size; 6195 6196 while (va < endva) { 6197 sz = hat_getpagesize(sfmmup, va); 6198 if (sz < 0) { 6199 vtag_flushall(); 6200 break; 6201 } 6202 vtag_flushpage(va, (uint64_t)sfmmup); 6203 va += sz; 6204 } 6205 } 6206 6207 /* 6208 * Synchronize all the mappings in the range [addr..addr+len). 6209 * Can be called with clearflag having two states: 6210 * HAT_SYNC_DONTZERO means just return the rm stats 6211 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6212 */ 6213 void 6214 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6215 { 6216 struct hmehash_bucket *hmebp; 6217 hmeblk_tag hblktag; 6218 int hmeshift, hashno = 1; 6219 struct hme_blk *hmeblkp, *list = NULL; 6220 caddr_t endaddr; 6221 cpuset_t cpuset; 6222 6223 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 6224 ASSERT((len & MMU_PAGEOFFSET) == 0); 6225 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6226 (clearflag == HAT_SYNC_ZERORM)); 6227 6228 CPUSET_ZERO(cpuset); 6229 6230 endaddr = addr + len; 6231 hblktag.htag_id = sfmmup; 6232 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6233 6234 /* 6235 * Spitfire supports 4 page sizes. 6236 * Most pages are expected to be of the smallest page 6237 * size (8K) and these will not need to be rehashed. 64K 6238 * pages also don't need to be rehashed because the an hmeblk 6239 * spans 64K of address space. 512K pages might need 1 rehash and 6240 * and 4M pages 2 rehashes. 6241 */ 6242 while (addr < endaddr) { 6243 hmeshift = HME_HASH_SHIFT(hashno); 6244 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6245 hblktag.htag_rehash = hashno; 6246 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6247 6248 SFMMU_HASH_LOCK(hmebp); 6249 6250 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6251 if (hmeblkp != NULL) { 6252 ASSERT(!hmeblkp->hblk_shared); 6253 /* 6254 * We've encountered a shadow hmeblk so skip the range 6255 * of the next smaller mapping size. 6256 */ 6257 if (hmeblkp->hblk_shw_bit) { 6258 ASSERT(sfmmup != ksfmmup); 6259 ASSERT(hashno > 1); 6260 addr = (caddr_t)P2END((uintptr_t)addr, 6261 TTEBYTES(hashno - 1)); 6262 } else { 6263 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6264 addr, endaddr, clearflag); 6265 } 6266 SFMMU_HASH_UNLOCK(hmebp); 6267 hashno = 1; 6268 continue; 6269 } 6270 SFMMU_HASH_UNLOCK(hmebp); 6271 6272 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6273 /* 6274 * We have traversed the whole list and rehashed 6275 * if necessary without finding the address to sync. 
6276 * This is ok so we increment the address by the 6277 * smallest hmeblk range for kernel mappings and the 6278 * largest hmeblk range, to account for shadow hmeblks, 6279 * for user mappings and continue. 6280 */ 6281 if (sfmmup == ksfmmup) 6282 addr = (caddr_t)P2END((uintptr_t)addr, 6283 TTEBYTES(1)); 6284 else 6285 addr = (caddr_t)P2END((uintptr_t)addr, 6286 TTEBYTES(hashno)); 6287 hashno = 1; 6288 } else { 6289 hashno++; 6290 } 6291 } 6292 sfmmu_hblks_list_purge(&list, 0); 6293 cpuset = sfmmup->sfmmu_cpusran; 6294 xt_sync(cpuset); 6295 } 6296 6297 static caddr_t 6298 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6299 caddr_t endaddr, int clearflag) 6300 { 6301 tte_t tte, ttemod; 6302 struct sf_hment *sfhmep; 6303 int ttesz; 6304 struct page *pp; 6305 kmutex_t *pml; 6306 int ret; 6307 6308 ASSERT(hmeblkp->hblk_shw_bit == 0); 6309 ASSERT(!hmeblkp->hblk_shared); 6310 6311 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6312 6313 ttesz = get_hblk_ttesz(hmeblkp); 6314 HBLKTOHME(sfhmep, hmeblkp, addr); 6315 6316 while (addr < endaddr) { 6317 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6318 if (TTE_IS_VALID(&tte)) { 6319 pml = NULL; 6320 pp = sfhmep->hme_page; 6321 if (pp) { 6322 pml = sfmmu_mlist_enter(pp); 6323 } 6324 if (pp != sfhmep->hme_page) { 6325 /* 6326 * tte most have been unloaded 6327 * underneath us. Recheck 6328 */ 6329 ASSERT(pml); 6330 sfmmu_mlist_exit(pml); 6331 continue; 6332 } 6333 6334 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6335 6336 if (clearflag == HAT_SYNC_ZERORM) { 6337 ttemod = tte; 6338 TTE_CLR_RM(&ttemod); 6339 ret = sfmmu_modifytte_try(&tte, &ttemod, 6340 &sfhmep->hme_tte); 6341 if (ret < 0) { 6342 if (pml) { 6343 sfmmu_mlist_exit(pml); 6344 } 6345 continue; 6346 } 6347 6348 if (ret > 0) { 6349 sfmmu_tlb_demap(addr, sfmmup, 6350 hmeblkp, 0, 0); 6351 } 6352 } 6353 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6354 if (pml) { 6355 sfmmu_mlist_exit(pml); 6356 } 6357 } 6358 addr += TTEBYTES(ttesz); 6359 sfhmep++; 6360 } 6361 return (addr); 6362 } 6363 6364 /* 6365 * This function will sync a tte to the page struct and it will 6366 * update the hat stats. Currently it allows us to pass a NULL pp 6367 * and we will simply update the stats. We may want to change this 6368 * so we only keep stats for pages backed by pp's. 6369 */ 6370 static void 6371 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6372 { 6373 uint_t rm = 0; 6374 int sz; 6375 pgcnt_t npgs; 6376 6377 ASSERT(TTE_IS_VALID(ttep)); 6378 6379 if (TTE_IS_NOSYNC(ttep)) { 6380 return; 6381 } 6382 6383 if (TTE_IS_REF(ttep)) { 6384 rm = P_REF; 6385 } 6386 if (TTE_IS_MOD(ttep)) { 6387 rm |= P_MOD; 6388 } 6389 6390 if (rm == 0) { 6391 return; 6392 } 6393 6394 sz = TTE_CSZ(ttep); 6395 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6396 int i; 6397 caddr_t vaddr = addr; 6398 6399 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 6400 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 6401 } 6402 6403 } 6404 6405 /* 6406 * XXX I want to use cas to update nrm bits but they 6407 * currently belong in common/vm and not in hat where 6408 * they should be. 6409 * The nrm bits are protected by the same mutex as 6410 * the one that protects the page's mapping list. 6411 */ 6412 if (!pp) 6413 return; 6414 ASSERT(sfmmu_mlist_held(pp)); 6415 /* 6416 * If the tte is for a large page, we need to sync all the 6417 * pages covered by the tte. 
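	 * For example, a 64K tte spans TTEPAGES(TTE64K) == 8 constituent
	 * 8K page_t's: we move to the group leader below and then walk
	 * the remaining pages with PP_PAGENEXT(), setting the rm bits on
	 * each one.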
6418 */ 6419 if (sz != TTE8K) { 6420 ASSERT(pp->p_szc != 0); 6421 pp = PP_GROUPLEADER(pp, sz); 6422 ASSERT(sfmmu_mlist_held(pp)); 6423 } 6424 6425 /* Get number of pages from tte size. */ 6426 npgs = TTEPAGES(sz); 6427 6428 do { 6429 ASSERT(pp); 6430 ASSERT(sfmmu_mlist_held(pp)); 6431 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6432 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 6433 hat_page_setattr(pp, rm); 6434 6435 /* 6436 * Are we done? If not, we must have a large mapping. 6437 * For large mappings we need to sync the rest of the pages 6438 * covered by this tte; goto the next page. 6439 */ 6440 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6441 } 6442 6443 /* 6444 * Execute pre-callback handler of each pa_hment linked to pp 6445 * 6446 * Inputs: 6447 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6448 * capture_cpus: pointer to return value (below) 6449 * 6450 * Returns: 6451 * Propagates the subsystem callback return values back to the caller; 6452 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6453 * is zero if all of the pa_hments are of a type that do not require 6454 * capturing CPUs prior to suspending the mapping, else it is 1. 6455 */ 6456 static int 6457 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6458 { 6459 struct sf_hment *sfhmep; 6460 struct pa_hment *pahmep; 6461 int (*f)(caddr_t, uint_t, uint_t, void *); 6462 int ret; 6463 id_t id; 6464 int locked = 0; 6465 kmutex_t *pml; 6466 6467 ASSERT(PAGE_EXCL(pp)); 6468 if (!sfmmu_mlist_held(pp)) { 6469 pml = sfmmu_mlist_enter(pp); 6470 locked = 1; 6471 } 6472 6473 if (capture_cpus) 6474 *capture_cpus = 0; 6475 6476 top: 6477 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6478 /* 6479 * skip sf_hments corresponding to VA<->PA mappings; 6480 * for pa_hment's, hme_tte.ll is zero 6481 */ 6482 if (!IS_PAHME(sfhmep)) 6483 continue; 6484 6485 pahmep = sfhmep->hme_data; 6486 ASSERT(pahmep != NULL); 6487 6488 /* 6489 * skip if pre-handler has been called earlier in this loop 6490 */ 6491 if (pahmep->flags & flag) 6492 continue; 6493 6494 id = pahmep->cb_id; 6495 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6496 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6497 *capture_cpus = 1; 6498 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6499 pahmep->flags |= flag; 6500 continue; 6501 } 6502 6503 /* 6504 * Drop the mapping list lock to avoid locking order issues. 6505 */ 6506 if (locked) 6507 sfmmu_mlist_exit(pml); 6508 6509 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6510 if (ret != 0) 6511 return (ret); /* caller must do the cleanup */ 6512 6513 if (locked) { 6514 pml = sfmmu_mlist_enter(pp); 6515 pahmep->flags |= flag; 6516 goto top; 6517 } 6518 6519 pahmep->flags |= flag; 6520 } 6521 6522 if (locked) 6523 sfmmu_mlist_exit(pml); 6524 6525 return (0); 6526 } 6527 6528 /* 6529 * Execute post-callback handler of each pa_hment linked to pp 6530 * 6531 * Same overall assumptions and restrictions apply as for 6532 * hat_pageprocess_precallbacks(). 
6533 */ 6534 static void 6535 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6536 { 6537 pfn_t pgpfn = pp->p_pagenum; 6538 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6539 pfn_t newpfn; 6540 struct sf_hment *sfhmep; 6541 struct pa_hment *pahmep; 6542 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6543 id_t id; 6544 int locked = 0; 6545 kmutex_t *pml; 6546 6547 ASSERT(PAGE_EXCL(pp)); 6548 if (!sfmmu_mlist_held(pp)) { 6549 pml = sfmmu_mlist_enter(pp); 6550 locked = 1; 6551 } 6552 6553 top: 6554 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6555 /* 6556 * skip sf_hments corresponding to VA<->PA mappings; 6557 * for pa_hment's, hme_tte.ll is zero 6558 */ 6559 if (!IS_PAHME(sfhmep)) 6560 continue; 6561 6562 pahmep = sfhmep->hme_data; 6563 ASSERT(pahmep != NULL); 6564 6565 if ((pahmep->flags & flag) == 0) 6566 continue; 6567 6568 pahmep->flags &= ~flag; 6569 6570 id = pahmep->cb_id; 6571 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6572 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6573 continue; 6574 6575 /* 6576 * Convert the base page PFN into the constituent PFN 6577 * which is needed by the callback handler. 6578 */ 6579 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6580 6581 /* 6582 * Drop the mapping list lock to avoid locking order issues. 6583 */ 6584 if (locked) 6585 sfmmu_mlist_exit(pml); 6586 6587 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6588 != 0) 6589 panic("sfmmu: posthandler failed"); 6590 6591 if (locked) { 6592 pml = sfmmu_mlist_enter(pp); 6593 goto top; 6594 } 6595 } 6596 6597 if (locked) 6598 sfmmu_mlist_exit(pml); 6599 } 6600 6601 /* 6602 * Suspend locked kernel mapping 6603 */ 6604 void 6605 hat_pagesuspend(struct page *pp) 6606 { 6607 struct sf_hment *sfhmep; 6608 sfmmu_t *sfmmup; 6609 tte_t tte, ttemod; 6610 struct hme_blk *hmeblkp; 6611 caddr_t addr; 6612 int index, cons; 6613 cpuset_t cpuset; 6614 6615 ASSERT(PAGE_EXCL(pp)); 6616 ASSERT(sfmmu_mlist_held(pp)); 6617 6618 mutex_enter(&kpr_suspendlock); 6619 6620 /* 6621 * We're about to suspend a kernel mapping so mark this thread as 6622 * non-traceable by DTrace. This prevents us from running into issues 6623 * with probe context trying to touch a suspended page 6624 * in the relocation codepath itself. 6625 */ 6626 curthread->t_flag |= T_DONTDTRACE; 6627 6628 index = PP_MAPINDEX(pp); 6629 cons = TTE8K; 6630 6631 retry: 6632 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6633 6634 if (IS_PAHME(sfhmep)) 6635 continue; 6636 6637 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6638 continue; 6639 6640 /* 6641 * Loop until we successfully set the suspend bit in 6642 * the TTE. 6643 */ 6644 again: 6645 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6646 ASSERT(TTE_IS_VALID(&tte)); 6647 6648 ttemod = tte; 6649 TTE_SET_SUSPEND(&ttemod); 6650 if (sfmmu_modifytte_try(&tte, &ttemod, 6651 &sfhmep->hme_tte) < 0) 6652 goto again; 6653 6654 /* 6655 * Invalidate TSB entry 6656 */ 6657 hmeblkp = sfmmu_hmetohblk(sfhmep); 6658 6659 sfmmup = hblktosfmmu(hmeblkp); 6660 ASSERT(sfmmup == ksfmmup); 6661 ASSERT(!hmeblkp->hblk_shared); 6662 6663 addr = tte_to_vaddr(hmeblkp, tte); 6664 6665 /* 6666 * No need to make sure that the TSB for this sfmmu is 6667 * not being relocated since it is ksfmmup and thus it 6668 * will never be relocated. 
6669 */ 6670 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6671 6672 /* 6673 * Update xcall stats 6674 */ 6675 cpuset = cpu_ready_set; 6676 CPUSET_DEL(cpuset, CPU->cpu_id); 6677 6678 /* LINTED: constant in conditional context */ 6679 SFMMU_XCALL_STATS(ksfmmup); 6680 6681 /* 6682 * Flush TLB entry on remote CPU's 6683 */ 6684 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6685 (uint64_t)ksfmmup); 6686 xt_sync(cpuset); 6687 6688 /* 6689 * Flush TLB entry on local CPU 6690 */ 6691 vtag_flushpage(addr, (uint64_t)ksfmmup); 6692 } 6693 6694 while (index != 0) { 6695 index = index >> 1; 6696 if (index != 0) 6697 cons++; 6698 if (index & 0x1) { 6699 pp = PP_GROUPLEADER(pp, cons); 6700 goto retry; 6701 } 6702 } 6703 } 6704 6705 #ifdef DEBUG 6706 6707 #define N_PRLE 1024 6708 struct prle { 6709 page_t *targ; 6710 page_t *repl; 6711 int status; 6712 int pausecpus; 6713 hrtime_t whence; 6714 }; 6715 6716 static struct prle page_relocate_log[N_PRLE]; 6717 static int prl_entry; 6718 static kmutex_t prl_mutex; 6719 6720 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6721 mutex_enter(&prl_mutex); \ 6722 page_relocate_log[prl_entry].targ = *(t); \ 6723 page_relocate_log[prl_entry].repl = *(r); \ 6724 page_relocate_log[prl_entry].status = (s); \ 6725 page_relocate_log[prl_entry].pausecpus = (p); \ 6726 page_relocate_log[prl_entry].whence = gethrtime(); \ 6727 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6728 mutex_exit(&prl_mutex); 6729 6730 #else /* !DEBUG */ 6731 #define PAGE_RELOCATE_LOG(t, r, s, p) 6732 #endif 6733 6734 /* 6735 * Core Kernel Page Relocation Algorithm 6736 * 6737 * Input: 6738 * 6739 * target : constituent pages are SE_EXCL locked. 6740 * replacement: constituent pages are SE_EXCL locked. 6741 * 6742 * Output: 6743 * 6744 * nrelocp: number of pages relocated 6745 */ 6746 int 6747 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6748 { 6749 page_t *targ, *repl; 6750 page_t *tpp, *rpp; 6751 kmutex_t *low, *high; 6752 spgcnt_t npages, i; 6753 page_t *pl = NULL; 6754 int old_pil; 6755 cpuset_t cpuset; 6756 int cap_cpus; 6757 int ret; 6758 #ifdef VAC 6759 int cflags = 0; 6760 #endif 6761 6762 if (!kcage_on || PP_ISNORELOC(*target)) { 6763 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6764 return (EAGAIN); 6765 } 6766 6767 mutex_enter(&kpr_mutex); 6768 kreloc_thread = curthread; 6769 6770 targ = *target; 6771 repl = *replacement; 6772 ASSERT(repl != NULL); 6773 ASSERT(targ->p_szc == repl->p_szc); 6774 6775 npages = page_get_pagecnt(targ->p_szc); 6776 6777 /* 6778 * unload VA<->PA mappings that are not locked 6779 */ 6780 tpp = targ; 6781 for (i = 0; i < npages; i++) { 6782 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6783 tpp++; 6784 } 6785 6786 /* 6787 * Do "presuspend" callbacks, in a context from which we can still 6788 * block as needed. Note that we don't hold the mapping list lock 6789 * of "targ" at this point due to potential locking order issues; 6790 * we assume that between the hat_pageunload() above and holding 6791 * the SE_EXCL lock that the mapping list *cannot* change at this 6792 * point. 6793 */ 6794 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6795 if (ret != 0) { 6796 /* 6797 * EIO translates to fatal error, for all others cleanup 6798 * and return EAGAIN. 
6799 */ 6800 ASSERT(ret != EIO); 6801 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6802 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6803 kreloc_thread = NULL; 6804 mutex_exit(&kpr_mutex); 6805 return (EAGAIN); 6806 } 6807 6808 /* 6809 * acquire p_mapping list lock for both the target and replacement 6810 * root pages. 6811 * 6812 * low and high refer to the need to grab the mlist locks in a 6813 * specific order in order to prevent race conditions. Thus the 6814 * lower lock must be grabbed before the higher lock. 6815 * 6816 * This will block hat_unload's accessing p_mapping list. Since 6817 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6818 * blocked. Thus, no one else will be accessing the p_mapping list 6819 * while we suspend and reload the locked mapping below. 6820 */ 6821 tpp = targ; 6822 rpp = repl; 6823 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6824 6825 kpreempt_disable(); 6826 6827 /* 6828 * We raise our PIL to 13 so that we don't get captured by 6829 * another CPU or pinned by an interrupt thread. We can't go to 6830 * PIL 14 since the nexus driver(s) may need to interrupt at 6831 * that level in the case of IOMMU pseudo mappings. 6832 */ 6833 cpuset = cpu_ready_set; 6834 CPUSET_DEL(cpuset, CPU->cpu_id); 6835 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6836 old_pil = splr(XCALL_PIL); 6837 } else { 6838 old_pil = -1; 6839 xc_attention(cpuset); 6840 } 6841 ASSERT(getpil() == XCALL_PIL); 6842 6843 /* 6844 * Now do suspend callbacks. In the case of an IOMMU mapping 6845 * this will suspend all DMA activity to the page while it is 6846 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6847 * may be captured at this point we should have acquired any needed 6848 * locks in the presuspend callback. 6849 */ 6850 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6851 if (ret != 0) { 6852 repl = targ; 6853 goto suspend_fail; 6854 } 6855 6856 /* 6857 * Raise the PIL yet again, this time to block all high-level 6858 * interrupts on this CPU. This is necessary to prevent an 6859 * interrupt routine from pinning the thread which holds the 6860 * mapping suspended and then touching the suspended page. 6861 * 6862 * Once the page is suspended we also need to be careful to 6863 * avoid calling any functions which touch any seg_kmem memory 6864 * since that memory may be backed by the very page we are 6865 * relocating in here! 6866 */ 6867 hat_pagesuspend(targ); 6868 6869 /* 6870 * Now that we are confident everybody has stopped using this page, 6871 * copy the page contents. Note we use a physical copy to prevent 6872 * locking issues and to avoid fpRAS because we can't handle it in 6873 * this context. 6874 */ 6875 for (i = 0; i < npages; i++, tpp++, rpp++) { 6876 #ifdef VAC 6877 /* 6878 * If the replacement has a different vcolor than 6879 * the one being replacd, we need to handle VAC 6880 * consistency for it just as we were setting up 6881 * a new mapping to it. 6882 */ 6883 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) && 6884 (tpp->p_vcolor != rpp->p_vcolor) && 6885 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) { 6886 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp)); 6887 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6888 rpp->p_pagenum); 6889 } 6890 #endif 6891 /* 6892 * Copy the contents of the page. 6893 */ 6894 ppcopy_kernel(tpp, rpp); 6895 } 6896 6897 tpp = targ; 6898 rpp = repl; 6899 for (i = 0; i < npages; i++, tpp++, rpp++) { 6900 /* 6901 * Copy attributes. VAC consistency was handled above, 6902 * if required. 
6903 */ 6904 rpp->p_nrm = tpp->p_nrm; 6905 tpp->p_nrm = 0; 6906 rpp->p_index = tpp->p_index; 6907 tpp->p_index = 0; 6908 #ifdef VAC 6909 rpp->p_vcolor = tpp->p_vcolor; 6910 #endif 6911 } 6912 6913 /* 6914 * First, unsuspend the page, if we set the suspend bit, and transfer 6915 * the mapping list from the target page to the replacement page. 6916 * Next process postcallbacks; since pa_hment's are linked only to the 6917 * p_mapping list of root page, we don't iterate over the constituent 6918 * pages. 6919 */ 6920 hat_pagereload(targ, repl); 6921 6922 suspend_fail: 6923 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6924 6925 /* 6926 * Now lower our PIL and release any captured CPUs since we 6927 * are out of the "danger zone". After this it will again be 6928 * safe to acquire adaptive mutex locks, or to drop them... 6929 */ 6930 if (old_pil != -1) { 6931 splx(old_pil); 6932 } else { 6933 xc_dismissed(cpuset); 6934 } 6935 6936 kpreempt_enable(); 6937 6938 sfmmu_mlist_reloc_exit(low, high); 6939 6940 /* 6941 * Postsuspend callbacks should drop any locks held across 6942 * the suspend callbacks. As before, we don't hold the mapping 6943 * list lock at this point.. our assumption is that the mapping 6944 * list still can't change due to our holding SE_EXCL lock and 6945 * there being no unlocked mappings left. Hence the restriction 6946 * on calling context to hat_delete_callback() 6947 */ 6948 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6949 if (ret != 0) { 6950 /* 6951 * The second presuspend call failed: we got here through 6952 * the suspend_fail label above. 6953 */ 6954 ASSERT(ret != EIO); 6955 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6956 kreloc_thread = NULL; 6957 mutex_exit(&kpr_mutex); 6958 return (EAGAIN); 6959 } 6960 6961 /* 6962 * Now that we're out of the performance critical section we can 6963 * take care of updating the hash table, since we still 6964 * hold all the pages locked SE_EXCL at this point we 6965 * needn't worry about things changing out from under us. 6966 */ 6967 tpp = targ; 6968 rpp = repl; 6969 for (i = 0; i < npages; i++, tpp++, rpp++) { 6970 6971 /* 6972 * replace targ with replacement in page_hash table 6973 */ 6974 targ = tpp; 6975 page_relocate_hash(rpp, targ); 6976 6977 /* 6978 * concatenate target; caller of platform_page_relocate() 6979 * expects target to be concatenated after returning. 6980 */ 6981 ASSERT(targ->p_next == targ); 6982 ASSERT(targ->p_prev == targ); 6983 page_list_concat(&pl, &targ); 6984 } 6985 6986 ASSERT(*target == pl); 6987 *nrelocp = npages; 6988 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6989 kreloc_thread = NULL; 6990 mutex_exit(&kpr_mutex); 6991 return (0); 6992 } 6993 6994 /* 6995 * Called when stray pa_hments are found attached to a page which is 6996 * being freed. Notify the subsystem which attached the pa_hment of 6997 * the error if it registered a suitable handler, else panic. 6998 */ 6999 static void 7000 sfmmu_pahment_leaked(struct pa_hment *pahmep) 7001 { 7002 id_t cb_id = pahmep->cb_id; 7003 7004 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 7005 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 7006 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 7007 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 7008 return; /* non-fatal */ 7009 } 7010 panic("pa_hment leaked: 0x%p", (void *)pahmep); 7011 } 7012 7013 /* 7014 * Remove all mappings to page 'pp'. 
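 *
 * forceflag is typically HAT_ADV_PGUNLOAD or HAT_FORCE_PGUNLOAD (see
 * <vm/hat.h>); the special value SFMMU_KERNEL_RELOC, passed by
 * hat_page_relocate(), makes us skip locked kernel mappings so they can
 * be suspended rather than unloaded.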
7015 */ 7016 int 7017 hat_pageunload(struct page *pp, uint_t forceflag) 7018 { 7019 struct page *origpp = pp; 7020 struct sf_hment *sfhme, *tmphme; 7021 struct hme_blk *hmeblkp; 7022 kmutex_t *pml; 7023 #ifdef VAC 7024 kmutex_t *pmtx; 7025 #endif 7026 cpuset_t cpuset, tset; 7027 int index, cons; 7028 int pa_hments; 7029 7030 ASSERT(PAGE_EXCL(pp)); 7031 7032 tmphme = NULL; 7033 pa_hments = 0; 7034 CPUSET_ZERO(cpuset); 7035 7036 pml = sfmmu_mlist_enter(pp); 7037 7038 #ifdef VAC 7039 if (pp->p_kpmref) 7040 sfmmu_kpm_pageunload(pp); 7041 ASSERT(!PP_ISMAPPED_KPM(pp)); 7042 #endif 7043 /* 7044 * Clear vpm reference. Since the page is exclusively locked 7045 * vpm cannot be referencing it. 7046 */ 7047 if (vpm_enable) { 7048 pp->p_vpmref = 0; 7049 } 7050 7051 index = PP_MAPINDEX(pp); 7052 cons = TTE8K; 7053 retry: 7054 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7055 tmphme = sfhme->hme_next; 7056 7057 if (IS_PAHME(sfhme)) { 7058 ASSERT(sfhme->hme_data != NULL); 7059 pa_hments++; 7060 continue; 7061 } 7062 7063 hmeblkp = sfmmu_hmetohblk(sfhme); 7064 7065 /* 7066 * If there are kernel mappings don't unload them, they will 7067 * be suspended. 7068 */ 7069 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7070 hmeblkp->hblk_tag.htag_id == ksfmmup) 7071 continue; 7072 7073 tset = sfmmu_pageunload(pp, sfhme, cons); 7074 CPUSET_OR(cpuset, tset); 7075 } 7076 7077 while (index != 0) { 7078 index = index >> 1; 7079 if (index != 0) 7080 cons++; 7081 if (index & 0x1) { 7082 /* Go to leading page */ 7083 pp = PP_GROUPLEADER(pp, cons); 7084 ASSERT(sfmmu_mlist_held(pp)); 7085 goto retry; 7086 } 7087 } 7088 7089 /* 7090 * cpuset may be empty if the page was only mapped by segkpm, 7091 * in which case we won't actually cross-trap. 7092 */ 7093 xt_sync(cpuset); 7094 7095 /* 7096 * The page should have no mappings at this point, unless 7097 * we were called from hat_page_relocate() in which case we 7098 * leave the locked mappings which will be suspended later. 7099 */ 7100 ASSERT(!PP_ISMAPPED(origpp) || pa_hments || 7101 (forceflag == SFMMU_KERNEL_RELOC)); 7102 7103 #ifdef VAC 7104 if (PP_ISTNC(pp)) { 7105 if (cons == TTE8K) { 7106 pmtx = sfmmu_page_enter(pp); 7107 PP_CLRTNC(pp); 7108 sfmmu_page_exit(pmtx); 7109 } else { 7110 conv_tnc(pp, cons); 7111 } 7112 } 7113 #endif /* VAC */ 7114 7115 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7116 /* 7117 * Unlink any pa_hments and free them, calling back 7118 * the responsible subsystem to notify it of the error. 7119 * This can occur in situations such as drivers leaking 7120 * DMA handles: naughty, but common enough that we'd like 7121 * to keep the system running rather than bringing it 7122 * down with an obscure error like "pa_hment leaked" 7123 * which doesn't aid the user in debugging their driver. 
7124 */ 7125 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7126 tmphme = sfhme->hme_next; 7127 if (IS_PAHME(sfhme)) { 7128 struct pa_hment *pahmep = sfhme->hme_data; 7129 sfmmu_pahment_leaked(pahmep); 7130 HME_SUB(sfhme, pp); 7131 kmem_cache_free(pa_hment_cache, pahmep); 7132 } 7133 } 7134 7135 ASSERT(!PP_ISMAPPED(origpp)); 7136 } 7137 7138 sfmmu_mlist_exit(pml); 7139 7140 return (0); 7141 } 7142 7143 cpuset_t 7144 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7145 { 7146 struct hme_blk *hmeblkp; 7147 sfmmu_t *sfmmup; 7148 tte_t tte, ttemod; 7149 #ifdef DEBUG 7150 tte_t orig_old; 7151 #endif /* DEBUG */ 7152 caddr_t addr; 7153 int ttesz; 7154 int ret; 7155 cpuset_t cpuset; 7156 7157 ASSERT(pp != NULL); 7158 ASSERT(sfmmu_mlist_held(pp)); 7159 ASSERT(!PP_ISKAS(pp)); 7160 7161 CPUSET_ZERO(cpuset); 7162 7163 hmeblkp = sfmmu_hmetohblk(sfhme); 7164 7165 readtte: 7166 sfmmu_copytte(&sfhme->hme_tte, &tte); 7167 if (TTE_IS_VALID(&tte)) { 7168 sfmmup = hblktosfmmu(hmeblkp); 7169 ttesz = get_hblk_ttesz(hmeblkp); 7170 /* 7171 * Only unload mappings of 'cons' size. 7172 */ 7173 if (ttesz != cons) 7174 return (cpuset); 7175 7176 /* 7177 * Note that we have p_mapping lock, but no hash lock here. 7178 * hblk_unload() has to have both hash lock AND p_mapping 7179 * lock before it tries to modify tte. So, the tte could 7180 * not become invalid in the sfmmu_modifytte_try() below. 7181 */ 7182 ttemod = tte; 7183 #ifdef DEBUG 7184 orig_old = tte; 7185 #endif /* DEBUG */ 7186 7187 TTE_SET_INVALID(&ttemod); 7188 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7189 if (ret < 0) { 7190 #ifdef DEBUG 7191 /* only R/M bits can change. */ 7192 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7193 #endif /* DEBUG */ 7194 goto readtte; 7195 } 7196 7197 if (ret == 0) { 7198 panic("pageunload: cas failed?"); 7199 } 7200 7201 addr = tte_to_vaddr(hmeblkp, tte); 7202 7203 if (hmeblkp->hblk_shared) { 7204 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7205 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7206 sf_region_t *rgnp; 7207 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7208 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7209 ASSERT(srdp != NULL); 7210 rgnp = srdp->srd_hmergnp[rid]; 7211 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7212 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7213 sfmmu_ttesync(NULL, addr, &tte, pp); 7214 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7215 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]); 7216 } else { 7217 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7218 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]); 7219 7220 /* 7221 * We need to flush the page from the virtual cache 7222 * in order to prevent a virtual cache alias 7223 * inconsistency. The particular scenario we need 7224 * to worry about is: 7225 * Given: va1 and va2 are two virtual address that 7226 * alias and will map the same physical address. 7227 * 1. mapping exists from va1 to pa and data has 7228 * been read into the cache. 7229 * 2. unload va1. 7230 * 3. load va2 and modify data using va2. 7231 * 4 unload va2. 7232 * 5. load va1 and reference data. Unless we flush 7233 * the data cache when we unload we will get 7234 * stale data. 7235 * This scenario is taken care of by using virtual 7236 * page coloring. 7237 */ 7238 if (sfmmup->sfmmu_ismhat) { 7239 /* 7240 * Flush TSBs, TLBs and caches 7241 * of every process 7242 * sharing this ism segment. 
7243 */ 7244 sfmmu_hat_lock_all(); 7245 mutex_enter(&ism_mlist_lock); 7246 kpreempt_disable(); 7247 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7248 pp->p_pagenum, CACHE_NO_FLUSH); 7249 kpreempt_enable(); 7250 mutex_exit(&ism_mlist_lock); 7251 sfmmu_hat_unlock_all(); 7252 cpuset = cpu_ready_set; 7253 } else { 7254 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7255 cpuset = sfmmup->sfmmu_cpusran; 7256 } 7257 } 7258 7259 /* 7260 * Hme_sub has to run after ttesync() and a_rss update. 7261 * See hblk_unload(). 7262 */ 7263 HME_SUB(sfhme, pp); 7264 membar_stst(); 7265 7266 /* 7267 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7268 * since pteload may have done a HME_ADD() right after 7269 * we did the HME_SUB() above. Hmecnt is now maintained 7270 * by cas only. no lock guranteed its value. The only 7271 * gurantee we have is the hmecnt should not be less than 7272 * what it should be so the hblk will not be taken away. 7273 * It's also important that we decremented the hmecnt after 7274 * we are done with hmeblkp so that this hmeblk won't be 7275 * stolen. 7276 */ 7277 ASSERT(hmeblkp->hblk_hmecnt > 0); 7278 ASSERT(hmeblkp->hblk_vcnt > 0); 7279 atomic_dec_16(&hmeblkp->hblk_vcnt); 7280 atomic_dec_16(&hmeblkp->hblk_hmecnt); 7281 /* 7282 * This is bug 4063182. 7283 * XXX: fixme 7284 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7285 * !hmeblkp->hblk_lckcnt); 7286 */ 7287 } else { 7288 panic("invalid tte? pp %p &tte %p", 7289 (void *)pp, (void *)&tte); 7290 } 7291 7292 return (cpuset); 7293 } 7294 7295 /* 7296 * While relocating a kernel page, this function will move the mappings 7297 * from tpp to dpp and modify any associated data with these mappings. 7298 * It also unsuspends the suspended kernel mapping. 7299 */ 7300 static void 7301 hat_pagereload(struct page *tpp, struct page *dpp) 7302 { 7303 struct sf_hment *sfhme; 7304 tte_t tte, ttemod; 7305 int index, cons; 7306 7307 ASSERT(getpil() == PIL_MAX); 7308 ASSERT(sfmmu_mlist_held(tpp)); 7309 ASSERT(sfmmu_mlist_held(dpp)); 7310 7311 index = PP_MAPINDEX(tpp); 7312 cons = TTE8K; 7313 7314 /* Update real mappings to the page */ 7315 retry: 7316 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7317 if (IS_PAHME(sfhme)) 7318 continue; 7319 sfmmu_copytte(&sfhme->hme_tte, &tte); 7320 ttemod = tte; 7321 7322 /* 7323 * replace old pfn with new pfn in TTE 7324 */ 7325 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7326 7327 /* 7328 * clear suspend bit 7329 */ 7330 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7331 TTE_CLR_SUSPEND(&ttemod); 7332 7333 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7334 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7335 7336 /* 7337 * set hme_page point to new page 7338 */ 7339 sfhme->hme_page = dpp; 7340 } 7341 7342 /* 7343 * move p_mapping list from old page to new page 7344 */ 7345 dpp->p_mapping = tpp->p_mapping; 7346 tpp->p_mapping = NULL; 7347 dpp->p_share = tpp->p_share; 7348 tpp->p_share = 0; 7349 7350 while (index != 0) { 7351 index = index >> 1; 7352 if (index != 0) 7353 cons++; 7354 if (index & 0x1) { 7355 tpp = PP_GROUPLEADER(tpp, cons); 7356 dpp = PP_GROUPLEADER(dpp, cons); 7357 goto retry; 7358 } 7359 } 7360 7361 curthread->t_flag &= ~T_DONTDTRACE; 7362 mutex_exit(&kpr_suspendlock); 7363 } 7364 7365 uint_t 7366 hat_pagesync(struct page *pp, uint_t clearflag) 7367 { 7368 struct sf_hment *sfhme, *tmphme = NULL; 7369 struct hme_blk *hmeblkp; 7370 kmutex_t *pml; 7371 cpuset_t cpuset, tset; 7372 int index, cons; 7373 extern ulong_t po_share; 7374 page_t *save_pp = pp; 
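	/*
	 * save_pp remembers the constituent page we were handed so that
	 * the attributes returned (and any P_REF set for the pager) apply
	 * to it even after pp is advanced to a large-page group leader;
	 * stop_on_sh/shcnt implement HAT_SYNC_STOPON_SHARED.
	 */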
7375 int stop_on_sh = 0; 7376 uint_t shcnt; 7377 7378 CPUSET_ZERO(cpuset); 7379 7380 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7381 return (PP_GENERIC_ATTR(pp)); 7382 } 7383 7384 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7385 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7386 return (PP_GENERIC_ATTR(pp)); 7387 } 7388 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7389 return (PP_GENERIC_ATTR(pp)); 7390 } 7391 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7392 if (pp->p_share > po_share) { 7393 hat_page_setattr(pp, P_REF); 7394 return (PP_GENERIC_ATTR(pp)); 7395 } 7396 stop_on_sh = 1; 7397 shcnt = 0; 7398 } 7399 } 7400 7401 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7402 pml = sfmmu_mlist_enter(pp); 7403 index = PP_MAPINDEX(pp); 7404 cons = TTE8K; 7405 retry: 7406 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7407 /* 7408 * We need to save the next hment on the list since 7409 * it is possible for pagesync to remove an invalid hment 7410 * from the list. 7411 */ 7412 tmphme = sfhme->hme_next; 7413 if (IS_PAHME(sfhme)) 7414 continue; 7415 /* 7416 * If we are looking for large mappings and this hme doesn't 7417 * reach the range we are seeking, just ignore it. 7418 */ 7419 hmeblkp = sfmmu_hmetohblk(sfhme); 7420 7421 if (hme_size(sfhme) < cons) 7422 continue; 7423 7424 if (stop_on_sh) { 7425 if (hmeblkp->hblk_shared) { 7426 sf_srd_t *srdp = hblktosrd(hmeblkp); 7427 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7428 sf_region_t *rgnp; 7429 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7430 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7431 ASSERT(srdp != NULL); 7432 rgnp = srdp->srd_hmergnp[rid]; 7433 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7434 rgnp, rid); 7435 shcnt += rgnp->rgn_refcnt; 7436 } else { 7437 shcnt++; 7438 } 7439 if (shcnt > po_share) { 7440 /* 7441 * tell the pager to spare the page this time 7442 * around. 7443 */ 7444 hat_page_setattr(save_pp, P_REF); 7445 index = 0; 7446 break; 7447 } 7448 } 7449 tset = sfmmu_pagesync(pp, sfhme, 7450 clearflag & ~HAT_SYNC_STOPON_RM); 7451 CPUSET_OR(cpuset, tset); 7452 7453 /* 7454 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7455 * as the "ref" or "mod" is set or share cnt exceeds po_share. 
7456 */ 7457 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7458 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7459 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7460 index = 0; 7461 break; 7462 } 7463 } 7464 7465 while (index) { 7466 index = index >> 1; 7467 cons++; 7468 if (index & 0x1) { 7469 /* Go to leading page */ 7470 pp = PP_GROUPLEADER(pp, cons); 7471 goto retry; 7472 } 7473 } 7474 7475 xt_sync(cpuset); 7476 sfmmu_mlist_exit(pml); 7477 return (PP_GENERIC_ATTR(save_pp)); 7478 } 7479 7480 /* 7481 * Get all the hardware dependent attributes for a page struct 7482 */ 7483 static cpuset_t 7484 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7485 uint_t clearflag) 7486 { 7487 caddr_t addr; 7488 tte_t tte, ttemod; 7489 struct hme_blk *hmeblkp; 7490 int ret; 7491 sfmmu_t *sfmmup; 7492 cpuset_t cpuset; 7493 7494 ASSERT(pp != NULL); 7495 ASSERT(sfmmu_mlist_held(pp)); 7496 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7497 (clearflag == HAT_SYNC_ZERORM)); 7498 7499 SFMMU_STAT(sf_pagesync); 7500 7501 CPUSET_ZERO(cpuset); 7502 7503 sfmmu_pagesync_retry: 7504 7505 sfmmu_copytte(&sfhme->hme_tte, &tte); 7506 if (TTE_IS_VALID(&tte)) { 7507 hmeblkp = sfmmu_hmetohblk(sfhme); 7508 sfmmup = hblktosfmmu(hmeblkp); 7509 addr = tte_to_vaddr(hmeblkp, tte); 7510 if (clearflag == HAT_SYNC_ZERORM) { 7511 ttemod = tte; 7512 TTE_CLR_RM(&ttemod); 7513 ret = sfmmu_modifytte_try(&tte, &ttemod, 7514 &sfhme->hme_tte); 7515 if (ret < 0) { 7516 /* 7517 * cas failed and the new value is not what 7518 * we want. 7519 */ 7520 goto sfmmu_pagesync_retry; 7521 } 7522 7523 if (ret > 0) { 7524 /* we win the cas */ 7525 if (hmeblkp->hblk_shared) { 7526 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7527 uint_t rid = 7528 hmeblkp->hblk_tag.htag_rid; 7529 sf_region_t *rgnp; 7530 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7531 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7532 ASSERT(srdp != NULL); 7533 rgnp = srdp->srd_hmergnp[rid]; 7534 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7535 srdp, rgnp, rid); 7536 cpuset = sfmmu_rgntlb_demap(addr, 7537 rgnp, hmeblkp, 1); 7538 } else { 7539 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7540 0, 0); 7541 cpuset = sfmmup->sfmmu_cpusran; 7542 } 7543 } 7544 } 7545 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7546 &tte, pp); 7547 } 7548 return (cpuset); 7549 } 7550 7551 /* 7552 * Remove write permission from a mappings to a page, so that 7553 * we can detect the next modification of it. This requires modifying 7554 * the TTE then invalidating (demap) any TLB entry using that TTE. 7555 * This code is similar to sfmmu_pagesync(). 
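 *
 * As in sfmmu_pagesync(), the return value of sfmmu_modifytte_try()
 * drives the loop below: a negative return means the tte changed
 * underneath us and we re-read and retry, zero means the tte already
 * held the value we wanted, and a positive return means we installed
 * the new tte and must demap any TLB entries still holding the old one.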
7556 */ 7557 static cpuset_t 7558 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7559 { 7560 caddr_t addr; 7561 tte_t tte; 7562 tte_t ttemod; 7563 struct hme_blk *hmeblkp; 7564 int ret; 7565 sfmmu_t *sfmmup; 7566 cpuset_t cpuset; 7567 7568 ASSERT(pp != NULL); 7569 ASSERT(sfmmu_mlist_held(pp)); 7570 7571 CPUSET_ZERO(cpuset); 7572 SFMMU_STAT(sf_clrwrt); 7573 7574 retry: 7575 7576 sfmmu_copytte(&sfhme->hme_tte, &tte); 7577 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7578 hmeblkp = sfmmu_hmetohblk(sfhme); 7579 sfmmup = hblktosfmmu(hmeblkp); 7580 addr = tte_to_vaddr(hmeblkp, tte); 7581 7582 ttemod = tte; 7583 TTE_CLR_WRT(&ttemod); 7584 TTE_CLR_MOD(&ttemod); 7585 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7586 7587 /* 7588 * if cas failed and the new value is not what 7589 * we want retry 7590 */ 7591 if (ret < 0) 7592 goto retry; 7593 7594 /* we win the cas */ 7595 if (ret > 0) { 7596 if (hmeblkp->hblk_shared) { 7597 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7598 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7599 sf_region_t *rgnp; 7600 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7601 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7602 ASSERT(srdp != NULL); 7603 rgnp = srdp->srd_hmergnp[rid]; 7604 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7605 srdp, rgnp, rid); 7606 cpuset = sfmmu_rgntlb_demap(addr, 7607 rgnp, hmeblkp, 1); 7608 } else { 7609 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7610 cpuset = sfmmup->sfmmu_cpusran; 7611 } 7612 } 7613 } 7614 7615 return (cpuset); 7616 } 7617 7618 /* 7619 * Walk all mappings of a page, removing write permission and clearing the 7620 * ref/mod bits. This code is similar to hat_pagesync() 7621 */ 7622 static void 7623 hat_page_clrwrt(page_t *pp) 7624 { 7625 struct sf_hment *sfhme; 7626 struct sf_hment *tmphme = NULL; 7627 kmutex_t *pml; 7628 cpuset_t cpuset; 7629 cpuset_t tset; 7630 int index; 7631 int cons; 7632 7633 CPUSET_ZERO(cpuset); 7634 7635 pml = sfmmu_mlist_enter(pp); 7636 index = PP_MAPINDEX(pp); 7637 cons = TTE8K; 7638 retry: 7639 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7640 tmphme = sfhme->hme_next; 7641 7642 /* 7643 * If we are looking for large mappings and this hme doesn't 7644 * reach the range we are seeking, just ignore its. 7645 */ 7646 7647 if (hme_size(sfhme) < cons) 7648 continue; 7649 7650 tset = sfmmu_pageclrwrt(pp, sfhme); 7651 CPUSET_OR(cpuset, tset); 7652 } 7653 7654 while (index) { 7655 index = index >> 1; 7656 cons++; 7657 if (index & 0x1) { 7658 /* Go to leading page */ 7659 pp = PP_GROUPLEADER(pp, cons); 7660 goto retry; 7661 } 7662 } 7663 7664 xt_sync(cpuset); 7665 sfmmu_mlist_exit(pml); 7666 } 7667 7668 /* 7669 * Set the given REF/MOD/RO bits for the given page. 7670 * For a vnode with a sorted v_pages list, we need to change 7671 * the attributes and the v_pages list together under page_vnode_mutex. 
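 *
 * When the mod bit is being set on an IS_VMODSORT vnode the page is
 * also re-queued at the tail of v_pages (unless the caller passed P_NSH
 * to suppress the shuffle); consumers such as pvn_vplist_dirty() rely
 * on that ordering to find dirty pages without walking the whole list.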
7672 */ 7673 void 7674 hat_page_setattr(page_t *pp, uint_t flag) 7675 { 7676 vnode_t *vp = pp->p_vnode; 7677 page_t **listp; 7678 kmutex_t *pmtx; 7679 kmutex_t *vphm = NULL; 7680 int noshuffle; 7681 7682 noshuffle = flag & P_NSH; 7683 flag &= ~P_NSH; 7684 7685 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7686 7687 /* 7688 * nothing to do if attribute already set 7689 */ 7690 if ((pp->p_nrm & flag) == flag) 7691 return; 7692 7693 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7694 !noshuffle) { 7695 vphm = page_vnode_mutex(vp); 7696 mutex_enter(vphm); 7697 } 7698 7699 pmtx = sfmmu_page_enter(pp); 7700 pp->p_nrm |= flag; 7701 sfmmu_page_exit(pmtx); 7702 7703 if (vphm != NULL) { 7704 /* 7705 * Some File Systems examine v_pages for NULL w/o 7706 * grabbing the vphm mutex. Must not let it become NULL when 7707 * pp is the only page on the list. 7708 */ 7709 if (pp->p_vpnext != pp) { 7710 page_vpsub(&vp->v_pages, pp); 7711 if (vp->v_pages != NULL) 7712 listp = &vp->v_pages->p_vpprev->p_vpnext; 7713 else 7714 listp = &vp->v_pages; 7715 page_vpadd(listp, pp); 7716 } 7717 mutex_exit(vphm); 7718 } 7719 } 7720 7721 void 7722 hat_page_clrattr(page_t *pp, uint_t flag) 7723 { 7724 vnode_t *vp = pp->p_vnode; 7725 kmutex_t *pmtx; 7726 7727 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7728 7729 pmtx = sfmmu_page_enter(pp); 7730 7731 /* 7732 * Caller is expected to hold page's io lock for VMODSORT to work 7733 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7734 * bit is cleared. 7735 * We don't have assert to avoid tripping some existing third party 7736 * code. The dirty page is moved back to top of the v_page list 7737 * after IO is done in pvn_write_done(). 7738 */ 7739 pp->p_nrm &= ~flag; 7740 sfmmu_page_exit(pmtx); 7741 7742 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7743 7744 /* 7745 * VMODSORT works by removing write permissions and getting 7746 * a fault when a page is made dirty. At this point 7747 * we need to remove write permission from all mappings 7748 * to this page. 7749 */ 7750 hat_page_clrwrt(pp); 7751 } 7752 } 7753 7754 uint_t 7755 hat_page_getattr(page_t *pp, uint_t flag) 7756 { 7757 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7758 return ((uint_t)(pp->p_nrm & flag)); 7759 } 7760 7761 /* 7762 * DEBUG kernels: verify that a kernel va<->pa translation 7763 * is safe by checking the underlying page_t is in a page 7764 * relocation-safe state. 7765 */ 7766 #ifdef DEBUG 7767 void 7768 sfmmu_check_kpfn(pfn_t pfn) 7769 { 7770 page_t *pp; 7771 int index, cons; 7772 7773 if (hat_check_vtop == 0) 7774 return; 7775 7776 if (kvseg.s_base == NULL || panicstr) 7777 return; 7778 7779 pp = page_numtopp_nolock(pfn); 7780 if (!pp) 7781 return; 7782 7783 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7784 return; 7785 7786 /* 7787 * Handed a large kernel page, we dig up the root page since we 7788 * know the root page might have the lock also. 7789 */ 7790 if (pp->p_szc != 0) { 7791 index = PP_MAPINDEX(pp); 7792 cons = TTE8K; 7793 again: 7794 while (index != 0) { 7795 index >>= 1; 7796 if (index != 0) 7797 cons++; 7798 if (index & 0x1) { 7799 pp = PP_GROUPLEADER(pp, cons); 7800 goto again; 7801 } 7802 } 7803 } 7804 7805 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7806 return; 7807 7808 /* 7809 * Pages need to be locked or allocated "permanent" (either from 7810 * static_arena arena or explicitly setting PG_NORELOC when calling 7811 * page_create_va()) for VA->PA translations to be valid. 
7812 */ 7813 if (!PP_ISNORELOC(pp)) 7814 panic("Illegal VA->PA translation, pp 0x%p not permanent", 7815 (void *)pp); 7816 else 7817 panic("Illegal VA->PA translation, pp 0x%p not locked", 7818 (void *)pp); 7819 } 7820 #endif /* DEBUG */ 7821 7822 /* 7823 * Returns a page frame number for a given virtual address. 7824 * Returns PFN_INVALID to indicate an invalid mapping 7825 */ 7826 pfn_t 7827 hat_getpfnum(struct hat *hat, caddr_t addr) 7828 { 7829 pfn_t pfn; 7830 tte_t tte; 7831 7832 /* 7833 * We would like to 7834 * ASSERT(AS_LOCK_HELD(as)); 7835 * but we can't because the iommu driver will call this 7836 * routine at interrupt time and it can't grab the as lock 7837 * or it will deadlock: A thread could have the as lock 7838 * and be waiting for io. The io can't complete 7839 * because the interrupt thread is blocked trying to grab 7840 * the as lock. 7841 */ 7842 7843 if (hat == ksfmmup) { 7844 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7845 ASSERT(segkmem_lpszc > 0); 7846 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7847 if (pfn != PFN_INVALID) { 7848 sfmmu_check_kpfn(pfn); 7849 return (pfn); 7850 } 7851 } else if (segkpm && IS_KPM_ADDR(addr)) { 7852 return (sfmmu_kpm_vatopfn(addr)); 7853 } 7854 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7855 == PFN_SUSPENDED) { 7856 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7857 } 7858 sfmmu_check_kpfn(pfn); 7859 return (pfn); 7860 } else { 7861 return (sfmmu_uvatopfn(addr, hat, NULL)); 7862 } 7863 } 7864 7865 /* 7866 * This routine will return both pfn and tte for the vaddr. 7867 */ 7868 static pfn_t 7869 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 7870 { 7871 struct hmehash_bucket *hmebp; 7872 hmeblk_tag hblktag; 7873 int hmeshift, hashno = 1; 7874 struct hme_blk *hmeblkp = NULL; 7875 tte_t tte; 7876 7877 struct sf_hment *sfhmep; 7878 pfn_t pfn; 7879 7880 /* support for ISM */ 7881 ism_map_t *ism_map; 7882 ism_blk_t *ism_blkp; 7883 int i; 7884 sfmmu_t *ism_hatid = NULL; 7885 sfmmu_t *locked_hatid = NULL; 7886 sfmmu_t *sv_sfmmup = sfmmup; 7887 caddr_t sv_vaddr = vaddr; 7888 sf_srd_t *srdp; 7889 7890 if (ttep == NULL) { 7891 ttep = &tte; 7892 } else { 7893 ttep->ll = 0; 7894 } 7895 7896 ASSERT(sfmmup != ksfmmup); 7897 SFMMU_STAT(sf_user_vtop); 7898 /* 7899 * Set ism_hatid if vaddr falls in a ISM segment. 
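	 * If it does, the lookup is redirected to the ISM hat: sfmmup is
	 * switched to the ism_hatid and vaddr is rebased to its offset
	 * within the ISM segment before the hash search below.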
7900 */ 7901 ism_blkp = sfmmup->sfmmu_iblk; 7902 if (ism_blkp != NULL) { 7903 sfmmu_ismhat_enter(sfmmup, 0); 7904 locked_hatid = sfmmup; 7905 } 7906 while (ism_blkp != NULL && ism_hatid == NULL) { 7907 ism_map = ism_blkp->iblk_maps; 7908 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7909 if (vaddr >= ism_start(ism_map[i]) && 7910 vaddr < ism_end(ism_map[i])) { 7911 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7912 vaddr = (caddr_t)(vaddr - 7913 ism_start(ism_map[i])); 7914 break; 7915 } 7916 } 7917 ism_blkp = ism_blkp->iblk_next; 7918 } 7919 if (locked_hatid) { 7920 sfmmu_ismhat_exit(locked_hatid, 0); 7921 } 7922 7923 hblktag.htag_id = sfmmup; 7924 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 7925 do { 7926 hmeshift = HME_HASH_SHIFT(hashno); 7927 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7928 hblktag.htag_rehash = hashno; 7929 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7930 7931 SFMMU_HASH_LOCK(hmebp); 7932 7933 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7934 if (hmeblkp != NULL) { 7935 ASSERT(!hmeblkp->hblk_shared); 7936 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7937 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7938 SFMMU_HASH_UNLOCK(hmebp); 7939 if (TTE_IS_VALID(ttep)) { 7940 pfn = TTE_TO_PFN(vaddr, ttep); 7941 return (pfn); 7942 } 7943 break; 7944 } 7945 SFMMU_HASH_UNLOCK(hmebp); 7946 hashno++; 7947 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7948 7949 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 7950 return (PFN_INVALID); 7951 } 7952 srdp = sv_sfmmup->sfmmu_srdp; 7953 ASSERT(srdp != NULL); 7954 ASSERT(srdp->srd_refcnt != 0); 7955 hblktag.htag_id = srdp; 7956 hashno = 1; 7957 do { 7958 hmeshift = HME_HASH_SHIFT(hashno); 7959 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 7960 hblktag.htag_rehash = hashno; 7961 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 7962 7963 SFMMU_HASH_LOCK(hmebp); 7964 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 7965 hmeblkp = hmeblkp->hblk_next) { 7966 uint_t rid; 7967 sf_region_t *rgnp; 7968 caddr_t rsaddr; 7969 caddr_t readdr; 7970 7971 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 7972 sv_sfmmup->sfmmu_hmeregion_map)) { 7973 continue; 7974 } 7975 ASSERT(hmeblkp->hblk_shared); 7976 rid = hmeblkp->hblk_tag.htag_rid; 7977 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7978 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7979 rgnp = srdp->srd_hmergnp[rid]; 7980 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7981 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 7982 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7983 rsaddr = rgnp->rgn_saddr; 7984 readdr = rsaddr + rgnp->rgn_size; 7985 #ifdef DEBUG 7986 if (TTE_IS_VALID(ttep) || 7987 get_hblk_ttesz(hmeblkp) > TTE8K) { 7988 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 7989 ASSERT(eva > sv_vaddr); 7990 ASSERT(sv_vaddr >= rsaddr); 7991 ASSERT(sv_vaddr < readdr); 7992 ASSERT(eva <= readdr); 7993 } 7994 #endif /* DEBUG */ 7995 /* 7996 * Continue the search if we 7997 * found an invalid 8K tte outside of the area 7998 * covered by this hmeblk's region. 
7999 */ 8000 if (TTE_IS_VALID(ttep)) { 8001 SFMMU_HASH_UNLOCK(hmebp); 8002 pfn = TTE_TO_PFN(sv_vaddr, ttep); 8003 return (pfn); 8004 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8005 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8006 SFMMU_HASH_UNLOCK(hmebp); 8007 pfn = PFN_INVALID; 8008 return (pfn); 8009 } 8010 } 8011 SFMMU_HASH_UNLOCK(hmebp); 8012 hashno++; 8013 } while (hashno <= mmu_hashcnt); 8014 return (PFN_INVALID); 8015 } 8016 8017 8018 /* 8019 * For compatability with AT&T and later optimizations 8020 */ 8021 /* ARGSUSED */ 8022 void 8023 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8024 { 8025 ASSERT(hat != NULL); 8026 } 8027 8028 /* 8029 * Return the number of mappings to a particular page. This number is an 8030 * approximation of the number of people sharing the page. 8031 * 8032 * shared hmeblks or ism hmeblks are counted as 1 mapping here. 8033 * hat_page_checkshare() can be used to compare threshold to share 8034 * count that reflects the number of region sharers albeit at higher cost. 8035 */ 8036 ulong_t 8037 hat_page_getshare(page_t *pp) 8038 { 8039 page_t *spp = pp; /* start page */ 8040 kmutex_t *pml; 8041 ulong_t cnt; 8042 int index, sz = TTE64K; 8043 8044 /* 8045 * We need to grab the mlist lock to make sure any outstanding 8046 * load/unloads complete. Otherwise we could return zero 8047 * even though the unload(s) hasn't finished yet. 8048 */ 8049 pml = sfmmu_mlist_enter(spp); 8050 cnt = spp->p_share; 8051 8052 #ifdef VAC 8053 if (kpm_enable) 8054 cnt += spp->p_kpmref; 8055 #endif 8056 if (vpm_enable && pp->p_vpmref) { 8057 cnt += 1; 8058 } 8059 8060 /* 8061 * If we have any large mappings, we count the number of 8062 * mappings that this large page is part of. 8063 */ 8064 index = PP_MAPINDEX(spp); 8065 index >>= 1; 8066 while (index) { 8067 pp = PP_GROUPLEADER(spp, sz); 8068 if ((index & 0x1) && pp != spp) { 8069 cnt += pp->p_share; 8070 spp = pp; 8071 } 8072 index >>= 1; 8073 sz++; 8074 } 8075 sfmmu_mlist_exit(pml); 8076 return (cnt); 8077 } 8078 8079 /* 8080 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8081 * otherwise. Count shared hmeblks by region's refcnt. 
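 * Unlike hat_page_getshare(), which counts a shared or ISM hmeblk as a
 * single mapping, weighing a shared hmeblk by its region's refcnt makes the
 * comparison reflect the number of region sharers, at a higher cost.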
 */
int
hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
{
	kmutex_t *pml;
	ulong_t cnt = 0;
	int index, sz = TTE8K;
	struct sf_hment *sfhme, *tmphme = NULL;
	struct hme_blk *hmeblkp;

	pml = sfmmu_mlist_enter(pp);

#ifdef VAC
	if (kpm_enable)
		cnt = pp->p_kpmref;
#endif

	if (vpm_enable && pp->p_vpmref) {
		cnt += 1;
	}

	if (pp->p_share + cnt > sh_thresh) {
		sfmmu_mlist_exit(pml);
		return (1);
	}

	index = PP_MAPINDEX(pp);

again:
	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
		tmphme = sfhme->hme_next;
		if (IS_PAHME(sfhme)) {
			continue;
		}

		hmeblkp = sfmmu_hmetohblk(sfhme);
		if (hme_size(sfhme) != sz) {
			continue;
		}

		if (hmeblkp->hblk_shared) {
			sf_srd_t *srdp = hblktosrd(hmeblkp);
			uint_t rid = hmeblkp->hblk_tag.htag_rid;
			sf_region_t *rgnp;
			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
			ASSERT(srdp != NULL);
			rgnp = srdp->srd_hmergnp[rid];
			SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
			    rgnp, rid);
			cnt += rgnp->rgn_refcnt;
		} else {
			cnt++;
		}
		if (cnt > sh_thresh) {
			sfmmu_mlist_exit(pml);
			return (1);
		}
	}

	index >>= 1;
	sz++;
	while (index) {
		pp = PP_GROUPLEADER(pp, sz);
		ASSERT(sfmmu_mlist_held(pp));
		if (index & 0x1) {
			goto again;
		}
		index >>= 1;
		sz++;
	}
	sfmmu_mlist_exit(pml);
	return (0);
}

/*
 * Unload all large mappings to the pp and reset the p_szc field of every
 * constituent page according to the remaining mappings.
 *
 * pp must be locked SE_EXCL. Even though no other constituent pages are
 * locked it's legal to unload the large mappings to the pp because all
 * constituent pages of large locked mappings have to be locked SE_SHARED.
 * This means if we have SE_EXCL lock on one of constituent pages none of the
 * large mappings to pp are locked.
 *
 * Decrease p_szc field starting from the last constituent page and ending
 * with the root page. This method is used because other threads rely on the
 * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
 * ensures that p_szc changes of the constituent pages appear atomic for all
 * threads that use sfmmu_mlspl_enter() to examine the p_szc field.
 *
 * This mechanism is only used for file system pages where it's not always
 * possible to get SE_EXCL locks on all constituent pages to demote the size
 * code (as is done for anonymous or kernel large pages).
 *
 * See more comments in front of sfmmu_mlspl_enter().
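 *
 * As a rough illustration (constituent counts depend on the platform page
 * sizes): demoting a 4M page built from 512 8K constituents walks backwards
 * from the last constituent, setting each p_szc to the size of whatever
 * mapping still remains (0 if none); a membar_producer() is issued before
 * each sub-group leader's p_szc is updated so the constituent updates are
 * visible first, and the root page's own p_szc is lowered last of all.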
8179 */ 8180 void 8181 hat_page_demote(page_t *pp) 8182 { 8183 int index; 8184 int sz; 8185 cpuset_t cpuset; 8186 int sync = 0; 8187 page_t *rootpp; 8188 struct sf_hment *sfhme; 8189 struct sf_hment *tmphme = NULL; 8190 uint_t pszc; 8191 page_t *lastpp; 8192 cpuset_t tset; 8193 pgcnt_t npgs; 8194 kmutex_t *pml; 8195 kmutex_t *pmtx = NULL; 8196 8197 ASSERT(PAGE_EXCL(pp)); 8198 ASSERT(!PP_ISFREE(pp)); 8199 ASSERT(!PP_ISKAS(pp)); 8200 ASSERT(page_szc_lock_assert(pp)); 8201 pml = sfmmu_mlist_enter(pp); 8202 8203 pszc = pp->p_szc; 8204 if (pszc == 0) { 8205 goto out; 8206 } 8207 8208 index = PP_MAPINDEX(pp) >> 1; 8209 8210 if (index) { 8211 CPUSET_ZERO(cpuset); 8212 sz = TTE64K; 8213 sync = 1; 8214 } 8215 8216 while (index) { 8217 if (!(index & 0x1)) { 8218 index >>= 1; 8219 sz++; 8220 continue; 8221 } 8222 ASSERT(sz <= pszc); 8223 rootpp = PP_GROUPLEADER(pp, sz); 8224 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8225 tmphme = sfhme->hme_next; 8226 ASSERT(!IS_PAHME(sfhme)); 8227 if (hme_size(sfhme) != sz) { 8228 continue; 8229 } 8230 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8231 CPUSET_OR(cpuset, tset); 8232 } 8233 if (index >>= 1) { 8234 sz++; 8235 } 8236 } 8237 8238 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8239 8240 if (sync) { 8241 xt_sync(cpuset); 8242 #ifdef VAC 8243 if (PP_ISTNC(pp)) { 8244 conv_tnc(rootpp, sz); 8245 } 8246 #endif /* VAC */ 8247 } 8248 8249 pmtx = sfmmu_page_enter(pp); 8250 8251 ASSERT(pp->p_szc == pszc); 8252 rootpp = PP_PAGEROOT(pp); 8253 ASSERT(rootpp->p_szc == pszc); 8254 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8255 8256 while (lastpp != rootpp) { 8257 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8258 ASSERT(sz < pszc); 8259 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8260 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8261 while (--npgs > 0) { 8262 lastpp->p_szc = (uchar_t)sz; 8263 lastpp = PP_PAGEPREV(lastpp); 8264 } 8265 if (sz) { 8266 /* 8267 * make sure before current root's pszc 8268 * is updated all updates to constituent pages pszc 8269 * fields are globally visible. 8270 */ 8271 membar_producer(); 8272 } 8273 lastpp->p_szc = sz; 8274 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8275 if (lastpp != rootpp) { 8276 lastpp = PP_PAGEPREV(lastpp); 8277 } 8278 } 8279 if (sz == 0) { 8280 /* the loop above doesn't cover this case */ 8281 rootpp->p_szc = 0; 8282 } 8283 out: 8284 ASSERT(pp->p_szc == 0); 8285 if (pmtx != NULL) { 8286 sfmmu_page_exit(pmtx); 8287 } 8288 sfmmu_mlist_exit(pml); 8289 } 8290 8291 /* 8292 * Refresh the HAT ismttecnt[] element for size szc. 8293 * Caller must have set ISM busy flag to prevent mapping 8294 * lists from changing while we're traversing them. 
8295 */ 8296 pgcnt_t 8297 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8298 { 8299 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8300 ism_map_t *ism_map; 8301 pgcnt_t npgs = 0; 8302 pgcnt_t npgs_scd = 0; 8303 int j; 8304 sf_scd_t *scdp; 8305 uchar_t rid; 8306 8307 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8308 scdp = sfmmup->sfmmu_scdp; 8309 8310 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8311 ism_map = ism_blkp->iblk_maps; 8312 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8313 rid = ism_map[j].imap_rid; 8314 ASSERT(rid == SFMMU_INVALID_ISMRID || 8315 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8316 8317 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8318 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8319 /* ISM is in sfmmup's SCD */ 8320 npgs_scd += 8321 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8322 } else { 8323 /* ISMs is not in SCD */ 8324 npgs += 8325 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8326 } 8327 } 8328 } 8329 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8330 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8331 return (npgs); 8332 } 8333 8334 /* 8335 * Yield the memory claim requirement for an address space. 8336 * 8337 * This is currently implemented as the number of bytes that have active 8338 * hardware translations that have page structures. Therefore, it can 8339 * underestimate the traditional resident set size, eg, if the 8340 * physical page is present and the hardware translation is missing; 8341 * and it can overestimate the rss, eg, if there are active 8342 * translations to a frame buffer with page structs. 8343 * Also, it does not take sharing into account. 8344 * 8345 * Note that we don't acquire locks here since this function is most often 8346 * called from the clock thread. 8347 */ 8348 size_t 8349 hat_get_mapped_size(struct hat *hat) 8350 { 8351 size_t assize = 0; 8352 int i; 8353 8354 if (hat == NULL) 8355 return (0); 8356 8357 for (i = 0; i < mmu_page_sizes; i++) 8358 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8359 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8360 8361 if (hat->sfmmu_iblk == NULL) 8362 return (assize); 8363 8364 for (i = 0; i < mmu_page_sizes; i++) 8365 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8366 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8367 8368 return (assize); 8369 } 8370 8371 int 8372 hat_stats_enable(struct hat *hat) 8373 { 8374 hatlock_t *hatlockp; 8375 8376 hatlockp = sfmmu_hat_enter(hat); 8377 hat->sfmmu_rmstat++; 8378 sfmmu_hat_exit(hatlockp); 8379 return (1); 8380 } 8381 8382 void 8383 hat_stats_disable(struct hat *hat) 8384 { 8385 hatlock_t *hatlockp; 8386 8387 hatlockp = sfmmu_hat_enter(hat); 8388 hat->sfmmu_rmstat--; 8389 sfmmu_hat_exit(hatlockp); 8390 } 8391 8392 /* 8393 * Routines for entering or removing ourselves from the 8394 * ism_hat's mapping list. This is used for both private and 8395 * SCD hats. 
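 *
 * The list is a simple doubly linked list headed by ism_hat->sfmmu_iment:
 * iment_add() pushes the new entry at the head and iment_sub() unlinks an
 * arbitrary entry, so both are constant time while ism_mlist_lock is held.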
8396 */ 8397 static void 8398 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8399 { 8400 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8401 8402 iment->iment_prev = NULL; 8403 iment->iment_next = ism_hat->sfmmu_iment; 8404 if (ism_hat->sfmmu_iment) { 8405 ism_hat->sfmmu_iment->iment_prev = iment; 8406 } 8407 ism_hat->sfmmu_iment = iment; 8408 } 8409 8410 static void 8411 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8412 { 8413 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8414 8415 if (ism_hat->sfmmu_iment == NULL) { 8416 panic("ism map entry remove - no entries"); 8417 } 8418 8419 if (iment->iment_prev) { 8420 ASSERT(ism_hat->sfmmu_iment != iment); 8421 iment->iment_prev->iment_next = iment->iment_next; 8422 } else { 8423 ASSERT(ism_hat->sfmmu_iment == iment); 8424 ism_hat->sfmmu_iment = iment->iment_next; 8425 } 8426 8427 if (iment->iment_next) { 8428 iment->iment_next->iment_prev = iment->iment_prev; 8429 } 8430 8431 /* 8432 * zero out the entry 8433 */ 8434 iment->iment_next = NULL; 8435 iment->iment_prev = NULL; 8436 iment->iment_hat = NULL; 8437 iment->iment_base_va = 0; 8438 } 8439 8440 /* 8441 * Hat_share()/unshare() return an (non-zero) error 8442 * when saddr and daddr are not properly aligned. 8443 * 8444 * The top level mapping element determines the alignment 8445 * requirement for saddr and daddr, depending on different 8446 * architectures. 8447 * 8448 * When hat_share()/unshare() are not supported, 8449 * HATOP_SHARE()/UNSHARE() return 0 8450 */ 8451 int 8452 hat_share(struct hat *sfmmup, caddr_t addr, struct hat *ism_hatid, 8453 caddr_t sptaddr, size_t len, uint_t ismszc) 8454 { 8455 ism_blk_t *ism_blkp; 8456 ism_blk_t *new_iblk; 8457 ism_map_t *ism_map; 8458 ism_ment_t *ism_ment; 8459 int i, added; 8460 hatlock_t *hatlockp; 8461 int reload_mmu = 0; 8462 uint_t ismshift = page_get_shift(ismszc); 8463 size_t ismpgsz = page_get_pagesize(ismszc); 8464 uint_t ismmask = (uint_t)ismpgsz - 1; 8465 size_t sh_size = ISM_SHIFT(ismshift, len); 8466 ushort_t ismhatflag; 8467 hat_region_cookie_t rcookie; 8468 sf_scd_t *old_scdp; 8469 8470 #ifdef DEBUG 8471 caddr_t eaddr = addr + len; 8472 #endif /* DEBUG */ 8473 8474 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8475 ASSERT(sptaddr == ISMID_STARTADDR); 8476 /* 8477 * Check the alignment. 8478 */ 8479 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8480 return (EINVAL); 8481 8482 /* 8483 * Check size alignment. 8484 */ 8485 if (!ISM_ALIGNED(ismshift, len)) 8486 return (EINVAL); 8487 8488 /* 8489 * Allocate ism_ment for the ism_hat's mapping list, and an 8490 * ism map blk in case we need one. We must do our 8491 * allocations before acquiring locks to prevent a deadlock 8492 * in the kmem allocator on the mapping list lock. 8493 */ 8494 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8495 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8496 8497 /* 8498 * Serialize ISM mappings with the ISM busy flag, and also the 8499 * trap handlers. 8500 */ 8501 sfmmu_ismhat_enter(sfmmup, 0); 8502 8503 /* 8504 * Allocate an ism map blk if necessary. 8505 */ 8506 if (sfmmup->sfmmu_iblk == NULL) { 8507 sfmmup->sfmmu_iblk = new_iblk; 8508 bzero(new_iblk, sizeof (*new_iblk)); 8509 new_iblk->iblk_nextpa = (uint64_t)-1; 8510 membar_stst(); /* make sure next ptr visible to all CPUs */ 8511 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8512 reload_mmu = 1; 8513 new_iblk = NULL; 8514 } 8515 8516 #ifdef DEBUG 8517 /* 8518 * Make sure mapping does not already exist. 
8519 */ 8520 ism_blkp = sfmmup->sfmmu_iblk; 8521 while (ism_blkp != NULL) { 8522 ism_map = ism_blkp->iblk_maps; 8523 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8524 if ((addr >= ism_start(ism_map[i]) && 8525 addr < ism_end(ism_map[i])) || 8526 eaddr > ism_start(ism_map[i]) && 8527 eaddr <= ism_end(ism_map[i])) { 8528 panic("sfmmu_share: Already mapped!"); 8529 } 8530 } 8531 ism_blkp = ism_blkp->iblk_next; 8532 } 8533 #endif /* DEBUG */ 8534 8535 ASSERT(ismszc >= TTE4M); 8536 if (ismszc == TTE4M) { 8537 ismhatflag = HAT_4M_FLAG; 8538 } else if (ismszc == TTE32M) { 8539 ismhatflag = HAT_32M_FLAG; 8540 } else if (ismszc == TTE256M) { 8541 ismhatflag = HAT_256M_FLAG; 8542 } 8543 /* 8544 * Add mapping to first available mapping slot. 8545 */ 8546 ism_blkp = sfmmup->sfmmu_iblk; 8547 added = 0; 8548 while (!added) { 8549 ism_map = ism_blkp->iblk_maps; 8550 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8551 if (ism_map[i].imap_ismhat == NULL) { 8552 8553 ism_map[i].imap_ismhat = ism_hatid; 8554 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8555 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8556 ism_map[i].imap_hatflags = ismhatflag; 8557 ism_map[i].imap_sz_mask = ismmask; 8558 /* 8559 * imap_seg is checked in ISM_CHECK to see if 8560 * non-NULL, then other info assumed valid. 8561 */ 8562 membar_stst(); 8563 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8564 ism_map[i].imap_ment = ism_ment; 8565 8566 /* 8567 * Now add ourselves to the ism_hat's 8568 * mapping list. 8569 */ 8570 ism_ment->iment_hat = sfmmup; 8571 ism_ment->iment_base_va = addr; 8572 ism_hatid->sfmmu_ismhat = 1; 8573 mutex_enter(&ism_mlist_lock); 8574 iment_add(ism_ment, ism_hatid); 8575 mutex_exit(&ism_mlist_lock); 8576 added = 1; 8577 break; 8578 } 8579 } 8580 if (!added && ism_blkp->iblk_next == NULL) { 8581 ism_blkp->iblk_next = new_iblk; 8582 new_iblk = NULL; 8583 bzero(ism_blkp->iblk_next, 8584 sizeof (*ism_blkp->iblk_next)); 8585 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8586 membar_stst(); 8587 ism_blkp->iblk_nextpa = 8588 va_to_pa((caddr_t)ism_blkp->iblk_next); 8589 } 8590 ism_blkp = ism_blkp->iblk_next; 8591 } 8592 8593 /* 8594 * After calling hat_join_region, sfmmup may join a new SCD or 8595 * move from the old scd to a new scd, in which case, we want to 8596 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8597 * sfmmu_check_page_sizes at the end of this routine. 8598 */ 8599 old_scdp = sfmmup->sfmmu_scdp; 8600 8601 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8602 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8603 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8604 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8605 } 8606 /* 8607 * Update our counters for this sfmmup's ism mappings. 8608 */ 8609 for (i = 0; i <= ismszc; i++) { 8610 if (!(disable_ism_large_pages & (1 << i))) 8611 (void) ism_tsb_entries(sfmmup, i); 8612 } 8613 8614 /* 8615 * For ISM and DISM we do not support 512K pages, so we only only 8616 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8617 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8618 * 8619 * Need to set 32M/256M ISM flags to make sure 8620 * sfmmu_check_page_sizes() enables them on Panther. 
8621 */ 8622 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8623 8624 switch (ismszc) { 8625 case TTE256M: 8626 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8627 hatlockp = sfmmu_hat_enter(sfmmup); 8628 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8629 sfmmu_hat_exit(hatlockp); 8630 } 8631 break; 8632 case TTE32M: 8633 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8634 hatlockp = sfmmu_hat_enter(sfmmup); 8635 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8636 sfmmu_hat_exit(hatlockp); 8637 } 8638 break; 8639 default: 8640 break; 8641 } 8642 8643 /* 8644 * If we updated the ismblkpa for this HAT we must make 8645 * sure all CPUs running this process reload their tsbmiss area. 8646 * Otherwise they will fail to load the mappings in the tsbmiss 8647 * handler and will loop calling pagefault(). 8648 */ 8649 if (reload_mmu) { 8650 hatlockp = sfmmu_hat_enter(sfmmup); 8651 sfmmu_sync_mmustate(sfmmup); 8652 sfmmu_hat_exit(hatlockp); 8653 } 8654 8655 sfmmu_ismhat_exit(sfmmup, 0); 8656 8657 /* 8658 * Free up ismblk if we didn't use it. 8659 */ 8660 if (new_iblk != NULL) 8661 kmem_cache_free(ism_blk_cache, new_iblk); 8662 8663 /* 8664 * Check TSB and TLB page sizes. 8665 */ 8666 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8667 sfmmu_check_page_sizes(sfmmup, 0); 8668 } else { 8669 sfmmu_check_page_sizes(sfmmup, 1); 8670 } 8671 return (0); 8672 } 8673 8674 /* 8675 * hat_unshare removes exactly one ism_map from 8676 * this process's as. It expects multiple calls 8677 * to hat_unshare for multiple shm segments. 8678 */ 8679 void 8680 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8681 { 8682 ism_map_t *ism_map; 8683 ism_ment_t *free_ment = NULL; 8684 ism_blk_t *ism_blkp; 8685 struct hat *ism_hatid; 8686 int found, i; 8687 hatlock_t *hatlockp; 8688 struct tsb_info *tsbinfo; 8689 uint_t ismshift = page_get_shift(ismszc); 8690 size_t sh_size = ISM_SHIFT(ismshift, len); 8691 uchar_t ism_rid; 8692 sf_scd_t *old_scdp; 8693 8694 ASSERT(ISM_ALIGNED(ismshift, addr)); 8695 ASSERT(ISM_ALIGNED(ismshift, len)); 8696 ASSERT(sfmmup != NULL); 8697 ASSERT(sfmmup != ksfmmup); 8698 8699 ASSERT(sfmmup->sfmmu_as != NULL); 8700 8701 /* 8702 * Make sure that during the entire time ISM mappings are removed, 8703 * the trap handlers serialize behind us, and that no one else 8704 * can be mucking with ISM mappings. This also lets us get away 8705 * with not doing expensive cross calls to flush the TLB -- we 8706 * just discard the context, flush the entire TSB, and call it 8707 * a day. 8708 */ 8709 sfmmu_ismhat_enter(sfmmup, 0); 8710 8711 /* 8712 * Remove the mapping. 8713 * 8714 * We can't have any holes in the ism map. 8715 * The tsb miss code while searching the ism map will 8716 * stop on an empty map slot. So we must move 8717 * everyone past the hole up 1 if any. 8718 * 8719 * Also empty ism map blks are not freed until the 8720 * process exits. This is to prevent a MT race condition 8721 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 
8722 */ 8723 found = 0; 8724 ism_blkp = sfmmup->sfmmu_iblk; 8725 while (!found && ism_blkp != NULL) { 8726 ism_map = ism_blkp->iblk_maps; 8727 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8728 if (addr == ism_start(ism_map[i]) && 8729 sh_size == (size_t)(ism_size(ism_map[i]))) { 8730 found = 1; 8731 break; 8732 } 8733 } 8734 if (!found) 8735 ism_blkp = ism_blkp->iblk_next; 8736 } 8737 8738 if (found) { 8739 ism_hatid = ism_map[i].imap_ismhat; 8740 ism_rid = ism_map[i].imap_rid; 8741 ASSERT(ism_hatid != NULL); 8742 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8743 8744 /* 8745 * After hat_leave_region, the sfmmup may leave SCD, 8746 * in which case, we want to grow the private tsb size when 8747 * calling sfmmu_check_page_sizes at the end of the routine. 8748 */ 8749 old_scdp = sfmmup->sfmmu_scdp; 8750 /* 8751 * Then remove ourselves from the region. 8752 */ 8753 if (ism_rid != SFMMU_INVALID_ISMRID) { 8754 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8755 HAT_REGION_ISM); 8756 } 8757 8758 /* 8759 * And now guarantee that any other cpu 8760 * that tries to process an ISM miss 8761 * will go to tl=0. 8762 */ 8763 hatlockp = sfmmu_hat_enter(sfmmup); 8764 sfmmu_invalidate_ctx(sfmmup); 8765 sfmmu_hat_exit(hatlockp); 8766 8767 /* 8768 * Remove ourselves from the ism mapping list. 8769 */ 8770 mutex_enter(&ism_mlist_lock); 8771 iment_sub(ism_map[i].imap_ment, ism_hatid); 8772 mutex_exit(&ism_mlist_lock); 8773 free_ment = ism_map[i].imap_ment; 8774 8775 /* 8776 * We delete the ism map by copying 8777 * the next map over the current one. 8778 * We will take the next one in the maps 8779 * array or from the next ism_blk. 8780 */ 8781 while (ism_blkp != NULL) { 8782 ism_map = ism_blkp->iblk_maps; 8783 while (i < (ISM_MAP_SLOTS - 1)) { 8784 ism_map[i] = ism_map[i + 1]; 8785 i++; 8786 } 8787 /* i == (ISM_MAP_SLOTS - 1) */ 8788 ism_blkp = ism_blkp->iblk_next; 8789 if (ism_blkp != NULL) { 8790 ism_map[i] = ism_blkp->iblk_maps[0]; 8791 i = 0; 8792 } else { 8793 ism_map[i].imap_seg = 0; 8794 ism_map[i].imap_vb_shift = 0; 8795 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8796 ism_map[i].imap_hatflags = 0; 8797 ism_map[i].imap_sz_mask = 0; 8798 ism_map[i].imap_ismhat = NULL; 8799 ism_map[i].imap_ment = NULL; 8800 } 8801 } 8802 8803 /* 8804 * Now flush entire TSB for the process, since 8805 * demapping page by page can be too expensive. 8806 * We don't have to flush the TLB here anymore 8807 * since we switch to a new TLB ctx instead. 8808 * Also, there is no need to flush if the process 8809 * is exiting since the TSB will be freed later. 8810 */ 8811 if (!sfmmup->sfmmu_free) { 8812 hatlockp = sfmmu_hat_enter(sfmmup); 8813 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8814 tsbinfo = tsbinfo->tsb_next) { 8815 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8816 continue; 8817 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 8818 tsbinfo->tsb_flags |= 8819 TSB_FLUSH_NEEDED; 8820 continue; 8821 } 8822 8823 sfmmu_inv_tsb(tsbinfo->tsb_va, 8824 TSB_BYTES(tsbinfo->tsb_szc)); 8825 } 8826 sfmmu_hat_exit(hatlockp); 8827 } 8828 } 8829 8830 /* 8831 * Update our counters for this sfmmup's ism mappings. 8832 */ 8833 for (i = 0; i <= ismszc; i++) { 8834 if (!(disable_ism_large_pages & (1 << i))) 8835 (void) ism_tsb_entries(sfmmup, i); 8836 } 8837 8838 sfmmu_ismhat_exit(sfmmup, 0); 8839 8840 /* 8841 * We must do our freeing here after dropping locks 8842 * to prevent a deadlock in the kmem allocator on the 8843 * mapping list lock. 
8844 */ 8845 if (free_ment != NULL) 8846 kmem_cache_free(ism_ment_cache, free_ment); 8847 8848 /* 8849 * Check TSB and TLB page sizes if the process isn't exiting. 8850 */ 8851 if (!sfmmup->sfmmu_free) { 8852 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 8853 sfmmu_check_page_sizes(sfmmup, 1); 8854 } else { 8855 sfmmu_check_page_sizes(sfmmup, 0); 8856 } 8857 } 8858 } 8859 8860 /* ARGSUSED */ 8861 static int 8862 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8863 { 8864 /* void *buf is sfmmu_t pointer */ 8865 bzero(buf, sizeof (sfmmu_t)); 8866 8867 return (0); 8868 } 8869 8870 /* ARGSUSED */ 8871 static void 8872 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8873 { 8874 /* void *buf is sfmmu_t pointer */ 8875 } 8876 8877 /* 8878 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8879 * field to be the pa of this hmeblk 8880 */ 8881 /* ARGSUSED */ 8882 static int 8883 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8884 { 8885 struct hme_blk *hmeblkp; 8886 8887 bzero(buf, (size_t)cdrarg); 8888 hmeblkp = (struct hme_blk *)buf; 8889 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8890 8891 #ifdef HBLK_TRACE 8892 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8893 #endif /* HBLK_TRACE */ 8894 8895 return (0); 8896 } 8897 8898 /* ARGSUSED */ 8899 static void 8900 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8901 { 8902 8903 #ifdef HBLK_TRACE 8904 8905 struct hme_blk *hmeblkp; 8906 8907 hmeblkp = (struct hme_blk *)buf; 8908 mutex_destroy(&hmeblkp->hblk_audit_lock); 8909 8910 #endif /* HBLK_TRACE */ 8911 } 8912 8913 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8914 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8915 /* 8916 * The kmem allocator will callback into our reclaim routine when the system 8917 * is running low in memory. We traverse the hash and free up all unused but 8918 * still cached hme_blks. We also traverse the free list and free them up 8919 * as well. 
8920 */ 8921 /*ARGSUSED*/ 8922 static void 8923 sfmmu_hblkcache_reclaim(void *cdrarg) 8924 { 8925 int i; 8926 struct hmehash_bucket *hmebp; 8927 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8928 static struct hmehash_bucket *uhmehash_reclaim_hand; 8929 static struct hmehash_bucket *khmehash_reclaim_hand; 8930 struct hme_blk *list = NULL, *last_hmeblkp; 8931 cpuset_t cpuset = cpu_ready_set; 8932 cpu_hme_pend_t *cpuhp; 8933 8934 /* Free up hmeblks on the cpu pending lists */ 8935 for (i = 0; i < NCPU; i++) { 8936 cpuhp = &cpu_hme_pend[i]; 8937 if (cpuhp->chp_listp != NULL) { 8938 mutex_enter(&cpuhp->chp_mutex); 8939 if (cpuhp->chp_listp == NULL) { 8940 mutex_exit(&cpuhp->chp_mutex); 8941 continue; 8942 } 8943 for (last_hmeblkp = cpuhp->chp_listp; 8944 last_hmeblkp->hblk_next != NULL; 8945 last_hmeblkp = last_hmeblkp->hblk_next) 8946 ; 8947 last_hmeblkp->hblk_next = list; 8948 list = cpuhp->chp_listp; 8949 cpuhp->chp_listp = NULL; 8950 cpuhp->chp_count = 0; 8951 mutex_exit(&cpuhp->chp_mutex); 8952 } 8953 8954 } 8955 8956 if (list != NULL) { 8957 kpreempt_disable(); 8958 CPUSET_DEL(cpuset, CPU->cpu_id); 8959 xt_sync(cpuset); 8960 xt_sync(cpuset); 8961 kpreempt_enable(); 8962 sfmmu_hblk_free(&list); 8963 list = NULL; 8964 } 8965 8966 hmebp = uhmehash_reclaim_hand; 8967 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8968 uhmehash_reclaim_hand = hmebp = uhme_hash; 8969 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8970 8971 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8972 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8973 hmeblkp = hmebp->hmeblkp; 8974 pr_hblk = NULL; 8975 while (hmeblkp) { 8976 nx_hblk = hmeblkp->hblk_next; 8977 if (!hmeblkp->hblk_vcnt && 8978 !hmeblkp->hblk_hmecnt) { 8979 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8980 pr_hblk, &list, 0); 8981 } else { 8982 pr_hblk = hmeblkp; 8983 } 8984 hmeblkp = nx_hblk; 8985 } 8986 SFMMU_HASH_UNLOCK(hmebp); 8987 } 8988 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 8989 hmebp = uhme_hash; 8990 } 8991 8992 hmebp = khmehash_reclaim_hand; 8993 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 8994 khmehash_reclaim_hand = hmebp = khme_hash; 8995 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8996 8997 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8998 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8999 hmeblkp = hmebp->hmeblkp; 9000 pr_hblk = NULL; 9001 while (hmeblkp) { 9002 nx_hblk = hmeblkp->hblk_next; 9003 if (!hmeblkp->hblk_vcnt && 9004 !hmeblkp->hblk_hmecnt) { 9005 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9006 pr_hblk, &list, 0); 9007 } else { 9008 pr_hblk = hmeblkp; 9009 } 9010 hmeblkp = nx_hblk; 9011 } 9012 SFMMU_HASH_UNLOCK(hmebp); 9013 } 9014 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9015 hmebp = khme_hash; 9016 } 9017 sfmmu_hblks_list_purge(&list, 0); 9018 } 9019 9020 /* 9021 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9022 * same goes for sfmmu_get_addrvcolor(). 9023 * 9024 * This function will return the virtual color for the specified page. The 9025 * virtual color corresponds to this page current mapping or its last mapping. 9026 * It is used by memory allocators to choose addresses with the correct 9027 * alignment so vac consistency is automatically maintained. If the page 9028 * has no color it returns -1. 
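 *
 * On a VAC machine the color is essentially the page's position within the
 * virtually indexed cache; addr_to_vcolor() computes something like
 * ((uintptr_t)addr & (shm_alignment - 1)) >> MMU_PAGESHIFT (the exact
 * expression is platform specific, this is only a sketch), so the value
 * returned here is always below mmu_btop(shm_alignment).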
9029 */ 9030 /*ARGSUSED*/ 9031 int 9032 sfmmu_get_ppvcolor(struct page *pp) 9033 { 9034 #ifdef VAC 9035 int color; 9036 9037 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9038 return (-1); 9039 } 9040 color = PP_GET_VCOLOR(pp); 9041 ASSERT(color < mmu_btop(shm_alignment)); 9042 return (color); 9043 #else 9044 return (-1); 9045 #endif /* VAC */ 9046 } 9047 9048 /* 9049 * This function will return the desired alignment for vac consistency 9050 * (vac color) given a virtual address. If no vac is present it returns -1. 9051 */ 9052 /*ARGSUSED*/ 9053 int 9054 sfmmu_get_addrvcolor(caddr_t vaddr) 9055 { 9056 #ifdef VAC 9057 if (cache & CACHE_VAC) { 9058 return (addr_to_vcolor(vaddr)); 9059 } else { 9060 return (-1); 9061 } 9062 #else 9063 return (-1); 9064 #endif /* VAC */ 9065 } 9066 9067 #ifdef VAC 9068 /* 9069 * Check for conflicts. 9070 * A conflict exists if the new and existent mappings do not match in 9071 * their "shm_alignment fields. If conflicts exist, the existant mappings 9072 * are flushed unless one of them is locked. If one of them is locked, then 9073 * the mappings are flushed and converted to non-cacheable mappings. 9074 */ 9075 static void 9076 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 9077 { 9078 struct hat *tmphat; 9079 struct sf_hment *sfhmep, *tmphme = NULL; 9080 struct hme_blk *hmeblkp; 9081 int vcolor; 9082 tte_t tte; 9083 9084 ASSERT(sfmmu_mlist_held(pp)); 9085 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 9086 9087 vcolor = addr_to_vcolor(addr); 9088 if (PP_NEWPAGE(pp)) { 9089 PP_SET_VCOLOR(pp, vcolor); 9090 return; 9091 } 9092 9093 if (PP_GET_VCOLOR(pp) == vcolor) { 9094 return; 9095 } 9096 9097 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 9098 /* 9099 * Previous user of page had a different color 9100 * but since there are no current users 9101 * we just flush the cache and change the color. 9102 */ 9103 SFMMU_STAT(sf_pgcolor_conflict); 9104 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9105 PP_SET_VCOLOR(pp, vcolor); 9106 return; 9107 } 9108 9109 /* 9110 * If we get here we have a vac conflict with a current 9111 * mapping. VAC conflict policy is as follows. 9112 * - The default is to unload the other mappings unless: 9113 * - If we have a large mapping we uncache the page. 9114 * We need to uncache the rest of the large page too. 9115 * - If any of the mappings are locked we uncache the page. 9116 * - If the requested mapping is inconsistent 9117 * with another mapping and that mapping 9118 * is in the same address space we have to 9119 * make it non-cached. The default thing 9120 * to do is unload the inconsistent mapping 9121 * but if they are in the same address space 9122 * we run the risk of unmapping the pc or the 9123 * stack which we will use as we return to the user, 9124 * in which case we can then fault on the thing 9125 * we just unloaded and get into an infinite loop. 9126 */ 9127 if (PP_ISMAPPED_LARGE(pp)) { 9128 int sz; 9129 9130 /* 9131 * Existing mapping is for big pages. We don't unload 9132 * existing big mappings to satisfy new mappings. 9133 * Always convert all mappings to TNC. 9134 */ 9135 sz = fnd_mapping_sz(pp); 9136 pp = PP_GROUPLEADER(pp, sz); 9137 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 9138 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 9139 TTEPAGES(sz)); 9140 9141 return; 9142 } 9143 9144 /* 9145 * check if any mapping is in same as or if it is locked 9146 * since in that case we need to uncache. 
9147 */ 9148 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9149 tmphme = sfhmep->hme_next; 9150 if (IS_PAHME(sfhmep)) 9151 continue; 9152 hmeblkp = sfmmu_hmetohblk(sfhmep); 9153 tmphat = hblktosfmmu(hmeblkp); 9154 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9155 ASSERT(TTE_IS_VALID(&tte)); 9156 if (hmeblkp->hblk_shared || tmphat == hat || 9157 hmeblkp->hblk_lckcnt) { 9158 /* 9159 * We have an uncache conflict 9160 */ 9161 SFMMU_STAT(sf_uncache_conflict); 9162 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9163 return; 9164 } 9165 } 9166 9167 /* 9168 * We have an unload conflict 9169 * We have already checked for LARGE mappings, therefore 9170 * the remaining mapping(s) must be TTE8K. 9171 */ 9172 SFMMU_STAT(sf_unload_conflict); 9173 9174 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9175 tmphme = sfhmep->hme_next; 9176 if (IS_PAHME(sfhmep)) 9177 continue; 9178 hmeblkp = sfmmu_hmetohblk(sfhmep); 9179 ASSERT(!hmeblkp->hblk_shared); 9180 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9181 } 9182 9183 if (PP_ISMAPPED_KPM(pp)) 9184 sfmmu_kpm_vac_unload(pp, addr); 9185 9186 /* 9187 * Unloads only do TLB flushes so we need to flush the 9188 * cache here. 9189 */ 9190 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9191 PP_SET_VCOLOR(pp, vcolor); 9192 } 9193 9194 /* 9195 * Whenever a mapping is unloaded and the page is in TNC state, 9196 * we see if the page can be made cacheable again. 'pp' is 9197 * the page that we just unloaded a mapping from, the size 9198 * of mapping that was unloaded is 'ottesz'. 9199 * Remark: 9200 * The recache policy for mpss pages can leave a performance problem 9201 * under the following circumstances: 9202 * . A large page in uncached mode has just been unmapped. 9203 * . All constituent pages are TNC due to a conflicting small mapping. 9204 * . There are many other, non conflicting, small mappings around for 9205 * a lot of the constituent pages. 9206 * . We're called w/ the "old" groupleader page and the old ottesz, 9207 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9208 * we end up w/ TTE8K or npages == 1. 9209 * . We call tst_tnc w/ the old groupleader only, and if there is no 9210 * conflict, we re-cache only this page. 9211 * . All other small mappings are not checked and will be left in TNC mode. 9212 * The problem is not very serious because: 9213 * . mpss is actually only defined for heap and stack, so the probability 9214 * is not very high that a large page mapping exists in parallel to a small 9215 * one (this is possible, but seems to be bad programming style in the 9216 * appl). 9217 * . The problem gets a little bit more serious, when those TNC pages 9218 * have to be mapped into kernel space, e.g. for networking. 9219 * . When VAC alias conflicts occur in applications, this is regarded 9220 * as an application bug. So if kstat's show them, the appl should 9221 * be changed anyway. 9222 */ 9223 void 9224 conv_tnc(page_t *pp, int ottesz) 9225 { 9226 int cursz, dosz; 9227 pgcnt_t curnpgs, dopgs; 9228 pgcnt_t pg64k; 9229 page_t *pp2; 9230 9231 /* 9232 * Determine how big a range we check for TNC and find 9233 * leader page. cursz is the size of the biggest 9234 * mapping that still exist on 'pp'. 
9235 */ 9236 if (PP_ISMAPPED_LARGE(pp)) { 9237 cursz = fnd_mapping_sz(pp); 9238 } else { 9239 cursz = TTE8K; 9240 } 9241 9242 if (ottesz >= cursz) { 9243 dosz = ottesz; 9244 pp2 = pp; 9245 } else { 9246 dosz = cursz; 9247 pp2 = PP_GROUPLEADER(pp, dosz); 9248 } 9249 9250 pg64k = TTEPAGES(TTE64K); 9251 dopgs = TTEPAGES(dosz); 9252 9253 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9254 9255 while (dopgs != 0) { 9256 curnpgs = TTEPAGES(cursz); 9257 if (tst_tnc(pp2, curnpgs)) { 9258 SFMMU_STAT_ADD(sf_recache, curnpgs); 9259 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9260 curnpgs); 9261 } 9262 9263 ASSERT(dopgs >= curnpgs); 9264 dopgs -= curnpgs; 9265 9266 if (dopgs == 0) { 9267 break; 9268 } 9269 9270 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9271 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9272 cursz = fnd_mapping_sz(pp2); 9273 } else { 9274 cursz = TTE8K; 9275 } 9276 } 9277 } 9278 9279 /* 9280 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9281 * returns 0 otherwise. Note that oaddr argument is valid for only 9282 * 8k pages. 9283 */ 9284 int 9285 tst_tnc(page_t *pp, pgcnt_t npages) 9286 { 9287 struct sf_hment *sfhme; 9288 struct hme_blk *hmeblkp; 9289 tte_t tte; 9290 caddr_t vaddr; 9291 int clr_valid = 0; 9292 int color, color1, bcolor; 9293 int i, ncolors; 9294 9295 ASSERT(pp != NULL); 9296 ASSERT(!(cache & CACHE_WRITEBACK)); 9297 9298 if (npages > 1) { 9299 ncolors = CACHE_NUM_COLOR; 9300 } 9301 9302 for (i = 0; i < npages; i++) { 9303 ASSERT(sfmmu_mlist_held(pp)); 9304 ASSERT(PP_ISTNC(pp)); 9305 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9306 9307 if (PP_ISPNC(pp)) { 9308 return (0); 9309 } 9310 9311 clr_valid = 0; 9312 if (PP_ISMAPPED_KPM(pp)) { 9313 caddr_t kpmvaddr; 9314 9315 ASSERT(kpm_enable); 9316 kpmvaddr = hat_kpm_page2va(pp, 1); 9317 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9318 color1 = addr_to_vcolor(kpmvaddr); 9319 clr_valid = 1; 9320 } 9321 9322 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9323 if (IS_PAHME(sfhme)) 9324 continue; 9325 hmeblkp = sfmmu_hmetohblk(sfhme); 9326 9327 sfmmu_copytte(&sfhme->hme_tte, &tte); 9328 ASSERT(TTE_IS_VALID(&tte)); 9329 9330 vaddr = tte_to_vaddr(hmeblkp, tte); 9331 color = addr_to_vcolor(vaddr); 9332 9333 if (npages > 1) { 9334 /* 9335 * If there is a big mapping, make sure 9336 * 8K mapping is consistent with the big 9337 * mapping. 
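				 * In other words, constituent page i of an
				 * n-page group may only stay cacheable if its
				 * 8K mapping's virtual color equals
				 * i % ncolors, the color it occupies within
				 * the large mapping itself.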
				 */
				bcolor = i % ncolors;
				if (color != bcolor) {
					return (0);
				}
			}
			if (!clr_valid) {
				clr_valid = 1;
				color1 = color;
			}

			if (color1 != color) {
				return (0);
			}
		}

		pp = PP_PAGENEXT(pp);
	}

	return (1);
}

void
sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
    pgcnt_t npages)
{
	kmutex_t *pmtx;
	int i, ncolors, bcolor;
	kpm_hlk_t *kpmp;
	cpuset_t cpuset;

	ASSERT(pp != NULL);
	ASSERT(!(cache & CACHE_WRITEBACK));

	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
	pmtx = sfmmu_page_enter(pp);

	/*
	 * Fast path caching single unmapped page
	 */
	if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
	    flags == HAT_CACHE) {
		PP_CLRTNC(pp);
		PP_CLRPNC(pp);
		sfmmu_page_exit(pmtx);
		sfmmu_kpm_kpmp_exit(kpmp);
		return;
	}

	/*
	 * We need to capture all cpus in order to change cacheability
	 * because we can't allow one cpu to access the same physical
	 * page using a cacheable and a non-cacheable mapping at the same
	 * time. Since we may end up walking the ism mapping list
	 * we have to grab its lock now, since we can't after all the
	 * cpus have been captured.
	 */
	sfmmu_hat_lock_all();
	mutex_enter(&ism_mlist_lock);
	kpreempt_disable();
	cpuset = cpu_ready_set;
	xc_attention(cpuset);

	if (npages > 1) {
		/*
		 * Make sure all colors are flushed since the
		 * sfmmu_page_cache() only flushes one color-
		 * it does not know big pages.
		 */
		ncolors = CACHE_NUM_COLOR;
		if (flags & HAT_TMPNC) {
			for (i = 0; i < ncolors; i++) {
				sfmmu_cache_flushcolor(i, pp->p_pagenum);
			}
			cache_flush_flag = CACHE_NO_FLUSH;
		}
	}

	for (i = 0; i < npages; i++) {

		ASSERT(sfmmu_mlist_held(pp));

		if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {

			if (npages > 1) {
				bcolor = i % ncolors;
			} else {
				bcolor = NO_VCOLOR;
			}

			sfmmu_page_cache(pp, flags, cache_flush_flag,
			    bcolor);
		}

		pp = PP_PAGENEXT(pp);
	}

	xt_sync(cpuset);
	xc_dismissed(cpuset);
	mutex_exit(&ism_mlist_lock);
	sfmmu_hat_unlock_all();
	sfmmu_page_exit(pmtx);
	sfmmu_kpm_kpmp_exit(kpmp);
	kpreempt_enable();
}

/*
 * This function changes the virtual cacheability of all mappings to a
 * particular page. When changing from uncache to cacheable the mappings will
 * only be changed if all of them have the same virtual color.
 * We need to flush the cache on all cpus. It is possible that
 * a process referenced a page as cacheable but has since exited
 * and cleared the mapping list. We still need to flush it, but have no
 * state, so flushing on all cpus is the only alternative.
9452 */ 9453 static void 9454 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9455 { 9456 struct sf_hment *sfhme; 9457 struct hme_blk *hmeblkp; 9458 sfmmu_t *sfmmup; 9459 tte_t tte, ttemod; 9460 caddr_t vaddr; 9461 int ret, color; 9462 pfn_t pfn; 9463 9464 color = bcolor; 9465 pfn = pp->p_pagenum; 9466 9467 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9468 9469 if (IS_PAHME(sfhme)) 9470 continue; 9471 hmeblkp = sfmmu_hmetohblk(sfhme); 9472 9473 sfmmu_copytte(&sfhme->hme_tte, &tte); 9474 ASSERT(TTE_IS_VALID(&tte)); 9475 vaddr = tte_to_vaddr(hmeblkp, tte); 9476 color = addr_to_vcolor(vaddr); 9477 9478 #ifdef DEBUG 9479 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9480 ASSERT(color == bcolor); 9481 } 9482 #endif 9483 9484 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9485 9486 ttemod = tte; 9487 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9488 TTE_CLR_VCACHEABLE(&ttemod); 9489 } else { /* flags & HAT_CACHE */ 9490 TTE_SET_VCACHEABLE(&ttemod); 9491 } 9492 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9493 if (ret < 0) { 9494 /* 9495 * Since all cpus are captured modifytte should not 9496 * fail. 9497 */ 9498 panic("sfmmu_page_cache: write to tte failed"); 9499 } 9500 9501 sfmmup = hblktosfmmu(hmeblkp); 9502 if (cache_flush_flag == CACHE_FLUSH) { 9503 /* 9504 * Flush TSBs, TLBs and caches 9505 */ 9506 if (hmeblkp->hblk_shared) { 9507 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9508 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9509 sf_region_t *rgnp; 9510 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9511 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9512 ASSERT(srdp != NULL); 9513 rgnp = srdp->srd_hmergnp[rid]; 9514 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9515 srdp, rgnp, rid); 9516 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9517 hmeblkp, 0); 9518 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9519 } else if (sfmmup->sfmmu_ismhat) { 9520 if (flags & HAT_CACHE) { 9521 SFMMU_STAT(sf_ism_recache); 9522 } else { 9523 SFMMU_STAT(sf_ism_uncache); 9524 } 9525 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9526 pfn, CACHE_FLUSH); 9527 } else { 9528 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9529 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9530 } 9531 9532 /* 9533 * all cache entries belonging to this pfn are 9534 * now flushed. 9535 */ 9536 cache_flush_flag = CACHE_NO_FLUSH; 9537 } else { 9538 /* 9539 * Flush only TSBs and TLBs. 
9540 */ 9541 if (hmeblkp->hblk_shared) { 9542 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9543 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9544 sf_region_t *rgnp; 9545 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9546 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9547 ASSERT(srdp != NULL); 9548 rgnp = srdp->srd_hmergnp[rid]; 9549 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9550 srdp, rgnp, rid); 9551 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9552 hmeblkp, 0); 9553 } else if (sfmmup->sfmmu_ismhat) { 9554 if (flags & HAT_CACHE) { 9555 SFMMU_STAT(sf_ism_recache); 9556 } else { 9557 SFMMU_STAT(sf_ism_uncache); 9558 } 9559 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9560 pfn, CACHE_NO_FLUSH); 9561 } else { 9562 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9563 } 9564 } 9565 } 9566 9567 if (PP_ISMAPPED_KPM(pp)) 9568 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9569 9570 switch (flags) { 9571 9572 default: 9573 panic("sfmmu_pagecache: unknown flags"); 9574 break; 9575 9576 case HAT_CACHE: 9577 PP_CLRTNC(pp); 9578 PP_CLRPNC(pp); 9579 PP_SET_VCOLOR(pp, color); 9580 break; 9581 9582 case HAT_TMPNC: 9583 PP_SETTNC(pp); 9584 PP_SET_VCOLOR(pp, NO_VCOLOR); 9585 break; 9586 9587 case HAT_UNCACHE: 9588 PP_SETPNC(pp); 9589 PP_CLRTNC(pp); 9590 PP_SET_VCOLOR(pp, NO_VCOLOR); 9591 break; 9592 } 9593 } 9594 #endif /* VAC */ 9595 9596 9597 /* 9598 * Wrapper routine used to return a context. 9599 * 9600 * It's the responsibility of the caller to guarantee that the 9601 * process serializes on calls here by taking the HAT lock for 9602 * the hat. 9603 * 9604 */ 9605 static void 9606 sfmmu_get_ctx(sfmmu_t *sfmmup) 9607 { 9608 mmu_ctx_t *mmu_ctxp; 9609 uint_t pstate_save; 9610 int ret; 9611 9612 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9613 ASSERT(sfmmup != ksfmmup); 9614 9615 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9616 sfmmu_setup_tsbinfo(sfmmup); 9617 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9618 } 9619 9620 kpreempt_disable(); 9621 9622 mmu_ctxp = CPU_MMU_CTXP(CPU); 9623 ASSERT(mmu_ctxp); 9624 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9625 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9626 9627 /* 9628 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 9629 */ 9630 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9631 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE); 9632 9633 /* 9634 * Let the MMU set up the page sizes to use for 9635 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9636 */ 9637 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9638 mmu_set_ctx_page_sizes(sfmmup); 9639 } 9640 9641 /* 9642 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9643 * interrupts disabled to prevent race condition with wrap-around 9644 * ctx invalidatation. In sun4v, ctx invalidation also involves 9645 * a HV call to set the number of TSBs to 0. If interrupts are not 9646 * disabled until after sfmmu_load_mmustate is complete TSBs may 9647 * become assigned to INVALID_CONTEXT. This is not allowed. 
9648 */ 9649 pstate_save = sfmmu_disable_intrs(); 9650 9651 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9652 sfmmup->sfmmu_scdp != NULL) { 9653 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9654 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9655 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9656 /* debug purpose only */ 9657 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9658 != INVALID_CONTEXT); 9659 } 9660 sfmmu_load_mmustate(sfmmup); 9661 9662 sfmmu_enable_intrs(pstate_save); 9663 9664 kpreempt_enable(); 9665 } 9666 9667 /* 9668 * When all cnums are used up in a MMU, cnum will wrap around to the 9669 * next generation and start from 2. 9670 */ 9671 static void 9672 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum) 9673 { 9674 9675 /* caller must have disabled the preemption */ 9676 ASSERT(curthread->t_preempt >= 1); 9677 ASSERT(mmu_ctxp != NULL); 9678 9679 /* acquire Per-MMU (PM) spin lock */ 9680 mutex_enter(&mmu_ctxp->mmu_lock); 9681 9682 /* re-check to see if wrap-around is needed */ 9683 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9684 goto done; 9685 9686 SFMMU_MMU_STAT(mmu_wrap_around); 9687 9688 /* update gnum */ 9689 ASSERT(mmu_ctxp->mmu_gnum != 0); 9690 mmu_ctxp->mmu_gnum++; 9691 if (mmu_ctxp->mmu_gnum == 0 || 9692 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9693 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9694 (void *)mmu_ctxp); 9695 } 9696 9697 if (mmu_ctxp->mmu_ncpus > 1) { 9698 cpuset_t cpuset; 9699 9700 membar_enter(); /* make sure updated gnum visible */ 9701 9702 SFMMU_XCALL_STATS(NULL); 9703 9704 /* xcall to others on the same MMU to invalidate ctx */ 9705 cpuset = mmu_ctxp->mmu_cpuset; 9706 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum); 9707 CPUSET_DEL(cpuset, CPU->cpu_id); 9708 CPUSET_AND(cpuset, cpu_ready_set); 9709 9710 /* 9711 * Pass in INVALID_CONTEXT as the first parameter to 9712 * sfmmu_raise_tsb_exception, which invalidates the context 9713 * of any process running on the CPUs in the MMU. 9714 */ 9715 xt_some(cpuset, sfmmu_raise_tsb_exception, 9716 INVALID_CONTEXT, INVALID_CONTEXT); 9717 xt_sync(cpuset); 9718 9719 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9720 } 9721 9722 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9723 sfmmu_setctx_sec(INVALID_CONTEXT); 9724 sfmmu_clear_utsbinfo(); 9725 } 9726 9727 /* 9728 * No xcall is needed here. For sun4u systems all CPUs in context 9729 * domain share a single physical MMU therefore it's enough to flush 9730 * TLB on local CPU. On sun4v systems we use 1 global context 9731 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9732 * handler. Note that vtag_flushall_uctxs() is called 9733 * for Ultra II machine, where the equivalent flushall functionality 9734 * is implemented in SW, and only user ctx TLB entries are flushed. 9735 */ 9736 if (&vtag_flushall_uctxs != NULL) { 9737 vtag_flushall_uctxs(); 9738 } else { 9739 vtag_flushall(); 9740 } 9741 9742 /* reset mmu cnum, skips cnum 0 and 1 */ 9743 if (reset_cnum == B_TRUE) 9744 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9745 9746 done: 9747 mutex_exit(&mmu_ctxp->mmu_lock); 9748 } 9749 9750 9751 /* 9752 * For multi-threaded process, set the process context to INVALID_CONTEXT 9753 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9754 * process, we can just load the MMU state directly without having to 9755 * set context invalid. Caller must hold the hat lock since we don't 9756 * acquire it here. 
 */
static void
sfmmu_sync_mmustate(sfmmu_t *sfmmup)
{
	uint_t cnum;
	uint_t pstate_save;

	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmu_hat_lock_held(sfmmup));

	kpreempt_disable();

	/*
	 * We check whether the passed-in sfmmup is the same as the
	 * current running proc. This is to make sure the current proc
	 * stays single-threaded if it already is.
	 */
	if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
	    (curthread->t_procp->p_lwpcnt == 1)) {
		/* single-thread */
		cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
		if (cnum != INVALID_CONTEXT) {
			uint_t curcnum;
			/*
			 * Disable interrupts to prevent a race condition
			 * with sfmmu_ctx_wrap_around ctx invalidation.
			 * In sun4v, ctx invalidation involves setting
			 * TSB to NULL, hence, interrupts should be disabled
			 * until after sfmmu_load_mmustate is completed.
			 */
			pstate_save = sfmmu_disable_intrs();
			curcnum = sfmmu_getctx_sec();
			if (curcnum == cnum)
				sfmmu_load_mmustate(sfmmup);
			sfmmu_enable_intrs(pstate_save);
			ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
		}
	} else {
		/*
		 * multi-thread
		 * or when sfmmup is not the same as the curproc.
		 */
		sfmmu_invalidate_ctx(sfmmup);
	}

	kpreempt_enable();
}


/*
 * Replace the specified TSB with a new TSB. This function gets called when
 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
 * (8K).
 *
 * Caller must hold the HAT lock, but should assume any tsb_info
 * pointers it has are no longer valid after calling this function.
 *
 * Return values:
 *	TSB_ALLOCFAIL	Failed to allocate a TSB, due to memory constraints
 *	TSB_LOSTRACE	HAT is busy, i.e. another thread is already doing
 *			something to this tsbinfo/TSB
 *	TSB_SUCCESS	Operation succeeded
 */
static tsb_replace_rc_t
sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
    hatlock_t *hatlockp, uint_t flags)
{
	struct tsb_info *new_tsbinfo = NULL;
	struct tsb_info *curtsb, *prevtsb;
	uint_t tte_sz_mask;
	int i;

	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmup->sfmmu_ismhat == 0);
	ASSERT(sfmmu_hat_lock_held(sfmmup));
	ASSERT(szc <= tsb_max_growsize);

	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
		return (TSB_LOSTRACE);

	/*
	 * Find the tsb_info ahead of this one in the list, and
	 * also make sure that the tsb_info passed in really
	 * exists!
	 */
	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
	    curtsb != old_tsbinfo && curtsb != NULL;
	    prevtsb = curtsb, curtsb = curtsb->tsb_next)
		;
	ASSERT(curtsb != NULL);

	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		/*
		 * The process is swapped out, so just set the new size
		 * code. When it swaps back in, we'll allocate a new one
		 * of the new chosen size.
		 */
		curtsb->tsb_szc = szc;
		return (TSB_SUCCESS);
	}
	SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);

	tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;

	/*
	 * All initialization is done inside of sfmmu_tsbinfo_alloc().
	 * If we fail to allocate a TSB, exit.
	 *
	 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
	 * then try 4M slab after the initial alloc fails.
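	 * (That is, the 4M fallback is only attempted when growing past
	 * 4M from a TSB that was still smaller than 4M, or when swapping
	 * in a TSB larger than 4M; any other allocation failure is final.)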
	 *
	 * If tsb swapin with tsb size > 4M, then try 4M after the
	 * initial alloc fails.
	 */
	sfmmu_hat_exit(hatlockp);
	if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
	    tte_sz_mask, flags, sfmmup) &&
	    (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
	    (!(flags & TSB_SWAPIN) &&
	    (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
	    sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
	    tte_sz_mask, flags, sfmmup))) {
		(void) sfmmu_hat_enter(sfmmup);
		if (!(flags & TSB_SWAPIN))
			SFMMU_STAT(sf_tsb_resize_failures);
		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
		return (TSB_ALLOCFAIL);
	}
	(void) sfmmu_hat_enter(sfmmup);

	/*
	 * Re-check to make sure somebody else didn't muck with us while we
	 * didn't hold the HAT lock. If the process swapped out, fine, just
	 * exit; this can happen if we try to shrink the TSB from the context
	 * of another process (such as on an ISM unmap), though it is rare.
	 */
	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		SFMMU_STAT(sf_tsb_resize_failures);
		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
		sfmmu_hat_exit(hatlockp);
		sfmmu_tsbinfo_free(new_tsbinfo);
		(void) sfmmu_hat_enter(sfmmup);
		return (TSB_LOSTRACE);
	}

#ifdef DEBUG
	/* Reverify that the tsb_info still exists.. for debugging only */
	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
	    curtsb != old_tsbinfo && curtsb != NULL;
	    prevtsb = curtsb, curtsb = curtsb->tsb_next)
		;
	ASSERT(curtsb != NULL);
#endif /* DEBUG */

	/*
	 * Quiesce any CPUs running this process on their next TLB miss
	 * so they atomically see the new tsb_info. We temporarily set the
	 * context to invalid context so new threads that come on processor
	 * after we do the xcall to cpusran will also serialize behind the
	 * HAT lock on TLB miss and will see the new TSB. Since this short
	 * race with a new thread coming on processor is relatively rare,
	 * this synchronization mechanism should be cheaper than always
	 * pausing all CPUs for the duration of the setup, which is what
	 * the old implementation did. This is particularly true if we are
	 * copying a huge chunk of memory around during that window.
	 *
	 * The memory barriers are to make sure things stay consistent
	 * with resume() since it does not hold the HAT lock while
	 * walking the list of tsb_info structures.
	 */
	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
		/* The TSB is either growing or shrinking. */
		sfmmu_invalidate_ctx(sfmmup);
	} else {
		/*
		 * It is illegal to swap in TSBs from a process other
		 * than a process being swapped in. This in turn
		 * implies we do not have a valid MMU context here
		 * since a process needs one to resolve translation
		 * misses.
9938 */ 9939 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9940 } 9941 9942 #ifdef DEBUG 9943 ASSERT(max_mmu_ctxdoms > 0); 9944 9945 /* 9946 * Process should have INVALID_CONTEXT on all MMUs 9947 */ 9948 for (i = 0; i < max_mmu_ctxdoms; i++) { 9949 9950 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9951 } 9952 #endif 9953 9954 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9955 membar_stst(); /* strict ordering required */ 9956 if (prevtsb) 9957 prevtsb->tsb_next = new_tsbinfo; 9958 else 9959 sfmmup->sfmmu_tsb = new_tsbinfo; 9960 membar_enter(); /* make sure new TSB globally visible */ 9961 9962 /* 9963 * We need to migrate TSB entries from the old TSB to the new TSB 9964 * if tsb_remap_ttes is set and the TSB is growing. 9965 */ 9966 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9967 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9968 9969 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9970 9971 /* 9972 * Drop the HAT lock to free our old tsb_info. 9973 */ 9974 sfmmu_hat_exit(hatlockp); 9975 9976 if ((flags & TSB_GROW) == TSB_GROW) { 9977 SFMMU_STAT(sf_tsb_grow); 9978 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9979 SFMMU_STAT(sf_tsb_shrink); 9980 } 9981 9982 sfmmu_tsbinfo_free(old_tsbinfo); 9983 9984 (void) sfmmu_hat_enter(sfmmup); 9985 return (TSB_SUCCESS); 9986 } 9987 9988 /* 9989 * This function will re-program hat pgsz array, and invalidate the 9990 * process' context, forcing the process to switch to another 9991 * context on the next TLB miss, and therefore start using the 9992 * TLB that is reprogrammed for the new page sizes. 9993 */ 9994 void 9995 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9996 { 9997 int i; 9998 hatlock_t *hatlockp = NULL; 9999 10000 hatlockp = sfmmu_hat_enter(sfmmup); 10001 /* USIII+-IV+ optimization, requires hat lock */ 10002 if (tmp_pgsz) { 10003 for (i = 0; i < mmu_page_sizes; i++) 10004 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10005 } 10006 SFMMU_STAT(sf_tlb_reprog_pgsz); 10007 10008 sfmmu_invalidate_ctx(sfmmup); 10009 10010 sfmmu_hat_exit(hatlockp); 10011 } 10012 10013 /* 10014 * The scd_rttecnt field in the SCD must be updated to take account of the 10015 * regions which it contains. 10016 */ 10017 static void 10018 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10019 { 10020 uint_t rid; 10021 uint_t i, j; 10022 ulong_t w; 10023 sf_region_t *rgnp; 10024 10025 ASSERT(srdp != NULL); 10026 10027 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10028 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10029 continue; 10030 } 10031 10032 j = 0; 10033 while (w) { 10034 if (!(w & 0x1)) { 10035 j++; 10036 w >>= 1; 10037 continue; 10038 } 10039 rid = (i << BT_ULSHIFT) | j; 10040 j++; 10041 w >>= 1; 10042 10043 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10044 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10045 rgnp = srdp->srd_hmergnp[rid]; 10046 ASSERT(rgnp->rgn_refcnt > 0); 10047 ASSERT(rgnp->rgn_id == rid); 10048 10049 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10050 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10051 10052 /* 10053 * Maintain the tsb0 inflation cnt for the regions 10054 * in the SCD. 
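 * As an illustrative figure (using a hypothetical region size): a
 * region with rgn_pgszc >= TTE4M and an rgn_size of 256MB adds
 * 256MB >> (TTE_PAGE_SHIFT(TTE8K) + 2) = 8192 entries, i.e. one
 * 8K-TSB entry for every four 8K pages the region spans, as headroom
 * for the case where large page allocation for the region fails and
 * it falls back to 8K mappings.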
10055 */ 10056 if (rgnp->rgn_pgszc >= TTE4M) { 10057 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10058 rgnp->rgn_size >> 10059 (TTE_PAGE_SHIFT(TTE8K) + 2); 10060 } 10061 } 10062 } 10063 } 10064 10065 /* 10066 * This function assumes that there are either four or six supported page 10067 * sizes and at most two programmable TLBs, so we need to decide which 10068 * page sizes are most important and then tell the MMU layer so it 10069 * can adjust the TLB page sizes accordingly (if supported). 10070 * 10071 * If these assumptions change, this function will need to be 10072 * updated to support whatever the new limits are. 10073 * 10074 * The growing flag is nonzero if we are growing the address space, 10075 * and zero if it is shrinking. This allows us to decide whether 10076 * to grow or shrink our TSB, depending upon available memory 10077 * conditions. 10078 */ 10079 static void 10080 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10081 { 10082 uint64_t ttecnt[MMU_PAGE_SIZES]; 10083 uint64_t tte8k_cnt, tte4m_cnt; 10084 uint8_t i; 10085 int sectsb_thresh; 10086 10087 /* 10088 * Kernel threads, processes with small address spaces not using 10089 * large pages, and dummy ISM HATs need not apply. 10090 */ 10091 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != 0) 10092 return; 10093 10094 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10095 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10096 return; 10097 10098 for (i = 0; i < mmu_page_sizes; i++) { 10099 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10100 sfmmup->sfmmu_ismttecnt[i]; 10101 } 10102 10103 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10104 if (&mmu_check_page_sizes) 10105 mmu_check_page_sizes(sfmmup, ttecnt); 10106 10107 /* 10108 * Calculate the number of 8k ttes to represent the span of these 10109 * pages. 10110 */ 10111 tte8k_cnt = ttecnt[TTE8K] + 10112 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10113 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10114 if (mmu_page_sizes == max_mmu_page_sizes) { 10115 tte4m_cnt = ttecnt[TTE4M] + 10116 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10117 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10118 } else { 10119 tte4m_cnt = ttecnt[TTE4M]; 10120 } 10121 10122 /* 10123 * Inflate tte8k_cnt to allow for region large page allocation failure. 10124 */ 10125 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10126 10127 /* 10128 * Inflate TSB sizes by a factor of 2 if this process 10129 * uses 4M text pages to minimize extra conflict misses 10130 * in the first TSB since without counting text pages 10131 * 8K TSB may become too small. 10132 * 10133 * Also double the size of the second TSB to minimize 10134 * extra conflict misses due to competition between 4M text pages 10135 * and data pages. 10136 * 10137 * We need to adjust the second TSB allocation threshold by the 10138 * inflation factor, since there is no point in creating a second 10139 * TSB when we know all the mappings can fit in the I/D TLBs. 10140 */ 10141 sectsb_thresh = tsb_sectsb_threshold; 10142 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10143 tte8k_cnt <<= 1; 10144 tte4m_cnt <<= 1; 10145 sectsb_thresh <<= 1; 10146 } 10147 10148 /* 10149 * Check to see if our TSB is the right size; we may need to 10150 * grow or shrink it. If the process is small, our work is 10151 * finished at this point. 
10152 */ 10153 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10154 return; 10155 } 10156 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10157 } 10158 10159 static void 10160 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10161 uint64_t tte4m_cnt, int sectsb_thresh) 10162 { 10163 int tsb_bits; 10164 uint_t tsb_szc; 10165 struct tsb_info *tsbinfop; 10166 hatlock_t *hatlockp = NULL; 10167 10168 hatlockp = sfmmu_hat_enter(sfmmup); 10169 ASSERT(hatlockp != NULL); 10170 tsbinfop = sfmmup->sfmmu_tsb; 10171 ASSERT(tsbinfop != NULL); 10172 10173 /* 10174 * If we're growing, select the size based on RSS. If we're 10175 * shrinking, leave some room so we don't have to turn around and 10176 * grow again immediately. 10177 */ 10178 if (growing) 10179 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10180 else 10181 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10182 10183 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10184 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10185 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10186 hatlockp, TSB_SHRINK); 10187 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10188 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10189 hatlockp, TSB_GROW); 10190 } 10191 tsbinfop = sfmmup->sfmmu_tsb; 10192 10193 /* 10194 * With the TLB and first TSB out of the way, we need to see if 10195 * we need a second TSB for 4M pages. If we managed to reprogram 10196 * the TLB page sizes above, the process will start using this new 10197 * TSB right away; otherwise, it will start using it on the next 10198 * context switch. Either way, it's no big deal so there's no 10199 * synchronization with the trap handlers here unless we grow the 10200 * TSB (in which case it's required to prevent using the old one 10201 * after it's freed). Note: second tsb is required for 32M/256M 10202 * page sizes. 10203 */ 10204 if (tte4m_cnt > sectsb_thresh) { 10205 /* 10206 * If we're growing, select the size based on RSS. If we're 10207 * shrinking, leave some room so we don't have to turn 10208 * around and grow again immediately. 10209 */ 10210 if (growing) 10211 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10212 else 10213 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10214 if (tsbinfop->tsb_next == NULL) { 10215 struct tsb_info *newtsb; 10216 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10217 0 : TSB_ALLOC; 10218 10219 sfmmu_hat_exit(hatlockp); 10220 10221 /* 10222 * Try to allocate a TSB for 4[32|256]M pages. If we 10223 * can't get the size we want, retry w/a minimum sized 10224 * TSB. If that still didn't work, give up; we can 10225 * still run without one. 10226 */ 10227 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10228 TSB4M|TSB32M|TSB256M:TSB4M; 10229 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10230 allocflags, sfmmup)) && 10231 (tsb_szc <= TSB_4M_SZCODE || 10232 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10233 tsb_bits, allocflags, sfmmup)) && 10234 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10235 tsb_bits, allocflags, sfmmup)) { 10236 return; 10237 } 10238 10239 hatlockp = sfmmu_hat_enter(sfmmup); 10240 10241 sfmmu_invalidate_ctx(sfmmup); 10242 10243 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10244 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10245 SFMMU_STAT(sf_tsb_sectsb_create); 10246 sfmmu_hat_exit(hatlockp); 10247 return; 10248 } else { 10249 /* 10250 * It's annoying, but possible for us 10251 * to get here.. 
we dropped the HAT lock 10252 * because of locking order in the kmem 10253 * allocator, and while we were off getting 10254 * our memory, some other thread decided to 10255 * do us a favor and won the race to get a 10256 * second TSB for this process. Sigh. 10257 */ 10258 sfmmu_hat_exit(hatlockp); 10259 sfmmu_tsbinfo_free(newtsb); 10260 return; 10261 } 10262 } 10263 10264 /* 10265 * We have a second TSB, see if it's big enough. 10266 */ 10267 tsbinfop = tsbinfop->tsb_next; 10268 10269 /* 10270 * Check to see if our second TSB is the right size; 10271 * we may need to grow or shrink it. 10272 * To prevent thrashing (e.g. growing the TSB on a 10273 * subsequent map operation), only try to shrink if 10274 * the TSB reach exceeds twice the virtual address 10275 * space size. 10276 */ 10277 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10278 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10279 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10280 tsb_szc, hatlockp, TSB_SHRINK); 10281 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10282 TSB_OK_GROW()) { 10283 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10284 tsb_szc, hatlockp, TSB_GROW); 10285 } 10286 } 10287 10288 sfmmu_hat_exit(hatlockp); 10289 } 10290 10291 /* 10292 * Free up a sfmmu 10293 * Since the sfmmu is currently embedded in the hat struct we simply zero 10294 * out our fields and free up the ism map blk list if any. 10295 */ 10296 static void 10297 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10298 { 10299 ism_blk_t *blkp, *nx_blkp; 10300 #ifdef DEBUG 10301 ism_map_t *map; 10302 int i; 10303 #endif 10304 10305 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10306 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10307 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10308 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10309 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10310 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10311 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10312 10313 sfmmup->sfmmu_free = 0; 10314 sfmmup->sfmmu_ismhat = 0; 10315 10316 blkp = sfmmup->sfmmu_iblk; 10317 sfmmup->sfmmu_iblk = NULL; 10318 10319 while (blkp) { 10320 #ifdef DEBUG 10321 map = blkp->iblk_maps; 10322 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10323 ASSERT(map[i].imap_seg == 0); 10324 ASSERT(map[i].imap_ismhat == NULL); 10325 ASSERT(map[i].imap_ment == NULL); 10326 } 10327 #endif 10328 nx_blkp = blkp->iblk_next; 10329 blkp->iblk_next = NULL; 10330 blkp->iblk_nextpa = (uint64_t)-1; 10331 kmem_cache_free(ism_blk_cache, blkp); 10332 blkp = nx_blkp; 10333 } 10334 } 10335 10336 /* 10337 * Locking primitves accessed by HATLOCK macros 10338 */ 10339 10340 #define SFMMU_SPL_MTX (0x0) 10341 #define SFMMU_ML_MTX (0x1) 10342 10343 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10344 SPL_HASH(pg) : MLIST_HASH(pg)) 10345 10346 kmutex_t * 10347 sfmmu_page_enter(struct page *pp) 10348 { 10349 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10350 } 10351 10352 void 10353 sfmmu_page_exit(kmutex_t *spl) 10354 { 10355 mutex_exit(spl); 10356 } 10357 10358 int 10359 sfmmu_page_spl_held(struct page *pp) 10360 { 10361 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10362 } 10363 10364 kmutex_t * 10365 sfmmu_mlist_enter(struct page *pp) 10366 { 10367 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10368 } 10369 10370 void 10371 sfmmu_mlist_exit(kmutex_t *mml) 10372 { 10373 mutex_exit(mml); 10374 } 10375 10376 int 10377 sfmmu_mlist_held(struct page *pp) 10378 { 10379 10380 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10381 } 10382 10383 /* 10384 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). 
For 10385 * sfmmu_mlist_enter() case mml_table lock array is used and for 10386 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10387 * 10388 * The lock is taken on a root page so that it protects an operation on all 10389 * constituent pages of a large page pp belongs to. 10390 * 10391 * The routine takes a lock from the appropriate array. The lock is determined 10392 * by hashing the root page. After taking the lock this routine checks if the 10393 * root page has the same size code that was used to determine the root (i.e 10394 * that root hasn't changed). If root page has the expected p_szc field we 10395 * have the right lock and it's returned to the caller. If root's p_szc 10396 * decreased we release the lock and retry from the beginning. This case can 10397 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10398 * value and taking the lock. The number of retries due to p_szc decrease is 10399 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10400 * determined by hashing pp itself. 10401 * 10402 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10403 * possible that p_szc can increase. To increase p_szc a thread has to lock 10404 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10405 * callers that don't hold a page locked recheck if hmeblk through which pp 10406 * was found still maps this pp. If it doesn't map it anymore returned lock 10407 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10408 * p_szc increase after taking the lock it returns this lock without further 10409 * retries because in this case the caller doesn't care about which lock was 10410 * taken. The caller will drop it right away. 10411 * 10412 * After the routine returns it's guaranteed that hat_page_demote() can't 10413 * change p_szc field of any of constituent pages of a large page pp belongs 10414 * to as long as pp was either locked at least SHARED prior to this call or 10415 * the caller finds that hment that pointed to this pp still references this 10416 * pp (this also assumes that the caller holds hme hash bucket lock so that 10417 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10418 * hat_pageunload()). 10419 */ 10420 static kmutex_t * 10421 sfmmu_mlspl_enter(struct page *pp, int type) 10422 { 10423 kmutex_t *mtx; 10424 uint_t prev_rszc = UINT_MAX; 10425 page_t *rootpp; 10426 uint_t szc; 10427 uint_t rszc; 10428 uint_t pszc = pp->p_szc; 10429 10430 ASSERT(pp != NULL); 10431 10432 again: 10433 if (pszc == 0) { 10434 mtx = SFMMU_MLSPL_MTX(type, pp); 10435 mutex_enter(mtx); 10436 return (mtx); 10437 } 10438 10439 /* The lock lives in the root page */ 10440 rootpp = PP_GROUPLEADER(pp, pszc); 10441 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10442 mutex_enter(mtx); 10443 10444 /* 10445 * Return mml in the following 3 cases: 10446 * 10447 * 1) If pp itself is root since if its p_szc decreased before we took 10448 * the lock pp is still the root of smaller szc page. And if its p_szc 10449 * increased it doesn't matter what lock we return (see comment in 10450 * front of this routine). 10451 * 10452 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10453 * large page we have the right lock since any previous potential 10454 * hat_page_demote() is done demoting from greater than current root's 10455 * p_szc because hat_page_demote() changes root's p_szc last. 
No 10456 * further hat_page_demote() can start or be in progress since it 10457 * would need the same lock we currently hold. 10458 * 10459 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10460 * matter what lock we return (see comment in front of this routine). 10461 */ 10462 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10463 rszc >= prev_rszc) { 10464 return (mtx); 10465 } 10466 10467 /* 10468 * hat_page_demote() could have decreased root's p_szc. 10469 * In this case pp's p_szc must also be smaller than pszc. 10470 * Retry. 10471 */ 10472 if (rszc < pszc) { 10473 szc = pp->p_szc; 10474 if (szc < pszc) { 10475 mutex_exit(mtx); 10476 pszc = szc; 10477 goto again; 10478 } 10479 /* 10480 * pp's p_szc increased after it was decreased. 10481 * page cannot be mapped. Return current lock. The caller 10482 * will drop it right away. 10483 */ 10484 return (mtx); 10485 } 10486 10487 /* 10488 * root's p_szc is greater than pp's p_szc. 10489 * hat_page_demote() is not done with all pages 10490 * yet. Wait for it to complete. 10491 */ 10492 mutex_exit(mtx); 10493 rootpp = PP_GROUPLEADER(rootpp, rszc); 10494 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10495 mutex_enter(mtx); 10496 mutex_exit(mtx); 10497 prev_rszc = rszc; 10498 goto again; 10499 } 10500 10501 static int 10502 sfmmu_mlspl_held(struct page *pp, int type) 10503 { 10504 kmutex_t *mtx; 10505 10506 ASSERT(pp != NULL); 10507 /* The lock lives in the root page */ 10508 pp = PP_PAGEROOT(pp); 10509 ASSERT(pp != NULL); 10510 10511 mtx = SFMMU_MLSPL_MTX(type, pp); 10512 return (MUTEX_HELD(mtx)); 10513 } 10514 10515 static uint_t 10516 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10517 { 10518 struct hme_blk *hblkp; 10519 10520 10521 if (freehblkp != NULL) { 10522 mutex_enter(&freehblkp_lock); 10523 if (freehblkp != NULL) { 10524 /* 10525 * If the current thread is owning hblk_reserve OR 10526 * critical request from sfmmu_hblk_steal() 10527 * let it succeed even if freehblkcnt is really low. 10528 */ 10529 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10530 SFMMU_STAT(sf_get_free_throttle); 10531 mutex_exit(&freehblkp_lock); 10532 return (0); 10533 } 10534 freehblkcnt--; 10535 *hmeblkpp = freehblkp; 10536 hblkp = *hmeblkpp; 10537 freehblkp = hblkp->hblk_next; 10538 mutex_exit(&freehblkp_lock); 10539 hblkp->hblk_next = NULL; 10540 SFMMU_STAT(sf_get_free_success); 10541 10542 ASSERT(hblkp->hblk_hmecnt == 0); 10543 ASSERT(hblkp->hblk_vcnt == 0); 10544 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp)); 10545 10546 return (1); 10547 } 10548 mutex_exit(&freehblkp_lock); 10549 } 10550 10551 /* Check cpu hblk pending queues */ 10552 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) { 10553 hblkp = *hmeblkpp; 10554 hblkp->hblk_next = NULL; 10555 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp); 10556 10557 ASSERT(hblkp->hblk_hmecnt == 0); 10558 ASSERT(hblkp->hblk_vcnt == 0); 10559 10560 return (1); 10561 } 10562 10563 SFMMU_STAT(sf_get_free_fail); 10564 return (0); 10565 } 10566 10567 static uint_t 10568 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10569 { 10570 struct hme_blk *hblkp; 10571 10572 ASSERT(hmeblkp->hblk_hmecnt == 0); 10573 ASSERT(hmeblkp->hblk_vcnt == 0); 10574 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10575 10576 /* 10577 * If the current thread is mapping into kernel space, 10578 * let it succede even if freehblkcnt is max 10579 * so that it will avoid freeing it to kmem. 
10580 * This will prevent stack overflow due to 10581 * possible recursion since kmem_cache_free() 10582 * might require creation of a slab which 10583 * in turn needs an hmeblk to map that slab; 10584 * let's break this vicious chain at the first 10585 * opportunity. 10586 */ 10587 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10588 mutex_enter(&freehblkp_lock); 10589 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10590 SFMMU_STAT(sf_put_free_success); 10591 freehblkcnt++; 10592 hmeblkp->hblk_next = freehblkp; 10593 freehblkp = hmeblkp; 10594 mutex_exit(&freehblkp_lock); 10595 return (1); 10596 } 10597 mutex_exit(&freehblkp_lock); 10598 } 10599 10600 /* 10601 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 10602 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10603 * we are not in the process of mapping into kernel space. 10604 */ 10605 ASSERT(!critical); 10606 while (freehblkcnt > HBLK_RESERVE_CNT) { 10607 mutex_enter(&freehblkp_lock); 10608 if (freehblkcnt > HBLK_RESERVE_CNT) { 10609 freehblkcnt--; 10610 hblkp = freehblkp; 10611 freehblkp = hblkp->hblk_next; 10612 mutex_exit(&freehblkp_lock); 10613 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10614 kmem_cache_free(sfmmu8_cache, hblkp); 10615 continue; 10616 } 10617 mutex_exit(&freehblkp_lock); 10618 } 10619 SFMMU_STAT(sf_put_free_fail); 10620 return (0); 10621 } 10622 10623 static void 10624 sfmmu_hblk_swap(struct hme_blk *new) 10625 { 10626 struct hme_blk *old, *hblkp, *prev; 10627 uint64_t newpa; 10628 caddr_t base, vaddr, endaddr; 10629 struct hmehash_bucket *hmebp; 10630 struct sf_hment *osfhme, *nsfhme; 10631 page_t *pp; 10632 kmutex_t *pml; 10633 tte_t tte; 10634 struct hme_blk *list = NULL; 10635 10636 #ifdef DEBUG 10637 hmeblk_tag hblktag; 10638 struct hme_blk *found; 10639 #endif 10640 old = HBLK_RESERVE; 10641 ASSERT(!old->hblk_shared); 10642 10643 /* 10644 * save pa before bcopy clobbers it 10645 */ 10646 newpa = new->hblk_nextpa; 10647 10648 base = (caddr_t)get_hblk_base(old); 10649 endaddr = base + get_hblk_span(old); 10650 10651 /* 10652 * acquire hash bucket lock. 10653 */ 10654 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10655 SFMMU_INVALID_SHMERID); 10656 10657 /* 10658 * copy contents from old to new 10659 */ 10660 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10661 10662 /* 10663 * add new to hash chain 10664 */ 10665 sfmmu_hblk_hash_add(hmebp, new, newpa); 10666 10667 /* 10668 * search hash chain for hblk_reserve; this needs to be performed 10669 * after adding new, otherwise prev won't correspond to the hblk which 10670 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to 10671 * remove old later. 10672 */ 10673 for (prev = NULL, 10674 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old; 10675 prev = hblkp, hblkp = hblkp->hblk_next) 10676 ; 10677 10678 if (hblkp != old) 10679 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10680 10681 /* 10682 * p_mapping list is still pointing to hments in hblk_reserve; 10683 * fix up p_mapping list so that they point to hments in new. 10684 * 10685 * Since all these mappings are created by hblk_reserve_thread 10686 * on the way and it's using at least one of the buffers from each of 10687 * the newly minted slabs, there is no danger of any of these 10688 * mappings getting unloaded by another thread. 10689 * 10690 * tsbmiss could only modify ref/mod bits of hments in old/new. 
10691 * Since all of these hments hold mappings established by segkmem 10692 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10693 * have no meaning for the mappings in hblk_reserve. hments in 10694 * old and new are identical except for ref/mod bits. 10695 */ 10696 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10697 10698 HBLKTOHME(osfhme, old, vaddr); 10699 sfmmu_copytte(&osfhme->hme_tte, &tte); 10700 10701 if (TTE_IS_VALID(&tte)) { 10702 if ((pp = osfhme->hme_page) == NULL) 10703 panic("sfmmu_hblk_swap: page not mapped"); 10704 10705 pml = sfmmu_mlist_enter(pp); 10706 10707 if (pp != osfhme->hme_page) 10708 panic("sfmmu_hblk_swap: mapping changed"); 10709 10710 HBLKTOHME(nsfhme, new, vaddr); 10711 10712 HME_ADD(nsfhme, pp); 10713 HME_SUB(osfhme, pp); 10714 10715 sfmmu_mlist_exit(pml); 10716 } 10717 } 10718 10719 /* 10720 * remove old from hash chain 10721 */ 10722 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1); 10723 10724 #ifdef DEBUG 10725 10726 hblktag.htag_id = ksfmmup; 10727 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10728 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10729 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10730 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10731 10732 if (found != new) 10733 panic("sfmmu_hblk_swap: new hblk not found"); 10734 #endif 10735 10736 SFMMU_HASH_UNLOCK(hmebp); 10737 10738 /* 10739 * Reset hblk_reserve 10740 */ 10741 bzero((void *)old, HME8BLK_SZ); 10742 old->hblk_nextpa = va_to_pa((caddr_t)old); 10743 } 10744 10745 /* 10746 * Grab the mlist mutex for both pages passed in. 10747 * 10748 * low and high will be returned as pointers to the mutexes for these pages. 10749 * low refers to the mutex residing in the lower bin of the mlist hash, while 10750 * high refers to the mutex residing in the higher bin of the mlist hash. This 10751 * is due to the locking order restrictions on the same thread grabbing 10752 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10753 * 10754 * If both pages hash to the same mutex, only grab that single mutex, and 10755 * high will be returned as NULL 10756 * If the pages hash to different bins in the hash, grab the lower addressed 10757 * lock first and then the higher addressed lock in order to follow the locking 10758 * rules involved with the same thread grabbing multiple mlist mutexes. 10759 * low and high will both have non-NULL values. 10760 */ 10761 static void 10762 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10763 kmutex_t **low, kmutex_t **high) 10764 { 10765 kmutex_t *mml_targ, *mml_repl; 10766 10767 /* 10768 * no need to do the dance around szc as in sfmmu_mlist_enter() 10769 * because this routine is only called by hat_page_relocate() and all 10770 * targ and repl pages are already locked EXCL so szc can't change. 
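 * As an illustrative aside on the low/high ordering described above:
 * if one thread relocates page A to page B while another relocates B
 * to A, both threads take the lower-addressed of the two hashed
 * mutexes first, so they cannot deadlock on this pair of locks.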
10771 */ 10772 10773 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10774 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10775 10776 if (mml_targ == mml_repl) { 10777 *low = mml_targ; 10778 *high = NULL; 10779 } else { 10780 if (mml_targ < mml_repl) { 10781 *low = mml_targ; 10782 *high = mml_repl; 10783 } else { 10784 *low = mml_repl; 10785 *high = mml_targ; 10786 } 10787 } 10788 10789 mutex_enter(*low); 10790 if (*high) 10791 mutex_enter(*high); 10792 } 10793 10794 static void 10795 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 10796 { 10797 if (high) 10798 mutex_exit(high); 10799 mutex_exit(low); 10800 } 10801 10802 static hatlock_t * 10803 sfmmu_hat_enter(sfmmu_t *sfmmup) 10804 { 10805 hatlock_t *hatlockp; 10806 10807 if (sfmmup != ksfmmup) { 10808 hatlockp = TSB_HASH(sfmmup); 10809 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 10810 return (hatlockp); 10811 } 10812 return (NULL); 10813 } 10814 10815 static hatlock_t * 10816 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 10817 { 10818 hatlock_t *hatlockp; 10819 10820 if (sfmmup != ksfmmup) { 10821 hatlockp = TSB_HASH(sfmmup); 10822 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 10823 return (NULL); 10824 return (hatlockp); 10825 } 10826 return (NULL); 10827 } 10828 10829 static void 10830 sfmmu_hat_exit(hatlock_t *hatlockp) 10831 { 10832 if (hatlockp != NULL) 10833 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 10834 } 10835 10836 static void 10837 sfmmu_hat_lock_all(void) 10838 { 10839 int i; 10840 for (i = 0; i < SFMMU_NUM_LOCK; i++) 10841 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 10842 } 10843 10844 static void 10845 sfmmu_hat_unlock_all(void) 10846 { 10847 int i; 10848 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 10849 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 10850 } 10851 10852 int 10853 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 10854 { 10855 ASSERT(sfmmup != ksfmmup); 10856 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 10857 } 10858 10859 /* 10860 * Locking primitives to provide consistency between ISM unmap 10861 * and other operations. Since ISM unmap can take a long time, we 10862 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 10863 * contention on the hatlock buckets while ISM segments are being 10864 * unmapped. The tradeoff is that the flags don't prevent priority 10865 * inversion from occurring, so we must request kernel priority in 10866 * case we have to sleep to keep from getting buried while holding 10867 * the HAT_ISMBUSY flag set, which in turn could block other kernel 10868 * threads from running (for example, in sfmmu_uvatopfn()). 10869 */ 10870 static void 10871 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 10872 { 10873 hatlock_t *hatlockp; 10874 10875 if (!hatlock_held) 10876 hatlockp = sfmmu_hat_enter(sfmmup); 10877 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 10878 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10879 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 10880 if (!hatlock_held) 10881 sfmmu_hat_exit(hatlockp); 10882 } 10883 10884 static void 10885 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 10886 { 10887 hatlock_t *hatlockp; 10888 10889 if (!hatlock_held) 10890 hatlockp = sfmmu_hat_enter(sfmmup); 10891 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 10892 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 10893 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10894 if (!hatlock_held) 10895 sfmmu_hat_exit(hatlockp); 10896 } 10897 10898 /* 10899 * 10900 * Algorithm: 10901 * 10902 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 10903 * hblks. 
10904 * 10905 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 10906 * 10907 * (a) try to return an hblk from reserve pool of free hblks; 10908 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 10909 * and return hblk_reserve. 10910 * 10911 * (3) call kmem_cache_alloc() to allocate hblk; 10912 * 10913 * (a) if hblk_reserve_lock is held by the current thread, 10914 * atomically replace hblk_reserve by the hblk that is 10915 * returned by kmem_cache_alloc; release hblk_reserve_lock 10916 * and call kmem_cache_alloc() again. 10917 * (b) if reserve pool is not full, add the hblk that is 10918 * returned by kmem_cache_alloc to reserve pool and 10919 * call kmem_cache_alloc again. 10920 * 10921 */ 10922 static struct hme_blk * 10923 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 10924 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 10925 uint_t flags, uint_t rid) 10926 { 10927 struct hme_blk *hmeblkp = NULL; 10928 struct hme_blk *newhblkp; 10929 struct hme_blk *shw_hblkp = NULL; 10930 struct kmem_cache *sfmmu_cache = NULL; 10931 uint64_t hblkpa; 10932 ulong_t index; 10933 uint_t owner; /* set to 1 if using hblk_reserve */ 10934 uint_t forcefree; 10935 int sleep; 10936 sf_srd_t *srdp; 10937 sf_region_t *rgnp; 10938 10939 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10940 ASSERT(hblktag.htag_rid == rid); 10941 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 10942 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 10943 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 10944 10945 /* 10946 * If segkmem is not created yet, allocate from static hmeblks 10947 * created at the end of startup_modules(). See the block comment 10948 * in startup_modules() describing how we estimate the number of 10949 * static hmeblks that will be needed during re-map. 10950 */ 10951 if (!hblk_alloc_dynamic) { 10952 10953 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 10954 10955 if (size == TTE8K) { 10956 index = nucleus_hblk8.index; 10957 if (index >= nucleus_hblk8.len) { 10958 /* 10959 * If we panic here, see startup_modules() to 10960 * make sure that we are calculating the 10961 * number of hblk8's that we need correctly. 10962 */ 10963 prom_panic("no nucleus hblk8 to allocate"); 10964 } 10965 hmeblkp = 10966 (struct hme_blk *)&nucleus_hblk8.list[index]; 10967 nucleus_hblk8.index++; 10968 SFMMU_STAT(sf_hblk8_nalloc); 10969 } else { 10970 index = nucleus_hblk1.index; 10971 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 10972 /* 10973 * If we panic here, see startup_modules(). 10974 * Most likely you need to update the 10975 * calculation of the number of hblk1 elements 10976 * that the kernel needs to boot. 10977 */ 10978 prom_panic("no nucleus hblk1 to allocate"); 10979 } 10980 hmeblkp = 10981 (struct hme_blk *)&nucleus_hblk1.list[index]; 10982 nucleus_hblk1.index++; 10983 SFMMU_STAT(sf_hblk1_nalloc); 10984 } 10985 10986 goto hblk_init; 10987 } 10988 10989 SFMMU_HASH_UNLOCK(hmebp); 10990 10991 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 10992 if (mmu_page_sizes == max_mmu_page_sizes) { 10993 if (size < TTE256M) 10994 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10995 size, flags); 10996 } else { 10997 if (size < TTE4M) 10998 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10999 size, flags); 11000 } 11001 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11002 /* 11003 * Shared hmes use per region bitmaps in rgn_hmeflag 11004 * rather than shadow hmeblks to keep track of the 11005 * mapping sizes which have been allocated for the region. 
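 * For example (illustrative): a region that has had both 8K and 4M
 * hmeblks allocated for it has the (1 << TTE8K) and (1 << TTE4M) bits
 * set in rgn_hmeflags; roughly speaking, the bit for a given size is
 * set with atomic_or_16() under hblk_init below when an hmeblk of
 * that size is initialized for the region.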
11006 * Here we cleanup old invalid hmeblks with this rid, 11007 * which may be left around by pageunload(). 11008 */ 11009 int ttesz; 11010 caddr_t va; 11011 caddr_t eva = vaddr + TTEBYTES(size); 11012 11013 ASSERT(sfmmup != KHATID); 11014 11015 srdp = sfmmup->sfmmu_srdp; 11016 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11017 rgnp = srdp->srd_hmergnp[rid]; 11018 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11019 ASSERT(rgnp->rgn_refcnt != 0); 11020 ASSERT(size <= rgnp->rgn_pgszc); 11021 11022 ttesz = HBLK_MIN_TTESZ; 11023 do { 11024 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11025 continue; 11026 } 11027 11028 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11029 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11030 } else if (ttesz < size) { 11031 for (va = vaddr; va < eva; 11032 va += TTEBYTES(ttesz)) { 11033 sfmmu_cleanup_rhblk(srdp, va, rid, 11034 ttesz); 11035 } 11036 } 11037 } while (++ttesz <= rgnp->rgn_pgszc); 11038 } 11039 11040 fill_hblk: 11041 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11042 11043 if (owner && size == TTE8K) { 11044 11045 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11046 /* 11047 * We are really in a tight spot. We already own 11048 * hblk_reserve and we need another hblk. In anticipation 11049 * of this kind of scenario, we specifically set aside 11050 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11051 * by owner of hblk_reserve. 11052 */ 11053 SFMMU_STAT(sf_hblk_recurse_cnt); 11054 11055 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11056 panic("sfmmu_hblk_alloc: reserve list is empty"); 11057 11058 goto hblk_verify; 11059 } 11060 11061 ASSERT(!owner); 11062 11063 if ((flags & HAT_NO_KALLOC) == 0) { 11064 11065 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11066 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11067 11068 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11069 hmeblkp = sfmmu_hblk_steal(size); 11070 } else { 11071 /* 11072 * if we are the owner of hblk_reserve, 11073 * swap hblk_reserve with hmeblkp and 11074 * start a fresh life. Hope things go 11075 * better this time. 11076 */ 11077 if (hblk_reserve_thread == curthread) { 11078 ASSERT(sfmmu_cache == sfmmu8_cache); 11079 sfmmu_hblk_swap(hmeblkp); 11080 hblk_reserve_thread = NULL; 11081 mutex_exit(&hblk_reserve_lock); 11082 goto fill_hblk; 11083 } 11084 /* 11085 * let's donate this hblk to our reserve list if 11086 * we are not mapping kernel range 11087 */ 11088 if (size == TTE8K && sfmmup != KHATID) { 11089 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11090 goto fill_hblk; 11091 } 11092 } 11093 } else { 11094 /* 11095 * We are here to map the slab in sfmmu8_cache; let's 11096 * check if we could tap our reserve list; if successful, 11097 * this will avoid the pain of going thru sfmmu_hblk_swap 11098 */ 11099 SFMMU_STAT(sf_hblk_slab_cnt); 11100 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11101 /* 11102 * let's start hblk_reserve dance 11103 */ 11104 SFMMU_STAT(sf_hblk_reserve_cnt); 11105 owner = 1; 11106 mutex_enter(&hblk_reserve_lock); 11107 hmeblkp = HBLK_RESERVE; 11108 hblk_reserve_thread = curthread; 11109 } 11110 } 11111 11112 hblk_verify: 11113 ASSERT(hmeblkp != NULL); 11114 set_hblk_sz(hmeblkp, size); 11115 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11116 SFMMU_HASH_LOCK(hmebp); 11117 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11118 if (newhblkp != NULL) { 11119 SFMMU_HASH_UNLOCK(hmebp); 11120 if (hmeblkp != HBLK_RESERVE) { 11121 /* 11122 * This is really tricky! 
11123 * 11124 * vmem_alloc(vmem_seg_arena) 11125 * vmem_alloc(vmem_internal_arena) 11126 * segkmem_alloc(heap_arena) 11127 * vmem_alloc(heap_arena) 11128 * page_create() 11129 * hat_memload() 11130 * kmem_cache_free() 11131 * kmem_cache_alloc() 11132 * kmem_slab_create() 11133 * vmem_alloc(kmem_internal_arena) 11134 * segkmem_alloc(heap_arena) 11135 * vmem_alloc(heap_arena) 11136 * page_create() 11137 * hat_memload() 11138 * kmem_cache_free() 11139 * ... 11140 * 11141 * Thus, hat_memload() could call kmem_cache_free 11142 * for enough number of times that we could easily 11143 * hit the bottom of the stack or run out of reserve 11144 * list of vmem_seg structs. So, we must donate 11145 * this hblk to reserve list if it's allocated 11146 * from sfmmu8_cache *and* mapping kernel range. 11147 * We don't need to worry about freeing hmeblk1's 11148 * to kmem since they don't map any kmem slabs. 11149 * 11150 * Note: When segkmem supports largepages, we must 11151 * free hmeblk1's to reserve list as well. 11152 */ 11153 forcefree = (sfmmup == KHATID) ? 1 : 0; 11154 if (size == TTE8K && 11155 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11156 goto re_verify; 11157 } 11158 ASSERT(sfmmup != KHATID); 11159 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11160 } else { 11161 /* 11162 * Hey! we don't need hblk_reserve any more. 11163 */ 11164 ASSERT(owner); 11165 hblk_reserve_thread = NULL; 11166 mutex_exit(&hblk_reserve_lock); 11167 owner = 0; 11168 } 11169 re_verify: 11170 /* 11171 * let's check if the goodies are still present 11172 */ 11173 SFMMU_HASH_LOCK(hmebp); 11174 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11175 if (newhblkp != NULL) { 11176 /* 11177 * return newhblkp if it's not hblk_reserve; 11178 * if newhblkp is hblk_reserve, return it 11179 * _only if_ we are the owner of hblk_reserve. 11180 */ 11181 if (newhblkp != HBLK_RESERVE || owner) { 11182 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11183 newhblkp->hblk_shared); 11184 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11185 !newhblkp->hblk_shared); 11186 return (newhblkp); 11187 } else { 11188 /* 11189 * we just hit hblk_reserve in the hash and 11190 * we are not the owner of that; 11191 * 11192 * block until hblk_reserve_thread completes 11193 * swapping hblk_reserve and try the dance 11194 * once again. 11195 */ 11196 SFMMU_HASH_UNLOCK(hmebp); 11197 mutex_enter(&hblk_reserve_lock); 11198 mutex_exit(&hblk_reserve_lock); 11199 SFMMU_STAT(sf_hblk_reserve_hit); 11200 goto fill_hblk; 11201 } 11202 } else { 11203 /* 11204 * it's no more! try the dance once again. 11205 */ 11206 SFMMU_HASH_UNLOCK(hmebp); 11207 goto fill_hblk; 11208 } 11209 } 11210 11211 hblk_init: 11212 if (SFMMU_IS_SHMERID_VALID(rid)) { 11213 uint16_t tteflag = 0x1 << 11214 ((size < HBLK_MIN_TTESZ) ? 
HBLK_MIN_TTESZ : size); 11215 11216 if (!(rgnp->rgn_hmeflags & tteflag)) { 11217 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11218 } 11219 hmeblkp->hblk_shared = 1; 11220 } else { 11221 hmeblkp->hblk_shared = 0; 11222 } 11223 set_hblk_sz(hmeblkp, size); 11224 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11225 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11226 hmeblkp->hblk_tag = hblktag; 11227 hmeblkp->hblk_shadow = shw_hblkp; 11228 hblkpa = hmeblkp->hblk_nextpa; 11229 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 11230 11231 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11232 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11233 ASSERT(hmeblkp->hblk_hmecnt == 0); 11234 ASSERT(hmeblkp->hblk_vcnt == 0); 11235 ASSERT(hmeblkp->hblk_lckcnt == 0); 11236 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11237 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11238 return (hmeblkp); 11239 } 11240 11241 /* 11242 * This function cleans up the hme_blk and returns it to the free list. 11243 */ 11244 /* ARGSUSED */ 11245 static void 11246 sfmmu_hblk_free(struct hme_blk **listp) 11247 { 11248 struct hme_blk *hmeblkp, *next_hmeblkp; 11249 int size; 11250 uint_t critical; 11251 uint64_t hblkpa; 11252 11253 ASSERT(*listp != NULL); 11254 11255 hmeblkp = *listp; 11256 while (hmeblkp != NULL) { 11257 next_hmeblkp = hmeblkp->hblk_next; 11258 ASSERT(!hmeblkp->hblk_hmecnt); 11259 ASSERT(!hmeblkp->hblk_vcnt); 11260 ASSERT(!hmeblkp->hblk_lckcnt); 11261 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11262 ASSERT(hmeblkp->hblk_shared == 0); 11263 ASSERT(hmeblkp->hblk_shw_bit == 0); 11264 ASSERT(hmeblkp->hblk_shadow == NULL); 11265 11266 hblkpa = va_to_pa((caddr_t)hmeblkp); 11267 ASSERT(hblkpa != (uint64_t)-1); 11268 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0; 11269 11270 size = get_hblk_ttesz(hmeblkp); 11271 hmeblkp->hblk_next = NULL; 11272 hmeblkp->hblk_nextpa = hblkpa; 11273 11274 if (hmeblkp->hblk_nuc_bit == 0) { 11275 11276 if (size != TTE8K || 11277 !sfmmu_put_free_hblk(hmeblkp, critical)) 11278 kmem_cache_free(get_hblk_cache(hmeblkp), 11279 hmeblkp); 11280 } 11281 hmeblkp = next_hmeblkp; 11282 } 11283 } 11284 11285 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11286 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11287 11288 static uint_t sfmmu_hblk_steal_twice; 11289 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11290 11291 /* 11292 * Steal a hmeblk from user or kernel hme hash lists. 11293 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11294 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11295 * tap into critical reserve of freehblkp. 11296 * Note: We remain looping in this routine until we find one. 11297 */ 11298 static struct hme_blk * 11299 sfmmu_hblk_steal(int size) 11300 { 11301 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11302 struct hmehash_bucket *hmebp; 11303 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11304 uint64_t hblkpa; 11305 int i; 11306 uint_t loop_cnt = 0, critical; 11307 11308 for (;;) { 11309 /* Check cpu hblk pending queues */ 11310 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) { 11311 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 11312 ASSERT(hmeblkp->hblk_hmecnt == 0); 11313 ASSERT(hmeblkp->hblk_vcnt == 0); 11314 return (hmeblkp); 11315 } 11316 11317 if (size == TTE8K) { 11318 critical = 11319 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11320 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11321 return (hmeblkp); 11322 } 11323 11324 hmebp = (uhmehash_steal_hand == NULL) ? 
uhme_hash : 11325 uhmehash_steal_hand; 11326 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11327 11328 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11329 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11330 SFMMU_HASH_LOCK(hmebp); 11331 hmeblkp = hmebp->hmeblkp; 11332 hblkpa = hmebp->hmeh_nextpa; 11333 pr_hblk = NULL; 11334 while (hmeblkp) { 11335 /* 11336 * check if it is a hmeblk that is not locked 11337 * and not shared. skip shadow hmeblks with 11338 * shadow_mask set i.e valid count non zero. 11339 */ 11340 if ((get_hblk_ttesz(hmeblkp) == size) && 11341 (hmeblkp->hblk_shw_bit == 0 || 11342 hmeblkp->hblk_vcnt == 0) && 11343 (hmeblkp->hblk_lckcnt == 0)) { 11344 /* 11345 * there is a high probability that we 11346 * will find a free one. search some 11347 * buckets for a free hmeblk initially 11348 * before unloading a valid hmeblk. 11349 */ 11350 if ((hmeblkp->hblk_vcnt == 0 && 11351 hmeblkp->hblk_hmecnt == 0) || (i >= 11352 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11353 if (sfmmu_steal_this_hblk(hmebp, 11354 hmeblkp, hblkpa, pr_hblk)) { 11355 /* 11356 * Hblk is unloaded 11357 * successfully 11358 */ 11359 break; 11360 } 11361 } 11362 } 11363 pr_hblk = hmeblkp; 11364 hblkpa = hmeblkp->hblk_nextpa; 11365 hmeblkp = hmeblkp->hblk_next; 11366 } 11367 11368 SFMMU_HASH_UNLOCK(hmebp); 11369 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11370 hmebp = uhme_hash; 11371 } 11372 uhmehash_steal_hand = hmebp; 11373 11374 if (hmeblkp != NULL) 11375 break; 11376 11377 /* 11378 * in the worst case, look for a free one in the kernel 11379 * hash table. 11380 */ 11381 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11382 SFMMU_HASH_LOCK(hmebp); 11383 hmeblkp = hmebp->hmeblkp; 11384 hblkpa = hmebp->hmeh_nextpa; 11385 pr_hblk = NULL; 11386 while (hmeblkp) { 11387 /* 11388 * check if it is free hmeblk 11389 */ 11390 if ((get_hblk_ttesz(hmeblkp) == size) && 11391 (hmeblkp->hblk_lckcnt == 0) && 11392 (hmeblkp->hblk_vcnt == 0) && 11393 (hmeblkp->hblk_hmecnt == 0)) { 11394 if (sfmmu_steal_this_hblk(hmebp, 11395 hmeblkp, hblkpa, pr_hblk)) { 11396 break; 11397 } else { 11398 /* 11399 * Cannot fail since we have 11400 * hash lock. 11401 */ 11402 panic("fail to steal?"); 11403 } 11404 } 11405 11406 pr_hblk = hmeblkp; 11407 hblkpa = hmeblkp->hblk_nextpa; 11408 hmeblkp = hmeblkp->hblk_next; 11409 } 11410 11411 SFMMU_HASH_UNLOCK(hmebp); 11412 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11413 hmebp = khme_hash; 11414 } 11415 11416 if (hmeblkp != NULL) 11417 break; 11418 sfmmu_hblk_steal_twice++; 11419 } 11420 return (hmeblkp); 11421 } 11422 11423 /* 11424 * This routine does real work to prepare a hblk to be "stolen" by 11425 * unloading the mappings, updating shadow counts .... 11426 * It returns 1 if the block is ready to be reused (stolen), or 0 11427 * means the block cannot be stolen yet- pageunload is still working 11428 * on this hblk. 
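 * Roughly, the shadow-mask bit for the stolen hmeblk is cleared with
 * a lock-free compare-and-swap retry loop (an informal restatement of
 * the loop in the body below, not additional behavior):
 *
 *	do {
 *		old = shw_hblkp->hblk_shw_mask;
 *		new = old & ~(1 << vshift);
 *	} while (atomic_cas_32(&shw_hblkp->hblk_shw_mask, old, new) != old);
 *
 * so concurrent updates of other bits in the mask are never lost.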
11429 */ 11430 static int 11431 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11432 uint64_t hblkpa, struct hme_blk *pr_hblk) 11433 { 11434 int shw_size, vshift; 11435 struct hme_blk *shw_hblkp; 11436 caddr_t vaddr; 11437 uint_t shw_mask, newshw_mask; 11438 struct hme_blk *list = NULL; 11439 11440 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11441 11442 /* 11443 * check if the hmeblk is free, unload if necessary 11444 */ 11445 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11446 sfmmu_t *sfmmup; 11447 demap_range_t dmr; 11448 11449 sfmmup = hblktosfmmu(hmeblkp); 11450 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11451 return (0); 11452 } 11453 DEMAP_RANGE_INIT(sfmmup, &dmr); 11454 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11455 (caddr_t)get_hblk_base(hmeblkp), 11456 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11457 DEMAP_RANGE_FLUSH(&dmr); 11458 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11459 /* 11460 * Pageunload is working on the same hblk. 11461 */ 11462 return (0); 11463 } 11464 11465 sfmmu_hblk_steal_unload_count++; 11466 } 11467 11468 ASSERT(hmeblkp->hblk_lckcnt == 0); 11469 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11470 11471 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1); 11472 hmeblkp->hblk_nextpa = hblkpa; 11473 11474 shw_hblkp = hmeblkp->hblk_shadow; 11475 if (shw_hblkp) { 11476 ASSERT(!hmeblkp->hblk_shared); 11477 shw_size = get_hblk_ttesz(shw_hblkp); 11478 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11479 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11480 ASSERT(vshift < 8); 11481 /* 11482 * Atomically clear shadow mask bit 11483 */ 11484 do { 11485 shw_mask = shw_hblkp->hblk_shw_mask; 11486 ASSERT(shw_mask & (1 << vshift)); 11487 newshw_mask = shw_mask & ~(1 << vshift); 11488 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask, 11489 shw_mask, newshw_mask); 11490 } while (newshw_mask != shw_mask); 11491 hmeblkp->hblk_shadow = NULL; 11492 } 11493 11494 /* 11495 * remove shadow bit if we are stealing an unused shadow hmeblk. 11496 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11497 * we are indeed allocating a shadow hmeblk. 11498 */ 11499 hmeblkp->hblk_shw_bit = 0; 11500 11501 if (hmeblkp->hblk_shared) { 11502 sf_srd_t *srdp; 11503 sf_region_t *rgnp; 11504 uint_t rid; 11505 11506 srdp = hblktosrd(hmeblkp); 11507 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11508 rid = hmeblkp->hblk_tag.htag_rid; 11509 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11510 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11511 rgnp = srdp->srd_hmergnp[rid]; 11512 ASSERT(rgnp != NULL); 11513 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11514 hmeblkp->hblk_shared = 0; 11515 } 11516 11517 sfmmu_hblk_steal_count++; 11518 SFMMU_STAT(sf_steal_count); 11519 11520 return (1); 11521 } 11522 11523 struct hme_blk * 11524 sfmmu_hmetohblk(struct sf_hment *sfhme) 11525 { 11526 struct hme_blk *hmeblkp; 11527 struct sf_hment *sfhme0; 11528 struct hme_blk *hblk_dummy = 0; 11529 11530 /* 11531 * No dummy sf_hments, please. 11532 */ 11533 ASSERT(sfhme->hme_tte.ll != 0); 11534 11535 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11536 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11537 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11538 11539 return (hmeblkp); 11540 } 11541 11542 /* 11543 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11544 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11545 * KM_SLEEP allocation. 11546 * 11547 * This function returns no value.
11548 */ 11549 static void 11550 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11551 { 11552 struct tsb_info *tsbinfop, *next; 11553 tsb_replace_rc_t rc; 11554 boolean_t gotfirst = B_FALSE; 11555 11556 ASSERT(sfmmup != ksfmmup); 11557 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11558 11559 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11560 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11561 } 11562 11563 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11564 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11565 } else { 11566 return; 11567 } 11568 11569 ASSERT(sfmmup->sfmmu_tsb != NULL); 11570 11571 /* 11572 * Loop over all tsbinfo's replacing them with ones that actually have 11573 * a TSB. If any of the replacements ever fail, bail out of the loop. 11574 */ 11575 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11576 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11577 next = tsbinfop->tsb_next; 11578 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11579 hatlockp, TSB_SWAPIN); 11580 if (rc != TSB_SUCCESS) { 11581 break; 11582 } 11583 gotfirst = B_TRUE; 11584 } 11585 11586 switch (rc) { 11587 case TSB_SUCCESS: 11588 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11589 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11590 return; 11591 case TSB_LOSTRACE: 11592 break; 11593 case TSB_ALLOCFAIL: 11594 break; 11595 default: 11596 panic("sfmmu_replace_tsb returned unrecognized failure code " 11597 "%d", rc); 11598 } 11599 11600 /* 11601 * In this case, we failed to get one of our TSBs. If we failed to 11602 * get the first TSB, get one of minimum size (8KB). Walk the list 11603 * and throw away the tsbinfos, starting where the allocation failed; 11604 * we can get by with just one TSB as long as we don't leave the 11605 * SWAPPED tsbinfo structures lying around. 11606 */ 11607 tsbinfop = sfmmup->sfmmu_tsb; 11608 next = tsbinfop->tsb_next; 11609 tsbinfop->tsb_next = NULL; 11610 11611 sfmmu_hat_exit(hatlockp); 11612 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 11613 next = tsbinfop->tsb_next; 11614 sfmmu_tsbinfo_free(tsbinfop); 11615 } 11616 hatlockp = sfmmu_hat_enter(sfmmup); 11617 11618 /* 11619 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 11620 * pages. 11621 */ 11622 if (!gotfirst) { 11623 tsbinfop = sfmmup->sfmmu_tsb; 11624 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 11625 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 11626 ASSERT(rc == TSB_SUCCESS); 11627 } 11628 11629 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11630 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11631 } 11632 11633 static int 11634 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw) 11635 { 11636 ulong_t bix = 0; 11637 uint_t rid; 11638 sf_region_t *rgnp; 11639 11640 ASSERT(srdp != NULL); 11641 ASSERT(srdp->srd_refcnt != 0); 11642 11643 w <<= BT_ULSHIFT; 11644 while (bmw) { 11645 if (!(bmw & 0x1)) { 11646 bix++; 11647 bmw >>= 1; 11648 continue; 11649 } 11650 rid = w | bix; 11651 rgnp = srdp->srd_hmergnp[rid]; 11652 ASSERT(rgnp->rgn_refcnt > 0); 11653 ASSERT(rgnp->rgn_id == rid); 11654 if (addr < rgnp->rgn_saddr || 11655 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) { 11656 bix++; 11657 bmw >>= 1; 11658 } else { 11659 return (1); 11660 } 11661 } 11662 return (0); 11663 } 11664 11665 /* 11666 * Handle exceptions for low level tsb_handler. 11667 * 11668 * There are many scenarios that could land us here: 11669 * 11670 * If the context is invalid we land here. 
The context can be invalid 11671 * for 3 reasons: 1) we couldn't allocate a new context and now need to 11672 * perform a wrap around operation in order to allocate a new context. 11673 * 2) Context was invalidated to change pagesize programming. 3) ISM or 11674 * TSB configuration is changing for this process and we are forced into 11675 * here to do a synchronization operation. If the context is valid we can 11676 * be here from the window trap handler. In this case just call trap to handle 11677 * the fault. 11678 * 11679 * Note that the process will run in INVALID_CONTEXT before 11680 * faulting into here and subsequently loading the MMU registers 11681 * (including the TSB base register) associated with this process. 11682 * For this reason, the trap handlers must all test for 11683 * INVALID_CONTEXT before attempting to access any registers other 11684 * than the context registers. 11685 */ 11686 void 11687 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 11688 { 11689 sfmmu_t *sfmmup, *shsfmmup; 11690 uint_t ctxtype; 11691 klwp_id_t lwp; 11692 char lwp_save_state; 11693 hatlock_t *hatlockp, *shatlockp; 11694 struct tsb_info *tsbinfop; 11695 struct tsbmiss *tsbmp; 11696 sf_scd_t *scdp; 11697 11698 SFMMU_STAT(sf_tsb_exceptions); 11699 SFMMU_MMU_STAT(mmu_tsb_exceptions); 11700 sfmmup = astosfmmu(curthread->t_procp->p_as); 11701 /* 11702 * note that in sun4u, the tagaccess register contains the ctxnum 11703 * while sun4v passes the ctxtype in the tagaccess register. 11704 */ 11705 ctxtype = tagaccess & TAGACC_CTX_MASK; 11706 11707 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT); 11708 ASSERT(sfmmup->sfmmu_ismhat == 0); 11709 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 11710 ctxtype == INVALID_CONTEXT); 11711 11712 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) { 11713 /* 11714 * We may land here because shme bitmap and pagesize 11715 * flags are updated lazily in tsbmiss area on other cpus. 11716 * If we detect here that the tsbmiss area is out of sync with 11717 * the sfmmu, update it and retry the trapped instruction. 11718 * Otherwise call trap().
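 * An illustrative scenario (hypothetical, for clarity): another CPU
 * creates the first 4M mapping for this process, so sfmmu_tteflags
 * gains the TTE4M bit, but this CPU's tsbmiss area still holds the
 * stale uhat_tteflags; the miss handler sends us here, we copy the
 * current flags into the tsbmiss area, and retry the trapped
 * instruction rather than calling trap().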
11719 */ 11720 int ret = 0; 11721 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11722 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11723 11724 /* 11725 * Must set lwp state to LWP_SYS before 11726 * trying to acquire any adaptive lock 11727 */ 11728 lwp = ttolwp(curthread); 11729 ASSERT(lwp); 11730 lwp_save_state = lwp->lwp_state; 11731 lwp->lwp_state = LWP_SYS; 11732 11733 hatlockp = sfmmu_hat_enter(sfmmup); 11734 kpreempt_disable(); 11735 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11736 ASSERT(sfmmup == tsbmp->usfmmup); 11737 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11738 ~tteflag_mask) || 11739 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11740 ~tteflag_mask)) { 11741 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11742 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11743 ret = 1; 11744 } 11745 if (sfmmup->sfmmu_srdp != NULL) { 11746 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11747 ulong_t *tm = tsbmp->shmermap; 11748 ulong_t i; 11749 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11750 ulong_t d = tm[i] ^ sm[i]; 11751 if (d) { 11752 if (d & sm[i]) { 11753 if (!ret && sfmmu_is_rgnva( 11754 sfmmup->sfmmu_srdp, 11755 addr, i, d & sm[i])) { 11756 ret = 1; 11757 } 11758 } 11759 tm[i] = sm[i]; 11760 } 11761 } 11762 } 11763 kpreempt_enable(); 11764 sfmmu_hat_exit(hatlockp); 11765 lwp->lwp_state = lwp_save_state; 11766 if (ret) { 11767 return; 11768 } 11769 } else if (ctxtype == INVALID_CONTEXT) { 11770 /* 11771 * First, make sure we come out of here with a valid ctx, 11772 * since if we don't get one we'll simply loop on the 11773 * faulting instruction. 11774 * 11775 * If the ISM mappings are changing, the TSB is relocated, 11776 * the process is swapped, the process is joining SCD or 11777 * leaving SCD or shared regions we serialize behind the 11778 * controlling thread with hat lock, sfmmu_flags and 11779 * sfmmu_tsb_cv condition variable. 11780 */ 11781 11782 /* 11783 * Must set lwp state to LWP_SYS before 11784 * trying to acquire any adaptive lock 11785 */ 11786 lwp = ttolwp(curthread); 11787 ASSERT(lwp); 11788 lwp_save_state = lwp->lwp_state; 11789 lwp->lwp_state = LWP_SYS; 11790 11791 hatlockp = sfmmu_hat_enter(sfmmup); 11792 retry: 11793 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 11794 shsfmmup = scdp->scd_sfmmup; 11795 ASSERT(shsfmmup != NULL); 11796 11797 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 11798 tsbinfop = tsbinfop->tsb_next) { 11799 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11800 /* drop the private hat lock */ 11801 sfmmu_hat_exit(hatlockp); 11802 /* acquire the shared hat lock */ 11803 shatlockp = sfmmu_hat_enter(shsfmmup); 11804 /* 11805 * recheck to see if anything changed 11806 * after we drop the private hat lock. 11807 */ 11808 if (sfmmup->sfmmu_scdp == scdp && 11809 shsfmmup == scdp->scd_sfmmup) { 11810 sfmmu_tsb_chk_reloc(shsfmmup, 11811 shatlockp); 11812 } 11813 sfmmu_hat_exit(shatlockp); 11814 hatlockp = sfmmu_hat_enter(sfmmup); 11815 goto retry; 11816 } 11817 } 11818 } 11819 11820 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 11821 tsbinfop = tsbinfop->tsb_next) { 11822 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11823 cv_wait(&sfmmup->sfmmu_tsb_cv, 11824 HATLOCK_MUTEXP(hatlockp)); 11825 goto retry; 11826 } 11827 } 11828 11829 /* 11830 * Wait for ISM maps to be updated. 11831 */ 11832 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 11833 cv_wait(&sfmmup->sfmmu_tsb_cv, 11834 HATLOCK_MUTEXP(hatlockp)); 11835 goto retry; 11836 } 11837 11838 /* Is this process joining an SCD? 
*/ 11839 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 11840 /* 11841 * Flush private TSB and setup shared TSB. 11842 * sfmmu_finish_join_scd() does not drop the 11843 * hat lock. 11844 */ 11845 sfmmu_finish_join_scd(sfmmup); 11846 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 11847 } 11848 11849 /* 11850 * If we're swapping in, get TSB(s). Note that we must do 11851 * this before we get a ctx or load the MMU state. Once 11852 * we swap in we have to recheck to make sure the TSB(s) and 11853 * ISM mappings didn't change while we slept. 11854 */ 11855 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11856 sfmmu_tsb_swapin(sfmmup, hatlockp); 11857 goto retry; 11858 } 11859 11860 sfmmu_get_ctx(sfmmup); 11861 11862 sfmmu_hat_exit(hatlockp); 11863 /* 11864 * Must restore lwp_state if not calling 11865 * trap() for further processing. Restore 11866 * it anyway. 11867 */ 11868 lwp->lwp_state = lwp_save_state; 11869 return; 11870 } 11871 trap(rp, (caddr_t)tagaccess, traptype, 0); 11872 } 11873 11874 static void 11875 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11876 { 11877 struct tsb_info *tp; 11878 11879 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11880 11881 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) { 11882 if (tp->tsb_flags & TSB_RELOC_FLAG) { 11883 cv_wait(&sfmmup->sfmmu_tsb_cv, 11884 HATLOCK_MUTEXP(hatlockp)); 11885 break; 11886 } 11887 } 11888 } 11889 11890 /* 11891 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 11892 * the TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock 11893 * rather than spinning, to avoid send mondo timeouts with 11894 * interrupts enabled. When the lock is acquired it is immediately 11895 * released and we return back to sfmmu_vatopfn just after 11896 * the GET_TTE call. 11897 */ 11898 void 11899 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 11900 { 11901 struct page **pp; 11902 11903 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 11904 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 11905 } 11906 11907 /* 11908 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 11909 * the TTE_SUSPENDED bit is set in the tte. We do this so that we can handle 11910 * cross traps which cannot be handled while spinning in the 11911 * trap handlers. Simply enter and exit the kpr_suspendlock spin 11912 * mutex, which is held by the holder of the suspend bit, and then 11913 * retry the trapped instruction after unwinding. 11914 */ 11915 /*ARGSUSED*/ 11916 void 11917 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 11918 { 11919 ASSERT(curthread != kreloc_thread); 11920 mutex_enter(&kpr_suspendlock); 11921 mutex_exit(&kpr_suspendlock); 11922 } 11923 11924 /* 11925 * This routine could be optimized to reduce the number of xcalls by flushing 11926 * the entire TLB if the region reference count is above some threshold, but the 11927 * tradeoff will depend on the size of the TLB. So for now flush the specific 11928 * page, one context at a time. 11929 * 11930 * If uselocks is 0 then it's called after all cpus were captured and all the 11931 * hat locks were taken. In this case don't take the region lock; instead rely on 11932 * the order of region list update operations in hat_join_region(), 11933 * hat_leave_region() and hat_dup_region(). The ordering in those routines 11934 * guarantees that the list is always forward walkable and reaches active sfmmus 11935 * regardless of where xc_attention() captures a cpu.
11936 */ 11937 cpuset_t 11938 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp, 11939 struct hme_blk *hmeblkp, int uselocks) 11940 { 11941 sfmmu_t *sfmmup; 11942 cpuset_t cpuset; 11943 cpuset_t rcpuset; 11944 hatlock_t *hatlockp; 11945 uint_t rid = rgnp->rgn_id; 11946 sf_rgn_link_t *rlink; 11947 sf_scd_t *scdp; 11948 11949 ASSERT(hmeblkp->hblk_shared); 11950 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11951 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11952 11953 CPUSET_ZERO(rcpuset); 11954 if (uselocks) { 11955 mutex_enter(&rgnp->rgn_mutex); 11956 } 11957 sfmmup = rgnp->rgn_sfmmu_head; 11958 while (sfmmup != NULL) { 11959 if (uselocks) { 11960 hatlockp = sfmmu_hat_enter(sfmmup); 11961 } 11962 11963 /* 11964 * When an SCD is created the SCD hat is linked on the sfmmu 11965 * region lists for each hme region which is part of the 11966 * SCD. If we find an SCD hat, when walking these lists, 11967 * then we flush the shared TSBs; if we find a private hat 11968 * which is part of an SCD, but where the region 11969 * is not part of the SCD, then we flush the private TSBs. 11970 */ 11971 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 11972 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 11973 scdp = sfmmup->sfmmu_scdp; 11974 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 11975 if (uselocks) { 11976 sfmmu_hat_exit(hatlockp); 11977 } 11978 goto next; 11979 } 11980 } 11981 11982 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 11983 11984 kpreempt_disable(); 11985 cpuset = sfmmup->sfmmu_cpusran; 11986 CPUSET_AND(cpuset, cpu_ready_set); 11987 CPUSET_DEL(cpuset, CPU->cpu_id); 11988 SFMMU_XCALL_STATS(sfmmup); 11989 xt_some(cpuset, vtag_flushpage_tl1, 11990 (uint64_t)addr, (uint64_t)sfmmup); 11991 vtag_flushpage(addr, (uint64_t)sfmmup); 11992 if (uselocks) { 11993 sfmmu_hat_exit(hatlockp); 11994 } 11995 kpreempt_enable(); 11996 CPUSET_OR(rcpuset, cpuset); 11997 11998 next: 11999 /* LINTED: constant in conditional context */ 12000 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 12001 ASSERT(rlink != NULL); 12002 sfmmup = rlink->next; 12003 } 12004 if (uselocks) { 12005 mutex_exit(&rgnp->rgn_mutex); 12006 } 12007 return (rcpuset); 12008 } 12009 12010 /* 12011 * This routine takes an sfmmu pointer and the va for an address in an 12012 * ISM region as input and returns the corresponding region id in ism_rid. 12013 * The return value of 1 indicates that a region has been found and ism_rid 12014 * is valid; otherwise 0 is returned. 12015 */ 12016 static int 12017 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid) 12018 { 12019 ism_blk_t *ism_blkp; 12020 int i; 12021 ism_map_t *ism_map; 12022 #ifdef DEBUG 12023 struct hat *ism_hatid; 12024 #endif 12025 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12026 12027 ism_blkp = sfmmup->sfmmu_iblk; 12028 while (ism_blkp != NULL) { 12029 ism_map = ism_blkp->iblk_maps; 12030 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 12031 if ((va >= ism_start(ism_map[i])) && 12032 (va < ism_end(ism_map[i]))) { 12033 12034 *ism_rid = ism_map[i].imap_rid; 12035 #ifdef DEBUG 12036 ism_hatid = ism_map[i].imap_ismhat; 12037 ASSERT(ism_hatid == ism_sfmmup); 12038 ASSERT(ism_hatid->sfmmu_ismhat); 12039 #endif 12040 return (1); 12041 } 12042 } 12043 ism_blkp = ism_blkp->iblk_next; 12044 } 12045 return (0); 12046 } 12047 12048 /* 12049 * Special routine to flush out ism mappings - TSBs, TLBs and D-caches. 12050 * This routine may be called with all cpu's captured.
Therefore, the 12051 * caller is responsible for holding all locks and disabling kernel 12052 * preemption. 12053 */ 12054 /* ARGSUSED */ 12055 static void 12056 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 12057 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 12058 { 12059 cpuset_t cpuset; 12060 caddr_t va; 12061 ism_ment_t *ment; 12062 sfmmu_t *sfmmup; 12063 #ifdef VAC 12064 int vcolor; 12065 #endif 12066 12067 sf_scd_t *scdp; 12068 uint_t ism_rid; 12069 12070 ASSERT(!hmeblkp->hblk_shared); 12071 /* 12072 * Walk the ism_hat's mapping list and flush the page 12073 * from every hat sharing this ism_hat. This routine 12074 * may be called while all cpu's have been captured. 12075 * Therefore we can't attempt to grab any locks. For now 12076 * this means we will protect the ism mapping list under 12077 * a single lock which will be grabbed by the caller. 12078 * If hat_share/unshare scalability becomes a performance 12079 * problem then we may need to re-think ism mapping list locking. 12080 */ 12081 ASSERT(ism_sfmmup->sfmmu_ismhat); 12082 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 12083 addr = (caddr_t)((uintptr_t)addr - (uintptr_t)ISMID_STARTADDR); 12084 12085 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 12086 12087 sfmmup = ment->iment_hat; 12088 12089 va = ment->iment_base_va; 12090 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 12091 12092 /* 12093 * When an SCD is created the SCD hat is linked on the ism 12094 * mapping lists for each ISM segment which is part of the 12095 * SCD. If we find an SCD hat, when walking these lists, 12096 * then we flush the shared TSBs; if we find a private hat 12097 * which is part of an SCD, but where the region 12098 * corresponding to this va is not part of the SCD, then we 12099 * flush the private TSBs. 12100 */ 12101 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12102 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12103 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12104 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12105 &ism_rid)) { 12106 cmn_err(CE_PANIC, 12107 "can't find matching ISM rid!"); 12108 } 12109 12110 scdp = sfmmup->sfmmu_scdp; 12111 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12112 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12113 ism_rid)) { 12114 continue; 12115 } 12116 } 12117 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12118 12119 cpuset = sfmmup->sfmmu_cpusran; 12120 CPUSET_AND(cpuset, cpu_ready_set); 12121 CPUSET_DEL(cpuset, CPU->cpu_id); 12122 SFMMU_XCALL_STATS(sfmmup); 12123 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12124 (uint64_t)sfmmup); 12125 vtag_flushpage(va, (uint64_t)sfmmup); 12126 12127 #ifdef VAC 12128 /* 12129 * Flush D$ 12130 * When flushing D$ we must flush all 12131 * cpu's. See sfmmu_cache_flush(). 12132 */ 12133 if (cache_flush_flag == CACHE_FLUSH) { 12134 cpuset = cpu_ready_set; 12135 CPUSET_DEL(cpuset, CPU->cpu_id); 12136 12137 SFMMU_XCALL_STATS(sfmmup); 12138 vcolor = addr_to_vcolor(va); 12139 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12140 vac_flushpage(pfnum, vcolor); 12141 } 12142 #endif /* VAC */ 12143 } 12144 } 12145 12146 /* 12147 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12148 * a particular virtual address and ctx. If noflush is set we do not 12149 * flush the TLB/TSB. This function may or may not be called with the 12150 * HAT lock held.
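* The flag arguments below select the behavior: tlb_noflush skips the TSB/TLB flush entirely, cpu_flag (FLUSH_ALL_CPUS) widens the D$ flush from the hat's cpusran set to every ready CPU, cache_flush_flag (CACHE_FLUSH) controls whether the D$ is flushed at all, and hat_lock_held tells the routine whether it must take the hat lock itself.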
12151 */ 12152 static void 12153 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12154 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12155 int hat_lock_held) 12156 { 12157 #ifdef VAC 12158 int vcolor; 12159 #endif 12160 cpuset_t cpuset; 12161 hatlock_t *hatlockp; 12162 12163 ASSERT(!hmeblkp->hblk_shared); 12164 12165 #if defined(lint) && !defined(VAC) 12166 pfnum = pfnum; 12167 cpu_flag = cpu_flag; 12168 cache_flush_flag = cache_flush_flag; 12169 #endif 12170 12171 /* 12172 * There is no longer a need to protect against ctx being 12173 * stolen here since we don't store the ctx in the TSB anymore. 12174 */ 12175 #ifdef VAC 12176 vcolor = addr_to_vcolor(addr); 12177 #endif 12178 12179 /* 12180 * We must hold the hat lock during the flush of TLB, 12181 * to avoid a race with sfmmu_invalidate_ctx(), where 12182 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12183 * causing TLB demap routine to skip flush on that MMU. 12184 * If the context on a MMU has already been set to 12185 * INVALID_CONTEXT, we just get an extra flush on 12186 * that MMU. 12187 */ 12188 if (!hat_lock_held && !tlb_noflush) 12189 hatlockp = sfmmu_hat_enter(sfmmup); 12190 12191 kpreempt_disable(); 12192 if (!tlb_noflush) { 12193 /* 12194 * Flush the TSB and TLB. 12195 */ 12196 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12197 12198 cpuset = sfmmup->sfmmu_cpusran; 12199 CPUSET_AND(cpuset, cpu_ready_set); 12200 CPUSET_DEL(cpuset, CPU->cpu_id); 12201 12202 SFMMU_XCALL_STATS(sfmmup); 12203 12204 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12205 (uint64_t)sfmmup); 12206 12207 vtag_flushpage(addr, (uint64_t)sfmmup); 12208 } 12209 12210 if (!hat_lock_held && !tlb_noflush) 12211 sfmmu_hat_exit(hatlockp); 12212 12213 #ifdef VAC 12214 /* 12215 * Flush the D$ 12216 * 12217 * Even if the ctx is stolen, we need to flush the 12218 * cache. Our ctx stealer only flushes the TLBs. 12219 */ 12220 if (cache_flush_flag == CACHE_FLUSH) { 12221 if (cpu_flag & FLUSH_ALL_CPUS) { 12222 cpuset = cpu_ready_set; 12223 } else { 12224 cpuset = sfmmup->sfmmu_cpusran; 12225 CPUSET_AND(cpuset, cpu_ready_set); 12226 } 12227 CPUSET_DEL(cpuset, CPU->cpu_id); 12228 SFMMU_XCALL_STATS(sfmmup); 12229 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12230 vac_flushpage(pfnum, vcolor); 12231 } 12232 #endif /* VAC */ 12233 kpreempt_enable(); 12234 } 12235 12236 /* 12237 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12238 * address and ctx. If noflush is set we do not currently do anything. 12239 * This function may or may not be called with the HAT lock held. 12240 */ 12241 static void 12242 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12243 int tlb_noflush, int hat_lock_held) 12244 { 12245 cpuset_t cpuset; 12246 hatlock_t *hatlockp; 12247 12248 ASSERT(!hmeblkp->hblk_shared); 12249 12250 /* 12251 * If the process is exiting we have nothing to do. 12252 */ 12253 if (tlb_noflush) 12254 return; 12255 12256 /* 12257 * Flush TSB. 
12258 */ 12259 if (!hat_lock_held) 12260 hatlockp = sfmmu_hat_enter(sfmmup); 12261 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12262 12263 kpreempt_disable(); 12264 12265 cpuset = sfmmup->sfmmu_cpusran; 12266 CPUSET_AND(cpuset, cpu_ready_set); 12267 CPUSET_DEL(cpuset, CPU->cpu_id); 12268 12269 SFMMU_XCALL_STATS(sfmmup); 12270 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12271 12272 vtag_flushpage(addr, (uint64_t)sfmmup); 12273 12274 if (!hat_lock_held) 12275 sfmmu_hat_exit(hatlockp); 12276 12277 kpreempt_enable(); 12278 12279 } 12280 12281 /* 12282 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12283 * handler that can flush a range of pages to save on xcalls. 12284 */ 12285 static int sfmmu_xcall_save; 12286 12287 /* 12288 * This routine is never used for demapping addresses backed by SRD hmeblks. 12289 */ 12290 static void 12291 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12292 { 12293 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12294 hatlock_t *hatlockp; 12295 cpuset_t cpuset; 12296 uint64_t sfmmu_pgcnt; 12297 pgcnt_t pgcnt = 0; 12298 int pgunload = 0; 12299 int dirtypg = 0; 12300 caddr_t addr = dmrp->dmr_addr; 12301 caddr_t eaddr; 12302 uint64_t bitvec = dmrp->dmr_bitvec; 12303 12304 ASSERT(bitvec & 1); 12305 12306 /* 12307 * Flush TSB and calculate number of pages to flush. 12308 */ 12309 while (bitvec != 0) { 12310 dirtypg = 0; 12311 /* 12312 * Find the first page to flush and then count how many 12313 * pages there are after it that also need to be flushed. 12314 * This way the number of TSB flushes is minimized. 12315 */ 12316 while ((bitvec & 1) == 0) { 12317 pgcnt++; 12318 addr += MMU_PAGESIZE; 12319 bitvec >>= 1; 12320 } 12321 while (bitvec & 1) { 12322 dirtypg++; 12323 bitvec >>= 1; 12324 } 12325 eaddr = addr + ptob(dirtypg); 12326 hatlockp = sfmmu_hat_enter(sfmmup); 12327 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12328 sfmmu_hat_exit(hatlockp); 12329 pgunload += dirtypg; 12330 addr = eaddr; 12331 pgcnt += dirtypg; 12332 } 12333 12334 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12335 if (sfmmup->sfmmu_free == 0) { 12336 addr = dmrp->dmr_addr; 12337 bitvec = dmrp->dmr_bitvec; 12338 12339 /* 12340 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12341 * as it will be used to pack the argument for xt_some 12342 */ 12343 ASSERT((pgcnt > 0) && 12344 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12345 12346 /* 12347 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in 12348 * the low 6 bits of sfmmup. This is doable since pgcnt 12349 * is always >= 1. 12350 */ 12351 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12352 sfmmu_pgcnt = (uint64_t)sfmmup | 12353 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12354 12355 /* 12356 * We must hold the hat lock during the flush of TLB, 12357 * to avoid a race with sfmmu_invalidate_ctx(), where 12358 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12359 * causing TLB demap routine to skip flush on that MMU. 12360 * If the context on a MMU has already been set to 12361 * INVALID_CONTEXT, we just get an extra flush on 12362 * that MMU.
12363 */ 12364 hatlockp = sfmmu_hat_enter(sfmmup); 12365 kpreempt_disable(); 12366 12367 cpuset = sfmmup->sfmmu_cpusran; 12368 CPUSET_AND(cpuset, cpu_ready_set); 12369 CPUSET_DEL(cpuset, CPU->cpu_id); 12370 12371 SFMMU_XCALL_STATS(sfmmup); 12372 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12373 sfmmu_pgcnt); 12374 12375 for (; bitvec != 0; bitvec >>= 1) { 12376 if (bitvec & 1) 12377 vtag_flushpage(addr, (uint64_t)sfmmup); 12378 addr += MMU_PAGESIZE; 12379 } 12380 kpreempt_enable(); 12381 sfmmu_hat_exit(hatlockp); 12382 12383 sfmmu_xcall_save += (pgunload-1); 12384 } 12385 dmrp->dmr_bitvec = 0; 12386 } 12387 12388 /* 12389 * In cases where we need to synchronize with TLB/TSB miss trap 12390 * handlers, _and_ need to flush the TLB, it's a lot easier to 12391 * throw away the context from the process than to do a 12392 * special song and dance to keep things consistent for the 12393 * handlers. 12394 * 12395 * Since the process suddenly ends up without a context and our caller 12396 * holds the hat lock, threads that fault after this function is called 12397 * will pile up on the lock. We can then do whatever we need to 12398 * atomically from the context of the caller. The first blocked thread 12399 * to resume executing will get the process a new context, and the 12400 * process will resume executing. 12401 * 12402 * One added advantage of this approach is that on MMUs that 12403 * support a "flush all" operation, we will delay the flush until 12404 * cnum wrap-around, and then flush the TLB one time. This 12405 * is rather rare, so it's a lot less expensive than making 8000 12406 * x-calls to flush the TLB 8000 times. 12407 * 12408 * A per-process (PP) lock is used to synchronize ctx allocations in 12409 * resume() and ctx invalidations here. 12410 */ 12411 static void 12412 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 12413 { 12414 cpuset_t cpuset; 12415 int cnum, currcnum; 12416 mmu_ctx_t *mmu_ctxp; 12417 int i; 12418 uint_t pstate_save; 12419 12420 SFMMU_STAT(sf_ctx_inv); 12421 12422 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12423 ASSERT(sfmmup != ksfmmup); 12424 12425 kpreempt_disable(); 12426 12427 mmu_ctxp = CPU_MMU_CTXP(CPU); 12428 ASSERT(mmu_ctxp); 12429 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 12430 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 12431 12432 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 12433 12434 pstate_save = sfmmu_disable_intrs(); 12435 12436 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 12437 /* set HAT cnum invalid across all context domains. */ 12438 for (i = 0; i < max_mmu_ctxdoms; i++) { 12439 12440 cnum = sfmmup->sfmmu_ctxs[i].cnum; 12441 if (cnum == INVALID_CONTEXT) { 12442 continue; 12443 } 12444 12445 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 12446 } 12447 membar_enter(); /* make sure globally visible to all CPUs */ 12448 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 12449 12450 sfmmu_enable_intrs(pstate_save); 12451 12452 cpuset = sfmmup->sfmmu_cpusran; 12453 CPUSET_DEL(cpuset, CPU->cpu_id); 12454 CPUSET_AND(cpuset, cpu_ready_set); 12455 if (!CPUSET_ISNULL(cpuset)) { 12456 SFMMU_XCALL_STATS(sfmmup); 12457 xt_some(cpuset, sfmmu_raise_tsb_exception, 12458 (uint64_t)sfmmup, INVALID_CONTEXT); 12459 xt_sync(cpuset); 12460 SFMMU_STAT(sf_tsb_raise_exception); 12461 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 12462 } 12463 12464 /* 12465 * If the hat to-be-invalidated is the same as the current 12466 * process on local CPU we need to invalidate 12467 * this CPU context as well. 
12468 */ 12469 if ((sfmmu_getctx_sec() == currcnum) && 12470 (currcnum != INVALID_CONTEXT)) { 12471 /* sets shared context to INVALID too */ 12472 sfmmu_setctx_sec(INVALID_CONTEXT); 12473 sfmmu_clear_utsbinfo(); 12474 } 12475 12476 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID); 12477 12478 kpreempt_enable(); 12479 12480 /* 12481 * we hold the hat lock, so nobody should allocate a context 12482 * for us yet 12483 */ 12484 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 12485 } 12486 12487 #ifdef VAC 12488 /* 12489 * We need to flush the cache on all cpus. It is possible that 12490 * a process referenced a page as cacheable but has since exited 12491 * and cleared the mapping list. We still need to flush it but have no 12492 * state, so flushing all cpus is the only alternative. 12493 */ 12494 void 12495 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 12496 { 12497 cpuset_t cpuset; 12498 12499 kpreempt_disable(); 12500 cpuset = cpu_ready_set; 12501 CPUSET_DEL(cpuset, CPU->cpu_id); 12502 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12503 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12504 xt_sync(cpuset); 12505 vac_flushpage(pfnum, vcolor); 12506 kpreempt_enable(); 12507 } 12508 12509 void 12510 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 12511 { 12512 cpuset_t cpuset; 12513 12514 ASSERT(vcolor >= 0); 12515 12516 kpreempt_disable(); 12517 cpuset = cpu_ready_set; 12518 CPUSET_DEL(cpuset, CPU->cpu_id); 12519 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12520 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 12521 xt_sync(cpuset); 12522 vac_flushcolor(vcolor, pfnum); 12523 kpreempt_enable(); 12524 } 12525 #endif /* VAC */ 12526 12527 /* 12528 * We need to prevent processes from accessing the TSB using a cached physical 12529 * address. It's alright if they try to access the TSB via virtual address 12530 * since they will just fault on that virtual address once the mapping has 12531 * been suspended. 12532 */ 12533 #pragma weak sendmondo_in_recover 12534 12535 /* ARGSUSED */ 12536 static int 12537 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 12538 { 12539 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12540 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12541 hatlock_t *hatlockp; 12542 sf_scd_t *scdp; 12543 12544 if (flags != HAT_PRESUSPEND) 12545 return (0); 12546 12547 /* 12548 * If tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must 12549 * be a shared hat; in that case set the SCD's tsbinfo flag. 12550 * If tsb is not shared, sfmmup is a private hat; set 12551 * its private tsbinfo's flag. 12552 */ 12553 hatlockp = sfmmu_hat_enter(sfmmup); 12554 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 12555 12556 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) { 12557 sfmmu_tsb_inv_ctx(sfmmup); 12558 sfmmu_hat_exit(hatlockp); 12559 } else { 12560 /* release lock on the shared hat */ 12561 sfmmu_hat_exit(hatlockp); 12562 /* sfmmup is a shared hat */ 12563 ASSERT(sfmmup->sfmmu_scdhat); 12564 scdp = sfmmup->sfmmu_scdp; 12565 ASSERT(scdp != NULL); 12566 /* get private hat from the scd list */ 12567 mutex_enter(&scdp->scd_mutex); 12568 sfmmup = scdp->scd_sf_list; 12569 while (sfmmup != NULL) { 12570 hatlockp = sfmmu_hat_enter(sfmmup); 12571 /* 12572 * We do not call sfmmu_tsb_inv_ctx here because 12573 * the sendmondo_in_recover check is only needed for 12574 * sun4u.
12575 */ 12576 sfmmu_invalidate_ctx(sfmmup); 12577 sfmmu_hat_exit(hatlockp); 12578 sfmmup = sfmmup->sfmmu_scd_link.next; 12579 12580 } 12581 mutex_exit(&scdp->scd_mutex); 12582 } 12583 return (0); 12584 } 12585 12586 static void 12587 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12588 { 12589 extern uint32_t sendmondo_in_recover; 12590 12591 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12592 12593 /* 12594 * For Cheetah+ Erratum 25: 12595 * Wait for any active recovery to finish. We can't risk 12596 * relocating the TSB of the thread running mondo_recover_proc() 12597 * since, if we did that, we would deadlock. The scenario we are 12598 * trying to avoid is as follows: 12599 * 12600 * THIS CPU RECOVER CPU 12601 * -------- ----------- 12602 * Begins recovery, walking through TSB 12603 * hat_pagesuspend() TSB TTE 12604 * TLB miss on TSB TTE, spins at TL1 12605 * xt_sync() 12606 * send_mondo_timeout() 12607 * mondo_recover_proc() 12608 * ((deadlocked)) 12609 * 12610 * The second half of the workaround is that mondo_recover_proc() 12611 * checks to see if the tsb_info has the RELOC flag set, and if it 12612 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12613 * and hence avoiding the TLB miss that could result in a deadlock. 12614 */ 12615 if (&sendmondo_in_recover) { 12616 membar_enter(); /* make sure RELOC flag visible */ 12617 while (sendmondo_in_recover) { 12618 drv_usecwait(1); 12619 membar_consumer(); 12620 } 12621 } 12622 12623 sfmmu_invalidate_ctx(sfmmup); 12624 } 12625 12626 /* ARGSUSED */ 12627 static int 12628 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12629 void *tsbinfo, pfn_t newpfn) 12630 { 12631 hatlock_t *hatlockp; 12632 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12633 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12634 12635 if (flags != HAT_POSTUNSUSPEND) 12636 return (0); 12637 12638 hatlockp = sfmmu_hat_enter(sfmmup); 12639 12640 SFMMU_STAT(sf_tsb_reloc); 12641 12642 /* 12643 * The process may have swapped out while we were relocating one 12644 * of its TSBs. If so, don't bother doing the setup since the 12645 * process can't be using the memory anymore. 12646 */ 12647 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12648 ASSERT(va == tsbinfop->tsb_va); 12649 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12650 12651 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12652 sfmmu_inv_tsb(tsbinfop->tsb_va, 12653 TSB_BYTES(tsbinfop->tsb_szc)); 12654 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12655 } 12656 } 12657 12658 membar_exit(); 12659 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12660 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12661 12662 sfmmu_hat_exit(hatlockp); 12663 12664 return (0); 12665 } 12666 12667 /* 12668 * Allocate and initialize a tsb_info structure. Note that we may or may not 12669 * allocate a TSB here, depending on the flags passed in. 12670 */ 12671 static int 12672 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12673 uint_t flags, sfmmu_t *sfmmup) 12674 { 12675 int err; 12676 12677 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12678 sfmmu_tsbinfo_cache, KM_SLEEP); 12679 12680 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12681 tsb_szc, flags, sfmmup)) != 0) { 12682 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12683 SFMMU_STAT(sf_tsb_allocfail); 12684 *tsbinfopp = NULL; 12685 return (err); 12686 } 12687 SFMMU_STAT(sf_tsb_alloc); 12688 12689 /* 12690 * Bump the TSB size counters for this TSB size. 
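* (sfmmu_tsbsize_stat is treated as a flat array of int counters indexed by TSB size code, so the expression below just increments the slot for tsb_szc.)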
12691 */ 12692 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12693 return (0); 12694 } 12695 12696 static void 12697 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12698 { 12699 caddr_t tsbva = tsbinfo->tsb_va; 12700 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12701 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12702 vmem_t *vmp = tsbinfo->tsb_vmp; 12703 12704 /* 12705 * If we allocated this TSB from relocatable kernel memory, then we 12706 * need to uninstall the callback handler. 12707 */ 12708 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12709 uintptr_t slab_mask; 12710 caddr_t slab_vaddr; 12711 page_t **ppl; 12712 int ret; 12713 12714 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12715 if (tsb_size > MMU_PAGESIZE4M) 12716 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12717 else 12718 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12719 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12720 12721 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12722 ASSERT(ret == 0); 12723 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12724 0, NULL); 12725 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12726 } 12727 12728 if (kmem_cachep != NULL) { 12729 kmem_cache_free(kmem_cachep, tsbva); 12730 } else { 12731 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12732 } 12733 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12734 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12735 } 12736 12737 static void 12738 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12739 { 12740 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12741 sfmmu_tsb_free(tsbinfo); 12742 } 12743 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12744 12745 } 12746 12747 /* 12748 * Setup all the references to physical memory for this tsbinfo. 12749 * The underlying page(s) must be locked. 12750 */ 12751 static void 12752 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12753 { 12754 ASSERT(pfn != PFN_INVALID); 12755 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12756 12757 #ifndef sun4v 12758 if (tsbinfo->tsb_szc == 0) { 12759 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12760 PROT_WRITE|PROT_READ, TTE8K); 12761 } else { 12762 /* 12763 * Round down PA and use a large mapping; the handlers will 12764 * compute the TSB pointer at the correct offset into the 12765 * big virtual page. NOTE: this assumes all TSBs larger 12766 * than 8K must come from physically contiguous slabs of 12767 * size tsb_slab_size. 12768 */ 12769 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12770 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12771 } 12772 tsbinfo->tsb_pa = ptob(pfn); 12773 12774 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12775 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12776 12777 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 12778 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 12779 #else /* sun4v */ 12780 tsbinfo->tsb_pa = ptob(pfn); 12781 #endif /* sun4v */ 12782 } 12783 12784 12785 /* 12786 * Returns zero on success, ENOMEM if over the high water mark, 12787 * or EAGAIN if the caller needs to retry with a smaller TSB 12788 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 12789 * 12790 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 12791 * is specified and the TSB requested is PAGESIZE, though it 12792 * may sleep waiting for memory if sufficient memory is not 12793 * available. 
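* (The TSB_FORCEALLOC guarantee comes from the low-memory path below: for PAGESIZE requests it falls back to the 8K sfmmu_tsb8k_cache with KM_SLEEP instead of the nonblocking TSB arenas.)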
12794 */ 12795 static int 12796 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 12797 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 12798 { 12799 caddr_t vaddr = NULL; 12800 caddr_t slab_vaddr; 12801 uintptr_t slab_mask; 12802 int tsbbytes = TSB_BYTES(tsbcode); 12803 int lowmem = 0; 12804 struct kmem_cache *kmem_cachep = NULL; 12805 vmem_t *vmp = NULL; 12806 lgrp_id_t lgrpid = LGRP_NONE; 12807 pfn_t pfn; 12808 uint_t cbflags = HAC_SLEEP; 12809 page_t **pplist; 12810 int ret; 12811 12812 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 12813 if (tsbbytes > MMU_PAGESIZE4M) 12814 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12815 else 12816 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12817 12818 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 12819 flags |= TSB_ALLOC; 12820 12821 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 12822 12823 tsbinfo->tsb_sfmmu = sfmmup; 12824 12825 /* 12826 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 12827 * return. 12828 */ 12829 if ((flags & TSB_ALLOC) == 0) { 12830 tsbinfo->tsb_szc = tsbcode; 12831 tsbinfo->tsb_ttesz_mask = tteszmask; 12832 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 12833 tsbinfo->tsb_pa = -1; 12834 tsbinfo->tsb_tte.ll = 0; 12835 tsbinfo->tsb_next = NULL; 12836 tsbinfo->tsb_flags = TSB_SWAPPED; 12837 tsbinfo->tsb_cache = NULL; 12838 tsbinfo->tsb_vmp = NULL; 12839 return (0); 12840 } 12841 12842 #ifdef DEBUG 12843 /* 12844 * For debugging: 12845 * Randomly force allocation failures every tsb_alloc_mtbf 12846 * tries if TSB_FORCEALLOC is not specified. This will 12847 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 12848 * it is even, to allow testing of both failure paths... 12849 */ 12850 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 12851 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 12852 tsb_alloc_count = 0; 12853 tsb_alloc_fail_mtbf++; 12854 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 12855 } 12856 #endif /* DEBUG */ 12857 12858 /* 12859 * Enforce high water mark if we are not doing a forced allocation 12860 * and are not shrinking a process' TSB. 12861 */ 12862 if ((flags & TSB_SHRINK) == 0 && 12863 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 12864 if ((flags & TSB_FORCEALLOC) == 0) 12865 return (ENOMEM); 12866 lowmem = 1; 12867 } 12868 12869 /* 12870 * Allocate from the correct location based upon the size of the TSB 12871 * compared to the base page size, and what memory conditions dictate. 12872 * Note we always do nonblocking allocations from the TSB arena since 12873 * we don't want memory fragmentation to cause processes to block 12874 * indefinitely waiting for memory; until the kernel algorithms that 12875 * coalesce large pages are improved this is our best option. 
12876 * 12877 * Algorithm: 12878 * If allocating a "large" TSB (>8K), allocate from the 12879 * appropriate kmem_tsb_default_arena vmem arena 12880 * else if low on memory or the TSB_FORCEALLOC flag is set or 12881 * tsb_forceheap is set 12882 * Allocate from kernel heap via sfmmu_tsb8k_cache with 12883 * KM_SLEEP (never fails) 12884 * else 12885 * Allocate from appropriate sfmmu_tsb_cache with 12886 * KM_NOSLEEP 12887 * endif 12888 */ 12889 if (tsb_lgrp_affinity) 12890 lgrpid = lgrp_home_id(curthread); 12891 if (lgrpid == LGRP_NONE) 12892 lgrpid = 0; /* use lgrp of boot CPU */ 12893 12894 if (tsbbytes > MMU_PAGESIZE) { 12895 if (tsbbytes > MMU_PAGESIZE4M) { 12896 vmp = kmem_bigtsb_default_arena[lgrpid]; 12897 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12898 0, 0, NULL, NULL, VM_NOSLEEP); 12899 } else { 12900 vmp = kmem_tsb_default_arena[lgrpid]; 12901 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12902 0, 0, NULL, NULL, VM_NOSLEEP); 12903 } 12904 #ifdef DEBUG 12905 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 12906 #else /* !DEBUG */ 12907 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 12908 #endif /* DEBUG */ 12909 kmem_cachep = sfmmu_tsb8k_cache; 12910 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 12911 ASSERT(vaddr != NULL); 12912 } else { 12913 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 12914 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 12915 } 12916 12917 tsbinfo->tsb_cache = kmem_cachep; 12918 tsbinfo->tsb_vmp = vmp; 12919 12920 if (vaddr == NULL) { 12921 return (EAGAIN); 12922 } 12923 12924 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 12925 kmem_cachep = tsbinfo->tsb_cache; 12926 12927 /* 12928 * If we are allocating from outside the cage, then we need to 12929 * register a relocation callback handler. Note that for now 12930 * since pseudo mappings always hang off of the slab's root page, 12931 * we need only lock the first 8K of the TSB slab. This is a bit 12932 * hacky but it is good for performance. 12933 */ 12934 if (kmem_cachep != sfmmu_tsb8k_cache) { 12935 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 12936 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 12937 ASSERT(ret == 0); 12938 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 12939 cbflags, (void *)tsbinfo, &pfn, NULL); 12940 12941 /* 12942 * Need to free up resources if we could not successfully 12943 * add the callback function and return an error condition. 12944 */ 12945 if (ret != 0) { 12946 if (kmem_cachep) { 12947 kmem_cache_free(kmem_cachep, vaddr); 12948 } else { 12949 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 12950 } 12951 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 12952 S_WRITE); 12953 return (EAGAIN); 12954 } 12955 } else { 12956 /* 12957 * Since allocation of 8K TSBs from heap is rare and occurs 12958 * during memory pressure we allocate them from permanent 12959 * memory rather than using callbacks to get the PFN. 
12960 */ 12961 pfn = hat_getpfnum(kas.a_hat, vaddr); 12962 } 12963 12964 tsbinfo->tsb_va = vaddr; 12965 tsbinfo->tsb_szc = tsbcode; 12966 tsbinfo->tsb_ttesz_mask = tteszmask; 12967 tsbinfo->tsb_next = NULL; 12968 tsbinfo->tsb_flags = 0; 12969 12970 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 12971 12972 sfmmu_inv_tsb(vaddr, tsbbytes); 12973 12974 if (kmem_cachep != sfmmu_tsb8k_cache) { 12975 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 12976 } 12977 12978 return (0); 12979 } 12980 12981 /* 12982 * Initialize per cpu tsb and per cpu tsbmiss_area 12983 */ 12984 void 12985 sfmmu_init_tsbs(void) 12986 { 12987 int i; 12988 struct tsbmiss *tsbmissp; 12989 struct kpmtsbm *kpmtsbmp; 12990 #ifndef sun4v 12991 extern int dcache_line_mask; 12992 #endif /* sun4v */ 12993 extern uint_t vac_colors; 12994 12995 /* 12996 * Init. tsb miss area. 12997 */ 12998 tsbmissp = tsbmiss_area; 12999 13000 for (i = 0; i < NCPU; tsbmissp++, i++) { 13001 /* 13002 * initialize the tsbmiss area. 13003 * Do this for all possible CPUs as some may be added 13004 * while the system is running. There is no cost to this. 13005 */ 13006 tsbmissp->ksfmmup = ksfmmup; 13007 #ifndef sun4v 13008 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13009 #endif /* sun4v */ 13010 tsbmissp->khashstart = 13011 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13012 tsbmissp->uhashstart = 13013 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13014 tsbmissp->khashsz = khmehash_num; 13015 tsbmissp->uhashsz = uhmehash_num; 13016 } 13017 13018 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13019 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13020 13021 if (kpm_enable == 0) 13022 return; 13023 13024 /* -- Begin KPM specific init -- */ 13025 13026 if (kpm_smallpages) { 13027 /* 13028 * If we're using base pagesize pages for seg_kpm 13029 * mappings, we use the kernel TSB since we can't afford 13030 * to allocate a second huge TSB for these mappings. 13031 */ 13032 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13033 kpm_tsbsz = ktsb_szcode; 13034 kpmsm_tsbbase = kpm_tsbbase; 13035 kpmsm_tsbsz = kpm_tsbsz; 13036 } else { 13037 /* 13038 * In VAC conflict case, just put the entries in the 13039 * kernel 8K indexed TSB for now so we can find them. 13040 * This could really be changed in the future if we feel 13041 * the need... 13042 */ 13043 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13044 kpmsm_tsbsz = ktsb_szcode; 13045 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13046 kpm_tsbsz = ktsb4m_szcode; 13047 } 13048 13049 kpmtsbmp = kpmtsbm_area; 13050 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13051 /* 13052 * Initialize the kpmtsbm area. 13053 * Do this for all possible CPUs as some may be added 13054 * while the system is running. There is no cost to this. 13055 */ 13056 kpmtsbmp->vbase = kpm_vbase; 13057 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13058 kpmtsbmp->sz_shift = kpm_size_shift; 13059 kpmtsbmp->kpmp_shift = kpmp_shift; 13060 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13061 if (kpm_smallpages == 0) { 13062 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13063 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13064 } else { 13065 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13066 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13067 } 13068 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13069 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13070 #ifdef DEBUG 13071 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 13072 #endif /* DEBUG */ 13073 if (ktsb_phys) 13074 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13075 } 13076 13077 /* -- End KPM specific init -- */ 13078 } 13079 13080 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13081 struct tsb_info ktsb_info[2]; 13082 13083 /* 13084 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13085 */ 13086 void 13087 sfmmu_init_ktsbinfo() 13088 { 13089 ASSERT(ksfmmup != NULL); 13090 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13091 /* 13092 * Allocate tsbinfos for kernel and copy in data 13093 * to make debug easier and sun4v setup easier. 13094 */ 13095 ktsb_info[0].tsb_sfmmu = ksfmmup; 13096 ktsb_info[0].tsb_szc = ktsb_szcode; 13097 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13098 ktsb_info[0].tsb_va = ktsb_base; 13099 ktsb_info[0].tsb_pa = ktsb_pbase; 13100 ktsb_info[0].tsb_flags = 0; 13101 ktsb_info[0].tsb_tte.ll = 0; 13102 ktsb_info[0].tsb_cache = NULL; 13103 13104 ktsb_info[1].tsb_sfmmu = ksfmmup; 13105 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13106 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13107 ktsb_info[1].tsb_va = ktsb4m_base; 13108 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13109 ktsb_info[1].tsb_flags = 0; 13110 ktsb_info[1].tsb_tte.ll = 0; 13111 ktsb_info[1].tsb_cache = NULL; 13112 13113 /* Link them into ksfmmup. */ 13114 ktsb_info[0].tsb_next = &ktsb_info[1]; 13115 ktsb_info[1].tsb_next = NULL; 13116 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13117 13118 sfmmu_setup_tsbinfo(ksfmmup); 13119 } 13120 13121 /* 13122 * Cache the last value returned from va_to_pa(). If the VA specified 13123 * in the current call to cached_va_to_pa() maps to the same Page (as the 13124 * previous call to cached_va_to_pa()), then compute the PA using 13125 * cached info, else call va_to_pa(). 13126 * 13127 * Note: this function is neither MT-safe nor consistent in the presence 13128 * of multiple, interleaved threads. This function was created to enable 13129 * an optimization used during boot (at a point when there's only one thread 13130 * executing on the "boot CPU", and before startup_vm() has been called). 13131 */ 13132 static uint64_t 13133 cached_va_to_pa(void *vaddr) 13134 { 13135 static uint64_t prev_vaddr_base = 0; 13136 static uint64_t prev_pfn = 0; 13137 13138 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13139 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13140 } else { 13141 uint64_t pa = va_to_pa(vaddr); 13142 13143 if (pa != ((uint64_t)-1)) { 13144 /* 13145 * Computed physical address is valid. Cache its 13146 * related info for the next cached_va_to_pa() call. 13147 */ 13148 prev_pfn = pa & MMU_PAGEMASK; 13149 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13150 } 13151 13152 return (pa); 13153 } 13154 } 13155 13156 /* 13157 * Carve up our nucleus hblk region. We may allocate more hblks than 13158 * asked due to rounding errors but we are guaranteed to have at least 13159 * enough space to allocate the requested number of hblk8's and hblk1's. 
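* Layout: hblk8's are carved from the front of the nucleus area and hblk1's from whatever remains; each carved block gets hblk_nuc_bit set and its physical address precomputed into hblk_nextpa via cached_va_to_pa().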
13160 */ 13161 void 13162 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 13163 { 13164 struct hme_blk *hmeblkp; 13165 size_t hme8blk_sz, hme1blk_sz; 13166 size_t i; 13167 size_t hblk8_bound; 13168 ulong_t j = 0, k = 0; 13169 13170 ASSERT(addr != NULL && size != 0); 13171 13172 /* Need to use proper structure alignment */ 13173 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 13174 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 13175 13176 nucleus_hblk8.list = (void *)addr; 13177 nucleus_hblk8.index = 0; 13178 13179 /* 13180 * Use as much memory as possible for hblk8's since we 13181 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 13182 * We need to hold back enough space for the hblk1's which 13183 * we'll allocate next. 13184 */ 13185 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 13186 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 13187 hmeblkp = (struct hme_blk *)addr; 13188 addr += hme8blk_sz; 13189 hmeblkp->hblk_nuc_bit = 1; 13190 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13191 } 13192 nucleus_hblk8.len = j; 13193 ASSERT(j >= nhblk8); 13194 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 13195 13196 nucleus_hblk1.list = (void *)addr; 13197 nucleus_hblk1.index = 0; 13198 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 13199 hmeblkp = (struct hme_blk *)addr; 13200 addr += hme1blk_sz; 13201 hmeblkp->hblk_nuc_bit = 1; 13202 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13203 } 13204 ASSERT(k >= nhblk1); 13205 nucleus_hblk1.len = k; 13206 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 13207 } 13208 13209 /* 13210 * This function is currently not supported on this platform. For what 13211 * it's supposed to do, see hat.c and hat_srmmu.c. 13212 */ 13213 /* ARGSUSED */ 13214 faultcode_t 13215 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 13216 uint_t flags) 13217 { 13218 return (FC_NOSUPPORT); 13219 } 13220 13221 /* 13222 * Searches the mapping list of the page for a mapping of the same size. If not 13223 * found, the corresponding bit is cleared in the p_index field. When large 13224 * pages are more prevalent in the system, we can maintain the mapping list 13225 * in order and we don't have to traverse the list each time. Just check the 13226 * next and prev entries, and if both are of different size, we clear the bit. 13227 */ 13228 static void 13229 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 13230 { 13231 struct sf_hment *sfhmep; 13232 int index; 13233 pgcnt_t npgs; 13234 13235 ASSERT(ttesz > TTE8K); 13236 13237 ASSERT(sfmmu_mlist_held(pp)); 13238 13239 ASSERT(PP_ISMAPPED_LARGE(pp)); 13240 13241 /* 13242 * Traverse the mapping list looking for another mapping of the same size, 13243 * since we only want to clear the index field if all mappings of 13244 * that size are gone. 13245 */ 13246 13247 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 13248 if (IS_PAHME(sfhmep)) 13249 continue; 13250 if (hme_size(sfhmep) == ttesz) { 13251 /* 13252 * Another mapping of the same size; don't clear index. 13253 */ 13254 return; 13255 } 13256 } 13257 13258 /* 13259 * Clear the p_index bit for the large page.
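* The bit is cleared in every constituent 8K page_t of the large page, TTEPAGES(ttesz) of them, walked via PP_PAGENEXT() below.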
13260 */ 13261 index = PAGESZ_TO_INDEX(ttesz); 13262 npgs = TTEPAGES(ttesz); 13263 while (npgs-- > 0) { 13264 ASSERT(pp->p_index & index); 13265 pp->p_index &= ~index; 13266 pp = PP_PAGENEXT(pp); 13267 } 13268 } 13269 13270 /* 13271 * return supported features 13272 */ 13273 /* ARGSUSED */ 13274 int 13275 hat_supported(enum hat_features feature, void *arg) 13276 { 13277 switch (feature) { 13278 case HAT_SHARED_PT: 13279 case HAT_DYNAMIC_ISM_UNMAP: 13280 case HAT_VMODSORT: 13281 return (1); 13282 case HAT_SHARED_REGIONS: 13283 if (shctx_on) 13284 return (1); 13285 else 13286 return (0); 13287 default: 13288 return (0); 13289 } 13290 } 13291 13292 void 13293 hat_enter(struct hat *hat) 13294 { 13295 hatlock_t *hatlockp; 13296 13297 if (hat != ksfmmup) { 13298 hatlockp = TSB_HASH(hat); 13299 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13300 } 13301 } 13302 13303 void 13304 hat_exit(struct hat *hat) 13305 { 13306 hatlock_t *hatlockp; 13307 13308 if (hat != ksfmmup) { 13309 hatlockp = TSB_HASH(hat); 13310 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13311 } 13312 } 13313 13314 /*ARGSUSED*/ 13315 void 13316 hat_reserve(struct as *as, caddr_t addr, size_t len) 13317 { 13318 } 13319 13320 static void 13321 hat_kstat_init(void) 13322 { 13323 kstat_t *ksp; 13324 13325 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13326 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13327 KSTAT_FLAG_VIRTUAL); 13328 if (ksp) { 13329 ksp->ks_data = (void *) &sfmmu_global_stat; 13330 kstat_install(ksp); 13331 } 13332 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13333 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13334 KSTAT_FLAG_VIRTUAL); 13335 if (ksp) { 13336 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13337 kstat_install(ksp); 13338 } 13339 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13340 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13341 KSTAT_FLAG_WRITABLE); 13342 if (ksp) { 13343 ksp->ks_update = sfmmu_kstat_percpu_update; 13344 kstat_install(ksp); 13345 } 13346 } 13347 13348 /* ARGSUSED */ 13349 static int 13350 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13351 { 13352 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13353 struct tsbmiss *tsbm = tsbmiss_area; 13354 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13355 int i; 13356 13357 ASSERT(cpu_kstat); 13358 if (rw == KSTAT_READ) { 13359 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13360 cpu_kstat->sf_itlb_misses = 0; 13361 cpu_kstat->sf_dtlb_misses = 0; 13362 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13363 tsbm->uprot_traps; 13364 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13365 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13366 cpu_kstat->sf_tsb_hits = 0; 13367 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13368 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13369 } 13370 } else { 13371 /* KSTAT_WRITE is used to clear stats */ 13372 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13373 tsbm->utsb_misses = 0; 13374 tsbm->ktsb_misses = 0; 13375 tsbm->uprot_traps = 0; 13376 tsbm->kprot_traps = 0; 13377 kpmtsbm->kpm_dtlb_misses = 0; 13378 kpmtsbm->kpm_tsb_misses = 0; 13379 } 13380 } 13381 return (0); 13382 } 13383 13384 #ifdef DEBUG 13385 13386 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13387 13388 /* 13389 * A tte checker. *orig_old is the value we read before cas. 13390 * *cur is the value returned by cas. 13391 * *new is the desired value when we do the cas. 13392 * 13393 * *hmeblkp is currently unused. 
13394 */ 13395 13396 /* ARGSUSED */ 13397 void 13398 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13399 { 13400 pfn_t i, j, k; 13401 int cpuid = CPU->cpu_id; 13402 13403 gorig[cpuid] = orig_old; 13404 gcur[cpuid] = cur; 13405 gnew[cpuid] = new; 13406 13407 #ifdef lint 13408 hmeblkp = hmeblkp; 13409 #endif 13410 13411 if (TTE_IS_VALID(orig_old)) { 13412 if (TTE_IS_VALID(cur)) { 13413 i = TTE_TO_TTEPFN(orig_old); 13414 j = TTE_TO_TTEPFN(cur); 13415 k = TTE_TO_TTEPFN(new); 13416 if (i != j) { 13417 /* remap error? */ 13418 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13419 } 13420 13421 if (i != k) { 13422 /* remap error? */ 13423 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13424 } 13425 } else { 13426 if (TTE_IS_VALID(new)) { 13427 panic("chk_tte: invalid cur? "); 13428 } 13429 13430 i = TTE_TO_TTEPFN(orig_old); 13431 k = TTE_TO_TTEPFN(new); 13432 if (i != k) { 13433 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13434 } 13435 } 13436 } else { 13437 if (TTE_IS_VALID(cur)) { 13438 j = TTE_TO_TTEPFN(cur); 13439 if (TTE_IS_VALID(new)) { 13440 k = TTE_TO_TTEPFN(new); 13441 if (j != k) { 13442 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13443 j, k); 13444 } 13445 } else { 13446 panic("chk_tte: why here?"); 13447 } 13448 } else { 13449 if (!TTE_IS_VALID(new)) { 13450 panic("chk_tte: why here2 ?"); 13451 } 13452 } 13453 } 13454 } 13455 13456 #endif /* DEBUG */ 13457 13458 extern void prefetch_tsbe_read(struct tsbe *); 13459 extern void prefetch_tsbe_write(struct tsbe *); 13460 13461 13462 /* 13463 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13464 * us optimal performance on Cheetah+. You can only have 8 outstanding 13465 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13466 * prefetch to make the most utilization of the prefetch capability. 13467 */ 13468 #define TSBE_PREFETCH_STRIDE (7) 13469 13470 void 13471 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13472 { 13473 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13474 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13475 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13476 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13477 struct tsbe *old; 13478 struct tsbe *new; 13479 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13480 uint64_t va; 13481 int new_offset; 13482 int i; 13483 int vpshift; 13484 int last_prefetch; 13485 13486 if (old_bytes == new_bytes) { 13487 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13488 } else { 13489 13490 /* 13491 * A TSBE is 16 bytes which means there are four TSBE's per 13492 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13493 */ 13494 old = (struct tsbe *)old_tsbinfo->tsb_va; 13495 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13496 for (i = 0; i < old_entries; i++, old++) { 13497 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13498 prefetch_tsbe_read(old); 13499 if (!old->tte_tag.tag_invalid) { 13500 /* 13501 * We have a valid TTE to remap. Check the 13502 * size. We won't remap 64K or 512K TTEs 13503 * because they span more than one TSB entry 13504 * and are indexed using an 8K virt. page. 13505 * Ditto for 32M and 256M TTEs. 
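* (In effect only 8K- and 4M-sized TTEs end up being copied into the new TSB.)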
13506 */ 13507 if (TTE_CSZ(&old->tte_data) == TTE64K || 13508 TTE_CSZ(&old->tte_data) == TTE512K) 13509 continue; 13510 if (mmu_page_sizes == max_mmu_page_sizes) { 13511 if (TTE_CSZ(&old->tte_data) == TTE32M || 13512 TTE_CSZ(&old->tte_data) == TTE256M) 13513 continue; 13514 } 13515 13516 /* clear the lower 22 bits of the va */ 13517 va = *(uint64_t *)old << 22; 13518 /* turn va into a virtual pfn */ 13519 va >>= 22 - TSB_START_SIZE; 13520 /* 13521 * or in bits from the offset in the tsb 13522 * to get the real virtual pfn. These 13523 * correspond to bits [21:13] in the va 13524 */ 13525 vpshift = 13526 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13527 0x1ff; 13528 va |= (i << vpshift); 13529 va >>= vpshift; 13530 new_offset = va & (new_entries - 1); 13531 new = new_base + new_offset; 13532 prefetch_tsbe_write(new); 13533 *new = *old; 13534 } 13535 } 13536 } 13537 } 13538 13539 /* 13540 * unused in sfmmu 13541 */ 13542 void 13543 hat_dump(void) 13544 { 13545 } 13546 13547 /* 13548 * Called when a thread is exiting and we have switched to the kernel address 13549 * space. Perform the same VM initialization resume() uses when switching 13550 * processes. 13551 * 13552 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13553 * we call it anyway in case the semantics change in the future. 13554 */ 13555 /*ARGSUSED*/ 13556 void 13557 hat_thread_exit(kthread_t *thd) 13558 { 13559 uint_t pgsz_cnum; 13560 uint_t pstate_save; 13561 13562 ASSERT(thd->t_procp->p_as == &kas); 13563 13564 pgsz_cnum = KCONTEXT; 13565 #ifdef sun4u 13566 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13567 #endif 13568 13569 /* 13570 * Note that sfmmu_load_mmustate() is currently a no-op for 13571 * kernel threads. We need to disable interrupts here, 13572 * simply because otherwise sfmmu_load_mmustate() would panic 13573 * if the caller does not disable interrupts. 13574 */ 13575 pstate_save = sfmmu_disable_intrs(); 13576 13577 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13578 sfmmu_setctx_sec(pgsz_cnum); 13579 sfmmu_load_mmustate(ksfmmup); 13580 sfmmu_enable_intrs(pstate_save); 13581 } 13582 13583 13584 /* 13585 * SRD support 13586 */ 13587 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13588 (((uintptr_t)(vp)) >> 11)) & \ 13589 srd_hashmask) 13590 13591 /* 13592 * Attach the process to the srd struct associated with the exec vnode 13593 * from which the process is started. 
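* The exec vnode is held on each join and released again in sfmmu_leave_srd(). An existing srd found in the hash bucket just gets its refcnt bumped; otherwise a new one is allocated and inserted, with the bucket re-checked under srdb_lock to catch a racing insert.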
13594 */ 13595 void 13596 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13597 { 13598 uint_t hash = SRD_HASH_FUNCTION(evp); 13599 sf_srd_t *srdp; 13600 sf_srd_t *newsrdp; 13601 13602 ASSERT(sfmmup != ksfmmup); 13603 ASSERT(sfmmup->sfmmu_srdp == NULL); 13604 13605 if (!shctx_on) { 13606 return; 13607 } 13608 13609 VN_HOLD(evp); 13610 13611 if (srd_buckets[hash].srdb_srdp != NULL) { 13612 mutex_enter(&srd_buckets[hash].srdb_lock); 13613 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13614 srdp = srdp->srd_hash) { 13615 if (srdp->srd_evp == evp) { 13616 ASSERT(srdp->srd_refcnt >= 0); 13617 sfmmup->sfmmu_srdp = srdp; 13618 atomic_inc_32( 13619 (volatile uint_t *)&srdp->srd_refcnt); 13620 mutex_exit(&srd_buckets[hash].srdb_lock); 13621 return; 13622 } 13623 } 13624 mutex_exit(&srd_buckets[hash].srdb_lock); 13625 } 13626 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13627 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13628 13629 newsrdp->srd_evp = evp; 13630 newsrdp->srd_refcnt = 1; 13631 newsrdp->srd_hmergnfree = NULL; 13632 newsrdp->srd_ismrgnfree = NULL; 13633 13634 mutex_enter(&srd_buckets[hash].srdb_lock); 13635 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13636 srdp = srdp->srd_hash) { 13637 if (srdp->srd_evp == evp) { 13638 ASSERT(srdp->srd_refcnt >= 0); 13639 sfmmup->sfmmu_srdp = srdp; 13640 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt); 13641 mutex_exit(&srd_buckets[hash].srdb_lock); 13642 kmem_cache_free(srd_cache, newsrdp); 13643 return; 13644 } 13645 } 13646 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13647 srd_buckets[hash].srdb_srdp = newsrdp; 13648 sfmmup->sfmmu_srdp = newsrdp; 13649 13650 mutex_exit(&srd_buckets[hash].srdb_lock); 13651 13652 } 13653 13654 static void 13655 sfmmu_leave_srd(sfmmu_t *sfmmup) 13656 { 13657 vnode_t *evp; 13658 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13659 uint_t hash; 13660 sf_srd_t **prev_srdpp; 13661 sf_region_t *rgnp; 13662 sf_region_t *nrgnp; 13663 #ifdef DEBUG 13664 int rgns = 0; 13665 #endif 13666 int i; 13667 13668 ASSERT(sfmmup != ksfmmup); 13669 ASSERT(srdp != NULL); 13670 ASSERT(srdp->srd_refcnt > 0); 13671 ASSERT(sfmmup->sfmmu_scdp == NULL); 13672 ASSERT(sfmmup->sfmmu_free == 1); 13673 13674 sfmmup->sfmmu_srdp = NULL; 13675 evp = srdp->srd_evp; 13676 ASSERT(evp != NULL); 13677 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) { 13678 VN_RELE(evp); 13679 return; 13680 } 13681 13682 hash = SRD_HASH_FUNCTION(evp); 13683 mutex_enter(&srd_buckets[hash].srdb_lock); 13684 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13685 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13686 if (srdp->srd_evp == evp) { 13687 break; 13688 } 13689 } 13690 if (srdp == NULL || srdp->srd_refcnt) { 13691 mutex_exit(&srd_buckets[hash].srdb_lock); 13692 VN_RELE(evp); 13693 return; 13694 } 13695 *prev_srdpp = srdp->srd_hash; 13696 mutex_exit(&srd_buckets[hash].srdb_lock); 13697 13698 ASSERT(srdp->srd_refcnt == 0); 13699 VN_RELE(evp); 13700 13701 #ifdef DEBUG 13702 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13703 ASSERT(srdp->srd_rgnhash[i] == NULL); 13704 } 13705 #endif /* DEBUG */ 13706 13707 /* free each hme regions in the srd */ 13708 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13709 nrgnp = rgnp->rgn_next; 13710 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13711 ASSERT(rgnp->rgn_refcnt == 0); 13712 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13713 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13714 ASSERT(rgnp->rgn_hmeflags == 0); 13715 
ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13716 #ifdef DEBUG 13717 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13718 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13719 } 13720 rgns++; 13721 #endif /* DEBUG */ 13722 kmem_cache_free(region_cache, rgnp); 13723 } 13724 ASSERT(rgns == srdp->srd_next_hmerid); 13725 13726 #ifdef DEBUG 13727 rgns = 0; 13728 #endif 13729 /* free each ism rgns in the srd */ 13730 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13731 nrgnp = rgnp->rgn_next; 13732 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13733 ASSERT(rgnp->rgn_refcnt == 0); 13734 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13735 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13736 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13737 #ifdef DEBUG 13738 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13739 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13740 } 13741 rgns++; 13742 #endif /* DEBUG */ 13743 kmem_cache_free(region_cache, rgnp); 13744 } 13745 ASSERT(rgns == srdp->srd_next_ismrid); 13746 ASSERT(srdp->srd_ismbusyrgns == 0); 13747 ASSERT(srdp->srd_hmebusyrgns == 0); 13748 13749 srdp->srd_next_ismrid = 0; 13750 srdp->srd_next_hmerid = 0; 13751 13752 bzero((void *)srdp->srd_ismrgnp, 13753 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13754 bzero((void *)srdp->srd_hmergnp, 13755 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13756 13757 ASSERT(srdp->srd_scdp == NULL); 13758 kmem_cache_free(srd_cache, srdp); 13759 } 13760 13761 /* ARGSUSED */ 13762 static int 13763 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13764 { 13765 sf_srd_t *srdp = (sf_srd_t *)buf; 13766 bzero(buf, sizeof (*srdp)); 13767 13768 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13769 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13770 return (0); 13771 } 13772 13773 /* ARGSUSED */ 13774 static void 13775 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 13776 { 13777 sf_srd_t *srdp = (sf_srd_t *)buf; 13778 13779 mutex_destroy(&srdp->srd_mutex); 13780 mutex_destroy(&srdp->srd_scd_mutex); 13781 } 13782 13783 /* 13784 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 13785 * at the same time for the same process and address range. This is ensured by 13786 * the fact that address space is locked as writer when a process joins the 13787 * regions. Therefore there's no need to hold an srd lock during the entire 13788 * execution of hat_join_region()/hat_leave_region(). 13789 */ 13790 13791 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 13792 (((uintptr_t)(obj)) >> 11)) & \ 13793 srd_rgn_hashmask) 13794 /* 13795 * This routine implements the shared context functionality required when 13796 * attaching a segment to an address space. It must be called from 13797 * hat_share() for D(ISM) segments and from segvn_create() for segments 13798 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 13799 * which is saved in the private segment data for hme segments and 13800 * the ism_map structure for ism segments. 
13801 */ 13802 hat_region_cookie_t 13803 hat_join_region(struct hat *sfmmup, caddr_t r_saddr, size_t r_size, 13804 void *r_obj, u_offset_t r_objoff, uchar_t r_perm, uchar_t r_pgszc, 13805 hat_rgn_cb_func_t r_cb_function, uint_t flags) 13806 { 13807 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13808 uint_t rhash; 13809 uint_t rid; 13810 hatlock_t *hatlockp; 13811 sf_region_t *rgnp; 13812 sf_region_t *new_rgnp = NULL; 13813 int i; 13814 uint16_t *nextidp; 13815 sf_region_t **freelistp; 13816 int maxids; 13817 sf_region_t **rarrp; 13818 uint16_t *busyrgnsp; 13819 ulong_t rttecnt; 13820 uchar_t tteflag; 13821 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 13822 int text = (r_type == HAT_REGION_TEXT); 13823 13824 if (srdp == NULL || r_size == 0) { 13825 return (HAT_INVALID_REGION_COOKIE); 13826 } 13827 13828 ASSERT(sfmmup != ksfmmup); 13829 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 13830 ASSERT(srdp->srd_refcnt > 0); 13831 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 13832 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 13833 ASSERT(r_pgszc < mmu_page_sizes); 13834 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 13835 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 13836 panic("hat_join_region: region addr or size is not aligned\n"); 13837 } 13838 13839 13840 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 13841 SFMMU_REGION_HME; 13842 /* 13843 * Currently only support shared hmes for the read only main text 13844 * region. 13845 */ 13846 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 13847 (r_perm & PROT_WRITE))) { 13848 return (HAT_INVALID_REGION_COOKIE); 13849 } 13850 13851 rhash = RGN_HASH_FUNCTION(r_obj); 13852 13853 if (r_type == SFMMU_REGION_ISM) { 13854 nextidp = &srdp->srd_next_ismrid; 13855 freelistp = &srdp->srd_ismrgnfree; 13856 maxids = SFMMU_MAX_ISM_REGIONS; 13857 rarrp = srdp->srd_ismrgnp; 13858 busyrgnsp = &srdp->srd_ismbusyrgns; 13859 } else { 13860 nextidp = &srdp->srd_next_hmerid; 13861 freelistp = &srdp->srd_hmergnfree; 13862 maxids = SFMMU_MAX_HME_REGIONS; 13863 rarrp = srdp->srd_hmergnp; 13864 busyrgnsp = &srdp->srd_hmebusyrgns; 13865 } 13866 13867 mutex_enter(&srdp->srd_mutex); 13868 13869 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 13870 rgnp = rgnp->rgn_hash) { 13871 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 13872 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 13873 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 13874 break; 13875 } 13876 } 13877 13878 rfound: 13879 if (rgnp != NULL) { 13880 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 13881 ASSERT(rgnp->rgn_cb_function == r_cb_function); 13882 ASSERT(rgnp->rgn_refcnt >= 0); 13883 rid = rgnp->rgn_id; 13884 ASSERT(rid < maxids); 13885 ASSERT(rarrp[rid] == rgnp); 13886 ASSERT(rid < *nextidp); 13887 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt); 13888 mutex_exit(&srdp->srd_mutex); 13889 if (new_rgnp != NULL) { 13890 kmem_cache_free(region_cache, new_rgnp); 13891 } 13892 if (r_type == SFMMU_REGION_HME) { 13893 int myjoin = 13894 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 13895 13896 sfmmu_link_to_hmeregion(sfmmup, rgnp); 13897 /* 13898 * bitmap should be updated after linking sfmmu on 13899 * region list so that pageunload() doesn't skip 13900 * TSB/TLB flush. As soon as bitmap is updated another 13901 * thread in this process can already start accessing 13902 * this region. 13903 */ 13904 /* 13905 * Normally ttecnt accounting is done as part of 13906 * pagefault handling. 
But a process may not take any 13907 * pagefaults on shared hmeblks created by some other 13908 * process. To compensate for this assume that the 13909 * entire region will end up faulted in using 13910 * the region's pagesize. 13911 * 13912 */ 13913 if (r_pgszc > TTE8K) { 13914 tteflag = 1 << r_pgszc; 13915 if (disable_large_pages & tteflag) { 13916 tteflag = 0; 13917 } 13918 } else { 13919 tteflag = 0; 13920 } 13921 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 13922 hatlockp = sfmmu_hat_enter(sfmmup); 13923 sfmmup->sfmmu_rtteflags |= tteflag; 13924 sfmmu_hat_exit(hatlockp); 13925 } 13926 hatlockp = sfmmu_hat_enter(sfmmup); 13927 13928 /* 13929 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 13930 * region to allow for large page allocation failure. 13931 */ 13932 if (r_pgszc >= TTE4M) { 13933 sfmmup->sfmmu_tsb0_4minflcnt += 13934 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 13935 } 13936 13937 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 13938 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 13939 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 13940 rttecnt); 13941 13942 if (text && r_pgszc >= TTE4M && 13943 (tteflag || ((disable_large_pages >> TTE4M) & 13944 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 13945 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 13946 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 13947 } 13948 13949 sfmmu_hat_exit(hatlockp); 13950 /* 13951 * On Panther we need to make sure TLB is programmed 13952 * to accept 32M/256M pages. Call 13953 * sfmmu_check_page_sizes() now to make sure TLB is 13954 * setup before making hmeregions visible to other 13955 * threads. 13956 */ 13957 sfmmu_check_page_sizes(sfmmup, 1); 13958 hatlockp = sfmmu_hat_enter(sfmmup); 13959 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 13960 13961 /* 13962 * if context is invalid tsb miss exception code will 13963 * call sfmmu_check_page_sizes() and update tsbmiss 13964 * area later. 13965 */ 13966 kpreempt_disable(); 13967 if (myjoin && 13968 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 13969 != INVALID_CONTEXT)) { 13970 struct tsbmiss *tsbmp; 13971 13972 tsbmp = &tsbmiss_area[CPU->cpu_id]; 13973 ASSERT(sfmmup == tsbmp->usfmmup); 13974 BT_SET(tsbmp->shmermap, rid); 13975 if (r_pgszc > TTE64K) { 13976 tsbmp->uhat_rtteflags |= tteflag; 13977 } 13978 13979 } 13980 kpreempt_enable(); 13981 13982 sfmmu_hat_exit(hatlockp); 13983 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 13984 HAT_INVALID_REGION_COOKIE); 13985 } else { 13986 hatlockp = sfmmu_hat_enter(sfmmup); 13987 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 13988 sfmmu_hat_exit(hatlockp); 13989 } 13990 ASSERT(rid < maxids); 13991 13992 if (r_type == SFMMU_REGION_ISM) { 13993 sfmmu_find_scd(sfmmup); 13994 } 13995 return ((hat_region_cookie_t)((uint64_t)rid)); 13996 } 13997 13998 ASSERT(new_rgnp == NULL); 13999 14000 if (*busyrgnsp >= maxids) { 14001 mutex_exit(&srdp->srd_mutex); 14002 return (HAT_INVALID_REGION_COOKIE); 14003 } 14004 14005 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14006 if (*freelistp != NULL) { 14007 rgnp = *freelistp; 14008 *freelistp = rgnp->rgn_next; 14009 ASSERT(rgnp->rgn_id < *nextidp); 14010 ASSERT(rgnp->rgn_id < maxids); 14011 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14012 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14013 == r_type); 14014 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14015 ASSERT(rgnp->rgn_hmeflags == 0); 14016 } else { 14017 /* 14018 * release local locks before memory allocation. 
14019 */ 14020 mutex_exit(&srdp->srd_mutex); 14021 14022 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14023 14024 mutex_enter(&srdp->srd_mutex); 14025 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14026 rgnp = rgnp->rgn_hash) { 14027 if (rgnp->rgn_saddr == r_saddr && 14028 rgnp->rgn_size == r_size && 14029 rgnp->rgn_obj == r_obj && 14030 rgnp->rgn_objoff == r_objoff && 14031 rgnp->rgn_perm == r_perm && 14032 rgnp->rgn_pgszc == r_pgszc) { 14033 break; 14034 } 14035 } 14036 if (rgnp != NULL) { 14037 goto rfound; 14038 } 14039 14040 if (*nextidp >= maxids) { 14041 mutex_exit(&srdp->srd_mutex); 14042 goto fail; 14043 } 14044 rgnp = new_rgnp; 14045 new_rgnp = NULL; 14046 rgnp->rgn_id = (*nextidp)++; 14047 ASSERT(rgnp->rgn_id < maxids); 14048 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14049 rarrp[rgnp->rgn_id] = rgnp; 14050 } 14051 14052 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14053 ASSERT(rgnp->rgn_hmeflags == 0); 14054 #ifdef DEBUG 14055 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14056 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14057 } 14058 #endif 14059 rgnp->rgn_saddr = r_saddr; 14060 rgnp->rgn_size = r_size; 14061 rgnp->rgn_obj = r_obj; 14062 rgnp->rgn_objoff = r_objoff; 14063 rgnp->rgn_perm = r_perm; 14064 rgnp->rgn_pgszc = r_pgszc; 14065 rgnp->rgn_flags = r_type; 14066 rgnp->rgn_refcnt = 0; 14067 rgnp->rgn_cb_function = r_cb_function; 14068 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14069 srdp->srd_rgnhash[rhash] = rgnp; 14070 (*busyrgnsp)++; 14071 ASSERT(*busyrgnsp <= maxids); 14072 goto rfound; 14073 14074 fail: 14075 ASSERT(new_rgnp != NULL); 14076 kmem_cache_free(region_cache, new_rgnp); 14077 return (HAT_INVALID_REGION_COOKIE); 14078 } 14079 14080 /* 14081 * This function implements the shared context functionality required 14082 * when detaching a segment from an address space. It must be called 14083 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14084 * for segments with a valid region_cookie. 14085 * It will also be called from all seg_vn routines which change a 14086 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14087 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14088 * from segvn_fault(). 14089 */ 14090 void 14091 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14092 { 14093 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14094 sf_scd_t *scdp; 14095 uint_t rhash; 14096 uint_t rid = (uint_t)((uint64_t)rcookie); 14097 hatlock_t *hatlockp = NULL; 14098 sf_region_t *rgnp; 14099 sf_region_t **prev_rgnpp; 14100 sf_region_t *cur_rgnp; 14101 void *r_obj; 14102 int i; 14103 caddr_t r_saddr; 14104 caddr_t r_eaddr; 14105 size_t r_size; 14106 uchar_t r_pgszc; 14107 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14108 14109 ASSERT(sfmmup != ksfmmup); 14110 ASSERT(srdp != NULL); 14111 ASSERT(srdp->srd_refcnt > 0); 14112 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14113 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14114 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14115 14116 r_type = (r_type == HAT_REGION_ISM) ? 
SFMMU_REGION_ISM : 14117 SFMMU_REGION_HME; 14118 14119 if (r_type == SFMMU_REGION_ISM) { 14120 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14121 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14122 rgnp = srdp->srd_ismrgnp[rid]; 14123 } else { 14124 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14125 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14126 rgnp = srdp->srd_hmergnp[rid]; 14127 } 14128 ASSERT(rgnp != NULL); 14129 ASSERT(rgnp->rgn_id == rid); 14130 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14131 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14132 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as)); 14133 14134 if (sfmmup->sfmmu_free) { 14135 ulong_t rttecnt; 14136 r_pgszc = rgnp->rgn_pgszc; 14137 r_size = rgnp->rgn_size; 14138 14139 ASSERT(sfmmup->sfmmu_scdp == NULL); 14140 if (r_type == SFMMU_REGION_ISM) { 14141 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14142 } else { 14143 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14144 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14145 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14146 14147 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14148 -rttecnt); 14149 14150 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14151 } 14152 } else if (r_type == SFMMU_REGION_ISM) { 14153 hatlockp = sfmmu_hat_enter(sfmmup); 14154 ASSERT(rid < srdp->srd_next_ismrid); 14155 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14156 scdp = sfmmup->sfmmu_scdp; 14157 if (scdp != NULL && 14158 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14159 sfmmu_leave_scd(sfmmup, r_type); 14160 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14161 } 14162 sfmmu_hat_exit(hatlockp); 14163 } else { 14164 ulong_t rttecnt; 14165 r_pgszc = rgnp->rgn_pgszc; 14166 r_saddr = rgnp->rgn_saddr; 14167 r_size = rgnp->rgn_size; 14168 r_eaddr = r_saddr + r_size; 14169 14170 ASSERT(r_type == SFMMU_REGION_HME); 14171 hatlockp = sfmmu_hat_enter(sfmmup); 14172 ASSERT(rid < srdp->srd_next_hmerid); 14173 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14174 14175 /* 14176 * If region is part of an SCD call sfmmu_leave_scd(). 14177 * Otherwise if process is not exiting and has valid context 14178 * just drop the context on the floor to lose stale TLB 14179 * entries and force the update of tsb miss area to reflect 14180 * the new region map. After that clean our TSB entries. 14181 */ 14182 scdp = sfmmup->sfmmu_scdp; 14183 if (scdp != NULL && 14184 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14185 sfmmu_leave_scd(sfmmup, r_type); 14186 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14187 } 14188 sfmmu_invalidate_ctx(sfmmup); 14189 14190 i = TTE8K; 14191 while (i < mmu_page_sizes) { 14192 if (rgnp->rgn_ttecnt[i] != 0) { 14193 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14194 r_eaddr, i); 14195 if (i < TTE4M) { 14196 i = TTE4M; 14197 continue; 14198 } else { 14199 break; 14200 } 14201 } 14202 i++; 14203 } 14204 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. 
*/ 14205 if (r_pgszc >= TTE4M) { 14206 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14207 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14208 rttecnt); 14209 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14210 } 14211 14212 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14213 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14214 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14215 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14216 14217 sfmmu_hat_exit(hatlockp); 14218 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14219 /* sfmmup left the scd, grow private tsb */ 14220 sfmmu_check_page_sizes(sfmmup, 1); 14221 } else { 14222 sfmmu_check_page_sizes(sfmmup, 0); 14223 } 14224 } 14225 14226 if (r_type == SFMMU_REGION_HME) { 14227 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14228 } 14229 14230 r_obj = rgnp->rgn_obj; 14231 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) { 14232 return; 14233 } 14234 14235 /* 14236 * looks like nobody uses this region anymore. Free it. 14237 */ 14238 rhash = RGN_HASH_FUNCTION(r_obj); 14239 mutex_enter(&srdp->srd_mutex); 14240 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14241 (cur_rgnp = *prev_rgnpp) != NULL; 14242 prev_rgnpp = &cur_rgnp->rgn_hash) { 14243 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14244 break; 14245 } 14246 } 14247 14248 if (cur_rgnp == NULL) { 14249 mutex_exit(&srdp->srd_mutex); 14250 return; 14251 } 14252 14253 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14254 *prev_rgnpp = rgnp->rgn_hash; 14255 if (r_type == SFMMU_REGION_ISM) { 14256 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14257 ASSERT(rid < srdp->srd_next_ismrid); 14258 rgnp->rgn_next = srdp->srd_ismrgnfree; 14259 srdp->srd_ismrgnfree = rgnp; 14260 ASSERT(srdp->srd_ismbusyrgns > 0); 14261 srdp->srd_ismbusyrgns--; 14262 mutex_exit(&srdp->srd_mutex); 14263 return; 14264 } 14265 mutex_exit(&srdp->srd_mutex); 14266 14267 /* 14268 * Destroy region's hmeblks. 14269 */ 14270 sfmmu_unload_hmeregion(srdp, rgnp); 14271 14272 rgnp->rgn_hmeflags = 0; 14273 14274 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14275 ASSERT(rgnp->rgn_id == rid); 14276 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14277 rgnp->rgn_ttecnt[i] = 0; 14278 } 14279 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14280 mutex_enter(&srdp->srd_mutex); 14281 ASSERT(rid < srdp->srd_next_hmerid); 14282 rgnp->rgn_next = srdp->srd_hmergnfree; 14283 srdp->srd_hmergnfree = rgnp; 14284 ASSERT(srdp->srd_hmebusyrgns > 0); 14285 srdp->srd_hmebusyrgns--; 14286 mutex_exit(&srdp->srd_mutex); 14287 } 14288 14289 /* 14290 * For now only called for hmeblk regions and not for ISM regions. 
14291 */ 14292 void 14293 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14294 { 14295 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14296 uint_t rid = (uint_t)((uint64_t)rcookie); 14297 sf_region_t *rgnp; 14298 sf_rgn_link_t *rlink; 14299 sf_rgn_link_t *hrlink; 14300 ulong_t rttecnt; 14301 14302 ASSERT(sfmmup != ksfmmup); 14303 ASSERT(srdp != NULL); 14304 ASSERT(srdp->srd_refcnt > 0); 14305 14306 ASSERT(rid < srdp->srd_next_hmerid); 14307 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14308 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14309 14310 rgnp = srdp->srd_hmergnp[rid]; 14311 ASSERT(rgnp->rgn_refcnt > 0); 14312 ASSERT(rgnp->rgn_id == rid); 14313 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14314 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14315 14316 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt); 14317 14318 /* LINTED: constant in conditional context */ 14319 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14320 ASSERT(rlink != NULL); 14321 mutex_enter(&rgnp->rgn_mutex); 14322 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14323 /* LINTED: constant in conditional context */ 14324 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14325 ASSERT(hrlink != NULL); 14326 ASSERT(hrlink->prev == NULL); 14327 rlink->next = rgnp->rgn_sfmmu_head; 14328 rlink->prev = NULL; 14329 hrlink->prev = sfmmup; 14330 /* 14331 * make sure rlink's next field is correct 14332 * before making this link visible. 14333 */ 14334 membar_stst(); 14335 rgnp->rgn_sfmmu_head = sfmmup; 14336 mutex_exit(&rgnp->rgn_mutex); 14337 14338 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14339 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14340 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14341 /* update tsb0 inflation count */ 14342 if (rgnp->rgn_pgszc >= TTE4M) { 14343 sfmmup->sfmmu_tsb0_4minflcnt += 14344 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14345 } 14346 /* 14347 * Update regionid bitmask without hat lock since no other thread 14348 * can update this region bitmask right now. 
14349 */ 14350 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14351 } 14352 14353 /* ARGSUSED */ 14354 static int 14355 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14356 { 14357 sf_region_t *rgnp = (sf_region_t *)buf; 14358 bzero(buf, sizeof (*rgnp)); 14359 14360 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14361 14362 return (0); 14363 } 14364 14365 /* ARGSUSED */ 14366 static void 14367 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14368 { 14369 sf_region_t *rgnp = (sf_region_t *)buf; 14370 mutex_destroy(&rgnp->rgn_mutex); 14371 } 14372 14373 static int 14374 sfrgnmap_isnull(sf_region_map_t *map) 14375 { 14376 int i; 14377 14378 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14379 if (map->bitmap[i] != 0) { 14380 return (0); 14381 } 14382 } 14383 return (1); 14384 } 14385 14386 static int 14387 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14388 { 14389 int i; 14390 14391 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14392 if (map->bitmap[i] != 0) { 14393 return (0); 14394 } 14395 } 14396 return (1); 14397 } 14398 14399 #ifdef DEBUG 14400 static void 14401 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14402 { 14403 sfmmu_t *sp; 14404 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14405 14406 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14407 ASSERT(srdp == sp->sfmmu_srdp); 14408 if (sp == sfmmup) { 14409 if (onlist) { 14410 return; 14411 } else { 14412 panic("shctx: sfmmu 0x%p found on scd" 14413 "list 0x%p", (void *)sfmmup, 14414 (void *)*headp); 14415 } 14416 } 14417 } 14418 if (onlist) { 14419 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14420 (void *)sfmmup, (void *)*headp); 14421 } else { 14422 return; 14423 } 14424 } 14425 #else /* DEBUG */ 14426 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14427 #endif /* DEBUG */ 14428 14429 /* 14430 * Removes an sfmmu from the SCD sfmmu list. 14431 */ 14432 static void 14433 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14434 { 14435 ASSERT(sfmmup->sfmmu_srdp != NULL); 14436 check_scd_sfmmu_list(headp, sfmmup, 1); 14437 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14438 ASSERT(*headp != sfmmup); 14439 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14440 sfmmup->sfmmu_scd_link.next; 14441 } else { 14442 ASSERT(*headp == sfmmup); 14443 *headp = sfmmup->sfmmu_scd_link.next; 14444 } 14445 if (sfmmup->sfmmu_scd_link.next != NULL) { 14446 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14447 sfmmup->sfmmu_scd_link.prev; 14448 } 14449 } 14450 14451 14452 /* 14453 * Adds an sfmmu to the start of the queue. 14454 */ 14455 static void 14456 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14457 { 14458 check_scd_sfmmu_list(headp, sfmmup, 0); 14459 sfmmup->sfmmu_scd_link.prev = NULL; 14460 sfmmup->sfmmu_scd_link.next = *headp; 14461 if (*headp != NULL) 14462 (*headp)->sfmmu_scd_link.prev = sfmmup; 14463 *headp = sfmmup; 14464 } 14465 14466 /* 14467 * Remove an scd from the start of the queue. 14468 */ 14469 static void 14470 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14471 { 14472 if (scdp->scd_prev != NULL) { 14473 ASSERT(*headp != scdp); 14474 scdp->scd_prev->scd_next = scdp->scd_next; 14475 } else { 14476 ASSERT(*headp == scdp); 14477 *headp = scdp->scd_next; 14478 } 14479 14480 if (scdp->scd_next != NULL) { 14481 scdp->scd_next->scd_prev = scdp->scd_prev; 14482 } 14483 } 14484 14485 /* 14486 * Add an scd to the start of the queue. 
14487 */ 14488 static void 14489 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14490 { 14491 scdp->scd_prev = NULL; 14492 scdp->scd_next = *headp; 14493 if (*headp != NULL) { 14494 (*headp)->scd_prev = scdp; 14495 } 14496 *headp = scdp; 14497 } 14498 14499 static int 14500 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14501 { 14502 uint_t rid; 14503 uint_t i; 14504 uint_t j; 14505 ulong_t w; 14506 sf_region_t *rgnp; 14507 ulong_t tte8k_cnt = 0; 14508 ulong_t tte4m_cnt = 0; 14509 uint_t tsb_szc; 14510 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14511 sfmmu_t *ism_hatid; 14512 struct tsb_info *newtsb; 14513 int szc; 14514 14515 ASSERT(srdp != NULL); 14516 14517 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14518 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14519 continue; 14520 } 14521 j = 0; 14522 while (w) { 14523 if (!(w & 0x1)) { 14524 j++; 14525 w >>= 1; 14526 continue; 14527 } 14528 rid = (i << BT_ULSHIFT) | j; 14529 j++; 14530 w >>= 1; 14531 14532 if (rid < SFMMU_MAX_HME_REGIONS) { 14533 rgnp = srdp->srd_hmergnp[rid]; 14534 ASSERT(rgnp->rgn_id == rid); 14535 ASSERT(rgnp->rgn_refcnt > 0); 14536 14537 if (rgnp->rgn_pgszc < TTE4M) { 14538 tte8k_cnt += rgnp->rgn_size >> 14539 TTE_PAGE_SHIFT(TTE8K); 14540 } else { 14541 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14542 tte4m_cnt += rgnp->rgn_size >> 14543 TTE_PAGE_SHIFT(TTE4M); 14544 /* 14545 * Inflate SCD tsb0 by preallocating 14546 * 1/4 8k ttecnt for 4M regions to 14547 * allow for lgpg alloc failure. 14548 */ 14549 tte8k_cnt += rgnp->rgn_size >> 14550 (TTE_PAGE_SHIFT(TTE8K) + 2); 14551 } 14552 } else { 14553 rid -= SFMMU_MAX_HME_REGIONS; 14554 rgnp = srdp->srd_ismrgnp[rid]; 14555 ASSERT(rgnp->rgn_id == rid); 14556 ASSERT(rgnp->rgn_refcnt > 0); 14557 14558 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14559 ASSERT(ism_hatid->sfmmu_ismhat); 14560 14561 for (szc = 0; szc < TTE4M; szc++) { 14562 tte8k_cnt += 14563 ism_hatid->sfmmu_ttecnt[szc] << 14564 TTE_BSZS_SHIFT(szc); 14565 } 14566 14567 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14568 if (rgnp->rgn_pgszc >= TTE4M) { 14569 tte4m_cnt += rgnp->rgn_size >> 14570 TTE_PAGE_SHIFT(TTE4M); 14571 } 14572 } 14573 } 14574 } 14575 14576 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14577 14578 /* Allocate both the SCD TSBs here. */ 14579 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14580 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14581 (tsb_szc <= TSB_4M_SZCODE || 14582 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14583 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14584 TSB_ALLOC, scsfmmup))) { 14585 14586 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14587 return (TSB_ALLOCFAIL); 14588 } else { 14589 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14590 14591 if (tte4m_cnt) { 14592 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14593 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14594 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14595 (tsb_szc <= TSB_4M_SZCODE || 14596 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14597 TSB4M|TSB32M|TSB256M, 14598 TSB_ALLOC, scsfmmup))) { 14599 /* 14600 * If we fail to allocate the 2nd shared tsb, 14601 * just free the 1st tsb, return failure. 
14602 */ 14603 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14604 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14605 return (TSB_ALLOCFAIL); 14606 } else { 14607 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14608 newtsb->tsb_flags |= TSB_SHAREDCTX; 14609 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14610 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14611 } 14612 } 14613 SFMMU_STAT(sf_scd_1sttsb_alloc); 14614 } 14615 return (TSB_SUCCESS); 14616 } 14617 14618 static void 14619 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14620 { 14621 while (scd_sfmmu->sfmmu_tsb != NULL) { 14622 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14623 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14624 scd_sfmmu->sfmmu_tsb = next; 14625 } 14626 } 14627 14628 /* 14629 * Link the sfmmu onto the hme region list. 14630 */ 14631 void 14632 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14633 { 14634 uint_t rid; 14635 sf_rgn_link_t *rlink; 14636 sfmmu_t *head; 14637 sf_rgn_link_t *hrlink; 14638 14639 rid = rgnp->rgn_id; 14640 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14641 14642 /* LINTED: constant in conditional context */ 14643 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14644 ASSERT(rlink != NULL); 14645 mutex_enter(&rgnp->rgn_mutex); 14646 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14647 rlink->next = NULL; 14648 rlink->prev = NULL; 14649 /* 14650 * make sure rlink's next field is NULL 14651 * before making this link visible. 14652 */ 14653 membar_stst(); 14654 rgnp->rgn_sfmmu_head = sfmmup; 14655 } else { 14656 /* LINTED: constant in conditional context */ 14657 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14658 ASSERT(hrlink != NULL); 14659 ASSERT(hrlink->prev == NULL); 14660 rlink->next = head; 14661 rlink->prev = NULL; 14662 hrlink->prev = sfmmup; 14663 /* 14664 * make sure rlink's next field is correct 14665 * before making this link visible. 14666 */ 14667 membar_stst(); 14668 rgnp->rgn_sfmmu_head = sfmmup; 14669 } 14670 mutex_exit(&rgnp->rgn_mutex); 14671 } 14672 14673 /* 14674 * Unlink the sfmmu from the hme region list. 14675 */ 14676 void 14677 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14678 { 14679 uint_t rid; 14680 sf_rgn_link_t *rlink; 14681 14682 rid = rgnp->rgn_id; 14683 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14684 14685 /* LINTED: constant in conditional context */ 14686 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14687 ASSERT(rlink != NULL); 14688 mutex_enter(&rgnp->rgn_mutex); 14689 if (rgnp->rgn_sfmmu_head == sfmmup) { 14690 sfmmu_t *next = rlink->next; 14691 rgnp->rgn_sfmmu_head = next; 14692 /* 14693 * if we are stopped by xc_attention() after this 14694 * point the forward link walking in 14695 * sfmmu_rgntlb_demap() will work correctly since the 14696 * head correctly points to the next element. 
14697 */ 14698 membar_stst(); 14699 rlink->next = NULL; 14700 ASSERT(rlink->prev == NULL); 14701 if (next != NULL) { 14702 sf_rgn_link_t *nrlink; 14703 /* LINTED: constant in conditional context */ 14704 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14705 ASSERT(nrlink != NULL); 14706 ASSERT(nrlink->prev == sfmmup); 14707 nrlink->prev = NULL; 14708 } 14709 } else { 14710 sfmmu_t *next = rlink->next; 14711 sfmmu_t *prev = rlink->prev; 14712 sf_rgn_link_t *prlink; 14713 14714 ASSERT(prev != NULL); 14715 /* LINTED: constant in conditional context */ 14716 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14717 ASSERT(prlink != NULL); 14718 ASSERT(prlink->next == sfmmup); 14719 prlink->next = next; 14720 /* 14721 * if we are stopped by xc_attention() 14722 * after this point the forward link walking 14723 * will work correctly since the prev element 14724 * correctly points to the next element. 14725 */ 14726 membar_stst(); 14727 rlink->next = NULL; 14728 rlink->prev = NULL; 14729 if (next != NULL) { 14730 sf_rgn_link_t *nrlink; 14731 /* LINTED: constant in conditional context */ 14732 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14733 ASSERT(nrlink != NULL); 14734 ASSERT(nrlink->prev == sfmmup); 14735 nrlink->prev = prev; 14736 } 14737 } 14738 mutex_exit(&rgnp->rgn_mutex); 14739 } 14740 14741 /* 14742 * Link scd sfmmu onto ism or hme region list for each region in the 14743 * scd region map. 14744 */ 14745 void 14746 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14747 { 14748 uint_t rid; 14749 uint_t i; 14750 uint_t j; 14751 ulong_t w; 14752 sf_region_t *rgnp; 14753 sfmmu_t *scsfmmup; 14754 14755 scsfmmup = scdp->scd_sfmmup; 14756 ASSERT(scsfmmup->sfmmu_scdhat); 14757 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14758 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14759 continue; 14760 } 14761 j = 0; 14762 while (w) { 14763 if (!(w & 0x1)) { 14764 j++; 14765 w >>= 1; 14766 continue; 14767 } 14768 rid = (i << BT_ULSHIFT) | j; 14769 j++; 14770 w >>= 1; 14771 14772 if (rid < SFMMU_MAX_HME_REGIONS) { 14773 rgnp = srdp->srd_hmergnp[rid]; 14774 ASSERT(rgnp->rgn_id == rid); 14775 ASSERT(rgnp->rgn_refcnt > 0); 14776 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 14777 } else { 14778 sfmmu_t *ism_hatid = NULL; 14779 ism_ment_t *ism_ment; 14780 rid -= SFMMU_MAX_HME_REGIONS; 14781 rgnp = srdp->srd_ismrgnp[rid]; 14782 ASSERT(rgnp->rgn_id == rid); 14783 ASSERT(rgnp->rgn_refcnt > 0); 14784 14785 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14786 ASSERT(ism_hatid->sfmmu_ismhat); 14787 ism_ment = &scdp->scd_ism_links[rid]; 14788 ism_ment->iment_hat = scsfmmup; 14789 ism_ment->iment_base_va = rgnp->rgn_saddr; 14790 mutex_enter(&ism_mlist_lock); 14791 iment_add(ism_ment, ism_hatid); 14792 mutex_exit(&ism_mlist_lock); 14793 14794 } 14795 } 14796 } 14797 } 14798 /* 14799 * Unlink scd sfmmu from ism or hme region list for each region in the 14800 * scd region map. 
14801 */ 14802 void 14803 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14804 { 14805 uint_t rid; 14806 uint_t i; 14807 uint_t j; 14808 ulong_t w; 14809 sf_region_t *rgnp; 14810 sfmmu_t *scsfmmup; 14811 14812 scsfmmup = scdp->scd_sfmmup; 14813 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14814 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14815 continue; 14816 } 14817 j = 0; 14818 while (w) { 14819 if (!(w & 0x1)) { 14820 j++; 14821 w >>= 1; 14822 continue; 14823 } 14824 rid = (i << BT_ULSHIFT) | j; 14825 j++; 14826 w >>= 1; 14827 14828 if (rid < SFMMU_MAX_HME_REGIONS) { 14829 rgnp = srdp->srd_hmergnp[rid]; 14830 ASSERT(rgnp->rgn_id == rid); 14831 ASSERT(rgnp->rgn_refcnt > 0); 14832 sfmmu_unlink_from_hmeregion(scsfmmup, 14833 rgnp); 14834 14835 } else { 14836 sfmmu_t *ism_hatid = NULL; 14837 ism_ment_t *ism_ment; 14838 rid -= SFMMU_MAX_HME_REGIONS; 14839 rgnp = srdp->srd_ismrgnp[rid]; 14840 ASSERT(rgnp->rgn_id == rid); 14841 ASSERT(rgnp->rgn_refcnt > 0); 14842 14843 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14844 ASSERT(ism_hatid->sfmmu_ismhat); 14845 ism_ment = &scdp->scd_ism_links[rid]; 14846 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 14847 ASSERT(ism_ment->iment_base_va == 14848 rgnp->rgn_saddr); 14849 mutex_enter(&ism_mlist_lock); 14850 iment_sub(ism_ment, ism_hatid); 14851 mutex_exit(&ism_mlist_lock); 14852 14853 } 14854 } 14855 } 14856 } 14857 /* 14858 * Allocates and initialises a new SCD structure, this is called with 14859 * the srd_scd_mutex held and returns with the reference count 14860 * initialised to 1. 14861 */ 14862 static sf_scd_t * 14863 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 14864 { 14865 sf_scd_t *new_scdp; 14866 sfmmu_t *scsfmmup; 14867 int i; 14868 14869 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 14870 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 14871 14872 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 14873 new_scdp->scd_sfmmup = scsfmmup; 14874 scsfmmup->sfmmu_srdp = srdp; 14875 scsfmmup->sfmmu_scdp = new_scdp; 14876 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 14877 scsfmmup->sfmmu_scdhat = 1; 14878 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 14879 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 14880 14881 ASSERT(max_mmu_ctxdoms > 0); 14882 for (i = 0; i < max_mmu_ctxdoms; i++) { 14883 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 14884 scsfmmup->sfmmu_ctxs[i].gnum = 0; 14885 } 14886 14887 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14888 new_scdp->scd_rttecnt[i] = 0; 14889 } 14890 14891 new_scdp->scd_region_map = *new_map; 14892 new_scdp->scd_refcnt = 1; 14893 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 14894 kmem_cache_free(scd_cache, new_scdp); 14895 kmem_cache_free(sfmmuid_cache, scsfmmup); 14896 return (NULL); 14897 } 14898 if (&mmu_init_scd) { 14899 mmu_init_scd(new_scdp); 14900 } 14901 return (new_scdp); 14902 } 14903 14904 /* 14905 * The first phase of a process joining an SCD. The hat structure is 14906 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 14907 * and a cross-call with context invalidation is used to cause the 14908 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 14909 * routine. 
14910 */ 14911 static void 14912 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 14913 { 14914 hatlock_t *hatlockp; 14915 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14916 int i; 14917 sf_scd_t *old_scdp; 14918 14919 ASSERT(srdp != NULL); 14920 ASSERT(scdp != NULL); 14921 ASSERT(scdp->scd_refcnt > 0); 14922 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 14923 14924 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 14925 ASSERT(old_scdp != scdp); 14926 14927 mutex_enter(&old_scdp->scd_mutex); 14928 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 14929 mutex_exit(&old_scdp->scd_mutex); 14930 /* 14931 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 14932 * include the shme rgn ttecnt for rgns that 14933 * were in the old SCD 14934 */ 14935 for (i = 0; i < mmu_page_sizes; i++) { 14936 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 14937 old_scdp->scd_rttecnt[i]); 14938 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 14939 sfmmup->sfmmu_scdrttecnt[i]); 14940 } 14941 } 14942 14943 /* 14944 * Move sfmmu to the scd lists. 14945 */ 14946 mutex_enter(&scdp->scd_mutex); 14947 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 14948 mutex_exit(&scdp->scd_mutex); 14949 SF_SCD_INCR_REF(scdp); 14950 14951 hatlockp = sfmmu_hat_enter(sfmmup); 14952 /* 14953 * For a multi-thread process, we must stop 14954 * all the other threads before joining the scd. 14955 */ 14956 14957 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 14958 14959 sfmmu_invalidate_ctx(sfmmup); 14960 sfmmup->sfmmu_scdp = scdp; 14961 14962 /* 14963 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 14964 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 14965 */ 14966 for (i = 0; i < mmu_page_sizes; i++) { 14967 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 14968 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 14969 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 14970 -sfmmup->sfmmu_scdrttecnt[i]); 14971 } 14972 /* update tsb0 inflation count */ 14973 if (old_scdp != NULL) { 14974 sfmmup->sfmmu_tsb0_4minflcnt += 14975 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 14976 } 14977 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14978 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 14979 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 14980 14981 sfmmu_hat_exit(hatlockp); 14982 14983 if (old_scdp != NULL) { 14984 SF_SCD_DECR_REF(srdp, old_scdp); 14985 } 14986 14987 } 14988 14989 /* 14990 * This routine is called by a process to become part of an SCD. It is called 14991 * from sfmmu_tsbmiss_exception() once most of the initial work has been 14992 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 
14993 */ 14994 static void 14995 sfmmu_finish_join_scd(sfmmu_t *sfmmup) 14996 { 14997 struct tsb_info *tsbinfop; 14998 14999 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15000 ASSERT(sfmmup->sfmmu_scdp != NULL); 15001 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)); 15002 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15003 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)); 15004 15005 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 15006 tsbinfop = tsbinfop->tsb_next) { 15007 if (tsbinfop->tsb_flags & TSB_SWAPPED) { 15008 continue; 15009 } 15010 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG)); 15011 15012 sfmmu_inv_tsb(tsbinfop->tsb_va, 15013 TSB_BYTES(tsbinfop->tsb_szc)); 15014 } 15015 15016 /* Set HAT_CTX1_FLAG for all SCD ISMs */ 15017 sfmmu_ism_hatflags(sfmmup, 1); 15018 15019 SFMMU_STAT(sf_join_scd); 15020 } 15021 15022 /* 15023 * This routine is called in order to check if there is an SCD which matches 15024 * the process's region map; if not, then a new SCD may be created. 15025 */ 15026 static void 15027 sfmmu_find_scd(sfmmu_t *sfmmup) 15028 { 15029 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15030 sf_scd_t *scdp, *new_scdp; 15031 int ret; 15032 15033 ASSERT(srdp != NULL); 15034 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 15035 15036 mutex_enter(&srdp->srd_scd_mutex); 15037 for (scdp = srdp->srd_scdp; scdp != NULL; 15038 scdp = scdp->scd_next) { 15039 SF_RGNMAP_EQUAL(&scdp->scd_region_map, 15040 &sfmmup->sfmmu_region_map, ret); 15041 if (ret == 1) { 15042 SF_SCD_INCR_REF(scdp); 15043 mutex_exit(&srdp->srd_scd_mutex); 15044 sfmmu_join_scd(scdp, sfmmup); 15045 ASSERT(scdp->scd_refcnt >= 2); 15046 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt); 15047 return; 15048 } else { 15049 /* 15050 * If the sfmmu region map is a subset of the scd 15051 * region map, then the assumption is that this process 15052 * will continue attaching to ISM segments until the 15053 * region maps are equal. 15054 */ 15055 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map, 15056 &sfmmup->sfmmu_region_map, ret); 15057 if (ret == 1) { 15058 mutex_exit(&srdp->srd_scd_mutex); 15059 return; 15060 } 15061 } 15062 } 15063 15064 ASSERT(scdp == NULL); 15065 /* 15066 * No matching SCD has been found; create a new one. 15067 */ 15068 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) == 15069 NULL) { 15070 mutex_exit(&srdp->srd_scd_mutex); 15071 return; 15072 } 15073 15074 /* 15075 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd. 15076 */ 15077 15078 /* Set scd_rttecnt for shme rgns in SCD */ 15079 sfmmu_set_scd_rttecnt(srdp, new_scdp); 15080 15081 /* 15082 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists. 15083 */ 15084 sfmmu_link_scd_to_regions(srdp, new_scdp); 15085 sfmmu_add_scd(&srdp->srd_scdp, new_scdp); 15086 SFMMU_STAT_ADD(sf_create_scd, 1); 15087 15088 mutex_exit(&srdp->srd_scd_mutex); 15089 sfmmu_join_scd(new_scdp, sfmmup); 15090 ASSERT(new_scdp->scd_refcnt >= 2); 15091 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt); 15092 } 15093 15094 /* 15095 * This routine is called by a process to remove itself from an SCD. It is 15096 * either called when the process has detached from a segment or from 15097 * hat_free_start() as a result of calling exit.
15098 */ 15099 static void 15100 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15101 { 15102 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15103 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15104 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15105 int i; 15106 15107 ASSERT(scdp != NULL); 15108 ASSERT(srdp != NULL); 15109 15110 if (sfmmup->sfmmu_free) { 15111 /* 15112 * If the process is part of an SCD the sfmmu is unlinked 15113 * from scd_sf_list. 15114 */ 15115 mutex_enter(&scdp->scd_mutex); 15116 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15117 mutex_exit(&scdp->scd_mutex); 15118 /* 15119 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15120 * are about to leave the SCD 15121 */ 15122 for (i = 0; i < mmu_page_sizes; i++) { 15123 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15124 scdp->scd_rttecnt[i]); 15125 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15126 sfmmup->sfmmu_scdrttecnt[i]); 15127 sfmmup->sfmmu_scdrttecnt[i] = 0; 15128 } 15129 sfmmup->sfmmu_scdp = NULL; 15130 15131 SF_SCD_DECR_REF(srdp, scdp); 15132 return; 15133 } 15134 15135 ASSERT(r_type != SFMMU_REGION_ISM || 15136 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15137 ASSERT(scdp->scd_refcnt); 15138 ASSERT(!sfmmup->sfmmu_free); 15139 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15140 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as)); 15141 15142 /* 15143 * Wait for ISM maps to be updated. 15144 */ 15145 if (r_type != SFMMU_REGION_ISM) { 15146 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15147 sfmmup->sfmmu_scdp != NULL) { 15148 cv_wait(&sfmmup->sfmmu_tsb_cv, 15149 HATLOCK_MUTEXP(hatlockp)); 15150 } 15151 15152 if (sfmmup->sfmmu_scdp == NULL) { 15153 sfmmu_hat_exit(hatlockp); 15154 return; 15155 } 15156 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15157 } 15158 15159 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15160 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15161 /* 15162 * Since HAT_JOIN_SCD was set our context 15163 * is still invalid. 15164 */ 15165 } else { 15166 /* 15167 * For a multi-thread process, we must stop 15168 * all the other threads before leaving the scd. 15169 */ 15170 15171 sfmmu_invalidate_ctx(sfmmup); 15172 } 15173 15174 /* Clear all the rid's for ISM, delete flags, etc */ 15175 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15176 sfmmu_ism_hatflags(sfmmup, 0); 15177 15178 /* 15179 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15180 * are in SCD before this sfmmup leaves the SCD. 15181 */ 15182 for (i = 0; i < mmu_page_sizes; i++) { 15183 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15184 scdp->scd_rttecnt[i]); 15185 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15186 sfmmup->sfmmu_scdrttecnt[i]); 15187 sfmmup->sfmmu_scdrttecnt[i] = 0; 15188 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15189 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15190 sfmmup->sfmmu_scdismttecnt[i] = 0; 15191 } 15192 /* update tsb0 inflation count */ 15193 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15194 15195 if (r_type != SFMMU_REGION_ISM) { 15196 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15197 } 15198 sfmmup->sfmmu_scdp = NULL; 15199 15200 sfmmu_hat_exit(hatlockp); 15201 15202 /* 15203 * Unlink sfmmu from scd_sf_list this can be done without holding 15204 * the hat lock as we hold the sfmmu_as lock which prevents 15205 * hat_join_region from adding this thread to the scd again. Other 15206 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15207 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15208 * while holding the hat lock. 
15209 */ 15210 mutex_enter(&scdp->scd_mutex); 15211 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15212 mutex_exit(&scdp->scd_mutex); 15213 SFMMU_STAT(sf_leave_scd); 15214 15215 SF_SCD_DECR_REF(srdp, scdp); 15216 hatlockp = sfmmu_hat_enter(sfmmup); 15217 15218 } 15219 15220 /* 15221 * Unlink and free up an SCD structure with a reference count of 0. 15222 */ 15223 static void 15224 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15225 { 15226 sfmmu_t *scsfmmup; 15227 sf_scd_t *sp; 15228 hatlock_t *shatlockp; 15229 int i, ret; 15230 15231 mutex_enter(&srdp->srd_scd_mutex); 15232 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15233 if (sp == scdp) 15234 break; 15235 } 15236 if (sp == NULL || sp->scd_refcnt) { 15237 mutex_exit(&srdp->srd_scd_mutex); 15238 return; 15239 } 15240 15241 /* 15242 * It is possible that the scd has been freed and reallocated with a 15243 * different region map while we've been waiting for the srd_scd_mutex. 15244 */ 15245 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret); 15246 if (ret != 1) { 15247 mutex_exit(&srdp->srd_scd_mutex); 15248 return; 15249 } 15250 15251 ASSERT(scdp->scd_sf_list == NULL); 15252 /* 15253 * Unlink scd from srd_scdp list. 15254 */ 15255 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15256 mutex_exit(&srdp->srd_scd_mutex); 15257 15258 sfmmu_unlink_scd_from_regions(srdp, scdp); 15259 15260 /* Clear shared context tsb and release ctx */ 15261 scsfmmup = scdp->scd_sfmmup; 15262 15263 /* 15264 * create a barrier so that scd will not be destroyed 15265 * if other thread still holds the same shared hat lock. 15266 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15267 * shared hat lock before checking the shared tsb reloc flag. 15268 */ 15269 shatlockp = sfmmu_hat_enter(scsfmmup); 15270 sfmmu_hat_exit(shatlockp); 15271 15272 sfmmu_free_scd_tsbs(scsfmmup); 15273 15274 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 15275 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) { 15276 kmem_free(scsfmmup->sfmmu_hmeregion_links[i], 15277 SFMMU_L2_HMERLINKS_SIZE); 15278 scsfmmup->sfmmu_hmeregion_links[i] = NULL; 15279 } 15280 } 15281 kmem_cache_free(sfmmuid_cache, scsfmmup); 15282 kmem_cache_free(scd_cache, scdp); 15283 SFMMU_STAT(sf_destroy_scd); 15284 } 15285 15286 /* 15287 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to 15288 * bits which are set in the ism_region_map parameter. This flag indicates to 15289 * the tsbmiss handler that mapping for these segments should be loaded using 15290 * the shared context. 15291 */ 15292 static void 15293 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag) 15294 { 15295 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15296 ism_blk_t *ism_blkp; 15297 ism_map_t *ism_map; 15298 int i, rid; 15299 15300 ASSERT(sfmmup->sfmmu_iblk != NULL); 15301 ASSERT(scdp != NULL); 15302 /* 15303 * Note that the caller either set HAT_ISMBUSY flag or checked 15304 * under hat lock that HAT_ISMBUSY was not set by another thread. 
15305 */ 15306 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15307 15308 ism_blkp = sfmmup->sfmmu_iblk; 15309 while (ism_blkp != NULL) { 15310 ism_map = ism_blkp->iblk_maps; 15311 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 15312 rid = ism_map[i].imap_rid; 15313 if (rid == SFMMU_INVALID_ISMRID) { 15314 continue; 15315 } 15316 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS); 15317 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) && 15318 addflag) { 15319 ism_map[i].imap_hatflags |= 15320 HAT_CTX1_FLAG; 15321 } else { 15322 ism_map[i].imap_hatflags &= 15323 ~HAT_CTX1_FLAG; 15324 } 15325 } 15326 ism_blkp = ism_blkp->iblk_next; 15327 } 15328 } 15329 15330 static int 15331 sfmmu_srd_lock_held(sf_srd_t *srdp) 15332 { 15333 return (MUTEX_HELD(&srdp->srd_mutex)); 15334 } 15335 15336 /* ARGSUSED */ 15337 static int 15338 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags) 15339 { 15340 sf_scd_t *scdp = (sf_scd_t *)buf; 15341 15342 bzero(buf, sizeof (sf_scd_t)); 15343 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL); 15344 return (0); 15345 } 15346 15347 /* ARGSUSED */ 15348 static void 15349 sfmmu_scdcache_destructor(void *buf, void *cdrarg) 15350 { 15351 sf_scd_t *scdp = (sf_scd_t *)buf; 15352 15353 mutex_destroy(&scdp->scd_mutex); 15354 } 15355 15356 /* 15357 * The listp parameter is a pointer to a list of hmeblks which are partially 15358 * freed as a result of calling sfmmu_hblk_hash_rm(). The last phase of the 15359 * freeing process is to cross-call all cpus to ensure that there are no 15360 * remaining cached references. 15361 * 15362 * If the local generation number is less than the global then we can free 15363 * hmeblks which are already on the pending queue as another cpu has completed 15364 * the cross-call. 15365 * 15366 * We cross-call to make sure that there are no threads on other cpus accessing 15367 * these hmeblks and then complete the process of freeing them under the 15368 * following conditions: 15369 * The total number of pending hmeblks is greater than the threshold 15370 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks 15371 * At least 1 second has passed since the last time we cross-called 15372 * 15373 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15374 */ 15375 static void 15376 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree) 15377 { 15378 struct hme_blk *hblkp, *pr_hblkp = NULL; 15379 int count = 0; 15380 cpuset_t cpuset = cpu_ready_set; 15381 cpu_hme_pend_t *cpuhp; 15382 timestruc_t now; 15383 int one_second_expired = 0; 15384 15385 gethrestime_lasttick(&now); 15386 15387 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) { 15388 ASSERT(hblkp->hblk_shw_bit == 0); 15389 ASSERT(hblkp->hblk_shared == 0); 15390 count++; 15391 pr_hblkp = hblkp; 15392 } 15393 15394 cpuhp = &cpu_hme_pend[CPU->cpu_seqid]; 15395 mutex_enter(&cpuhp->chp_mutex); 15396 15397 if ((cpuhp->chp_count + count) == 0) { 15398 mutex_exit(&cpuhp->chp_mutex); 15399 return; 15400 } 15401 15402 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) { 15403 one_second_expired = 1; 15404 } 15405 15406 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT || 15407 (cpuhp->chp_count + count) > cpu_hme_pend_thresh || 15408 one_second_expired)) { 15409 /* Append global list to local */ 15410 if (pr_hblkp == NULL) { 15411 *listp = cpuhp->chp_listp; 15412 } else { 15413 pr_hblkp->hblk_next = cpuhp->chp_listp; 15414 } 15415 cpuhp->chp_listp = NULL; 15416 cpuhp->chp_count = 0; 15417 cpuhp->chp_timestamp = now.tv_sec; 15418 mutex_exit(&cpuhp->chp_mutex); 15419 15420 kpreempt_disable(); 15421 CPUSET_DEL(cpuset, CPU->cpu_id); 15422 xt_sync(cpuset); 15423 xt_sync(cpuset); 15424 kpreempt_enable(); 15425 15426 /* 15427 * At this stage we know that no trap handlers on other 15428 * cpus can have references to hmeblks on the list. 15429 */ 15430 sfmmu_hblk_free(listp); 15431 } else if (*listp != NULL) { 15432 pr_hblkp->hblk_next = cpuhp->chp_listp; 15433 cpuhp->chp_listp = *listp; 15434 cpuhp->chp_count += count; 15435 *listp = NULL; 15436 mutex_exit(&cpuhp->chp_mutex); 15437 } else { 15438 mutex_exit(&cpuhp->chp_mutex); 15439 } 15440 } 15441 15442 /* 15443 * Add an hmeblk to the hash list. 15444 */ 15445 void 15446 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15447 uint64_t hblkpa) 15448 { 15449 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15450 #ifdef DEBUG 15451 if (hmebp->hmeblkp == NULL) { 15452 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA); 15453 } 15454 #endif /* DEBUG */ 15455 15456 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa; 15457 /* 15458 * Since the TSB miss handler now does not lock the hash chain before 15459 * walking it, make sure that the hmeblk's nextpa is globally visible 15460 * before we make the hmeblk globally visible by updating the chain root 15461 * pointer in the hash bucket. 15462 */ 15463 membar_producer(); 15464 hmebp->hmeh_nextpa = hblkpa; 15465 hmeblkp->hblk_next = hmebp->hmeblkp; 15466 hmebp->hmeblkp = hmeblkp; 15467 15468 } 15469 15470 /* 15471 * This function is the first part of a two-part process to remove an hmeblk 15472 * from the hash chain. In this phase we unlink the hmeblk from the hash chain 15473 * but leave the next physical pointer unchanged. The hmeblk is then linked onto 15474 * a per-cpu pending list using the virtual address pointer. 15475 * 15476 * TSB miss trap handlers that start after this phase will no longer see 15477 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register 15478 * can still use it for further chain traversal because we haven't yet modified 15479 * the next physical pointer or freed it. 15480 * 15481 * In the second phase of hmeblk removal we'll issue a barrier xcall before 15482 * we reuse or free this hmeblk.
This will make sure all lingering references to 15483 * the hmeblk after first phase disappear before we finally reclaim it. 15484 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains 15485 * during their traversal. 15486 * 15487 * The hmehash_mutex must be held when calling this function. 15488 * 15489 * Input: 15490 * hmebp - hme hash bucket pointer 15491 * hmeblkp - address of hmeblk to be removed 15492 * pr_hblk - virtual address of previous hmeblkp 15493 * listp - pointer to list of hmeblks linked by virtual address 15494 * free_now flag - indicates that a complete removal from the hash chains 15495 * is necessary. 15496 * 15497 * It is inefficient to use the free_now flag as a cross-call is required to 15498 * remove a single hmeblk from the hash chain but is necessary when hmeblks are 15499 * in short supply. 15500 */ 15501 void 15502 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15503 struct hme_blk *pr_hblk, struct hme_blk **listp, int free_now) 15504 { 15505 int shw_size, vshift; 15506 struct hme_blk *shw_hblkp; 15507 uint_t shw_mask, newshw_mask; 15508 caddr_t vaddr; 15509 int size; 15510 cpuset_t cpuset = cpu_ready_set; 15511 15512 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15513 15514 if (hmebp->hmeblkp == hmeblkp) { 15515 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa; 15516 hmebp->hmeblkp = hmeblkp->hblk_next; 15517 } else { 15518 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa; 15519 pr_hblk->hblk_next = hmeblkp->hblk_next; 15520 } 15521 15522 size = get_hblk_ttesz(hmeblkp); 15523 shw_hblkp = hmeblkp->hblk_shadow; 15524 if (shw_hblkp) { 15525 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 15526 ASSERT(!hmeblkp->hblk_shared); 15527 #ifdef DEBUG 15528 if (mmu_page_sizes == max_mmu_page_sizes) { 15529 ASSERT(size < TTE256M); 15530 } else { 15531 ASSERT(size < TTE4M); 15532 } 15533 #endif /* DEBUG */ 15534 15535 shw_size = get_hblk_ttesz(shw_hblkp); 15536 vaddr = (caddr_t)get_hblk_base(hmeblkp); 15537 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 15538 ASSERT(vshift < 8); 15539 /* 15540 * Atomically clear shadow mask bit 15541 */ 15542 do { 15543 shw_mask = shw_hblkp->hblk_shw_mask; 15544 ASSERT(shw_mask & (1 << vshift)); 15545 newshw_mask = shw_mask & ~(1 << vshift); 15546 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask, 15547 shw_mask, newshw_mask); 15548 } while (newshw_mask != shw_mask); 15549 hmeblkp->hblk_shadow = NULL; 15550 } 15551 hmeblkp->hblk_shw_bit = 0; 15552 15553 if (hmeblkp->hblk_shared) { 15554 #ifdef DEBUG 15555 sf_srd_t *srdp; 15556 sf_region_t *rgnp; 15557 uint_t rid; 15558 15559 srdp = hblktosrd(hmeblkp); 15560 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 15561 rid = hmeblkp->hblk_tag.htag_rid; 15562 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 15563 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 15564 rgnp = srdp->srd_hmergnp[rid]; 15565 ASSERT(rgnp != NULL); 15566 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 15567 #endif /* DEBUG */ 15568 hmeblkp->hblk_shared = 0; 15569 } 15570 if (free_now) { 15571 kpreempt_disable(); 15572 CPUSET_DEL(cpuset, CPU->cpu_id); 15573 xt_sync(cpuset); 15574 xt_sync(cpuset); 15575 kpreempt_enable(); 15576 15577 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 15578 hmeblkp->hblk_next = NULL; 15579 } else { 15580 /* Append hmeblkp to listp for processing later. 
*/ 15581 hmeblkp->hblk_next = *listp; 15582 *listp = hmeblkp; 15583 } 15584 } 15585 15586 /* 15587 * This routine is called when memory is in short supply and returns a free 15588 * hmeblk of the requested size from the cpu pending lists. 15589 */ 15590 static struct hme_blk * 15591 sfmmu_check_pending_hblks(int size) 15592 { 15593 int i; 15594 struct hme_blk *hmeblkp = NULL, *last_hmeblkp; 15595 int found_hmeblk; 15596 cpuset_t cpuset = cpu_ready_set; 15597 cpu_hme_pend_t *cpuhp; 15598 15599 /* Flush cpu hblk pending queues */ 15600 for (i = 0; i < NCPU; i++) { 15601 cpuhp = &cpu_hme_pend[i]; 15602 if (cpuhp->chp_listp != NULL) { 15603 mutex_enter(&cpuhp->chp_mutex); 15604 if (cpuhp->chp_listp == NULL) { 15605 mutex_exit(&cpuhp->chp_mutex); 15606 continue; 15607 } 15608 found_hmeblk = 0; 15609 last_hmeblkp = NULL; 15610 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL; 15611 hmeblkp = hmeblkp->hblk_next) { 15612 if (get_hblk_ttesz(hmeblkp) == size) { 15613 if (last_hmeblkp == NULL) { 15614 cpuhp->chp_listp = 15615 hmeblkp->hblk_next; 15616 } else { 15617 last_hmeblkp->hblk_next = 15618 hmeblkp->hblk_next; 15619 } 15620 ASSERT(cpuhp->chp_count > 0); 15621 cpuhp->chp_count--; 15622 found_hmeblk = 1; 15623 break; 15624 } else { 15625 last_hmeblkp = hmeblkp; 15626 } 15627 } 15628 mutex_exit(&cpuhp->chp_mutex); 15629 15630 if (found_hmeblk) { 15631 kpreempt_disable(); 15632 CPUSET_DEL(cpuset, CPU->cpu_id); 15633 xt_sync(cpuset); 15634 xt_sync(cpuset); 15635 kpreempt_enable(); 15636 return (hmeblkp); 15637 } 15638 } 15639 } 15640 return (NULL); 15641 } 15642
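/*
 * Editor's note -- illustrative sketch only, not part of the original file.
 * The SCD routines above (sfmmu_alloc_scd_tsbs(), sfmmu_link_scd_to_regions()
 * and sfmmu_unlink_scd_from_regions()) all walk the scd_region_map bitmap the
 * same way: scan each word and, for every set bit, reconstruct the region id
 * as rid = (word_index << BT_ULSHIFT) | bit_index.  The standalone userland
 * sketch below shows just that walk.  It is guarded by a hypothetical macro
 * (SFMMU_RGNMAP_WALK_EXAMPLE) that is never defined, so it is not compiled
 * as part of this file; EX_BT_ULSHIFT stands in for the kernel's BT_ULSHIFT
 * (log2 of the number of bits in a ulong_t).
 */
#ifdef SFMMU_RGNMAP_WALK_EXAMPLE
#include <stdio.h>

#define	EX_NWORDS	2	/* words in the example bitmap */
#define	EX_BT_ULSHIFT	6	/* log2(64), i.e. 64-bit words */

int
main(void)
{
	/* bits 0 and 2 of word 0, bit 0 of word 1 => region ids 0, 2 and 64 */
	unsigned long map[EX_NWORDS] = { 0x5UL, 0x1UL };
	unsigned long w;
	unsigned int i, j, rid;

	/* Same loop shape as sfmmu_link_scd_to_regions(). */
	for (i = 0; i < EX_NWORDS; i++) {
		if ((w = map[i]) == 0)
			continue;
		j = 0;
		while (w) {
			if (!(w & 0x1)) {
				j++;
				w >>= 1;
				continue;
			}
			rid = (i << EX_BT_ULSHIFT) | j;
			j++;
			w >>= 1;
			(void) printf("region id %u is mapped\n", rid);
		}
	}
	return (0);
}
#endif	/* SFMMU_RGNMAP_WALK_EXAMPLE */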