1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 /* 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 26 * Copyright 2019 Joyent, Inc. 27 */ 28 29 /* 30 * VM - Hardware Address Translation management for Spitfire MMU. 31 * 32 * This file implements the machine specific hardware translation 33 * needed by the VM system. The machine independent interface is 34 * described in <vm/hat.h> while the machine dependent interface 35 * and data structures are described in <vm/hat_sfmmu.h>. 36 * 37 * The hat layer manages the address translation hardware as a cache 38 * driven by calls from the higher levels in the VM system. 39 */ 40 41 #include <sys/types.h> 42 #include <sys/kstat.h> 43 #include <vm/hat.h> 44 #include <vm/hat_sfmmu.h> 45 #include <vm/page.h> 46 #include <sys/pte.h> 47 #include <sys/systm.h> 48 #include <sys/mman.h> 49 #include <sys/sysmacros.h> 50 #include <sys/machparam.h> 51 #include <sys/vtrace.h> 52 #include <sys/kmem.h> 53 #include <sys/mmu.h> 54 #include <sys/cmn_err.h> 55 #include <sys/cpu.h> 56 #include <sys/cpuvar.h> 57 #include <sys/debug.h> 58 #include <sys/lgrp.h> 59 #include <sys/archsystm.h> 60 #include <sys/machsystm.h> 61 #include <sys/vmsystm.h> 62 #include <vm/as.h> 63 #include <vm/seg.h> 64 #include <vm/seg_kp.h> 65 #include <vm/seg_kmem.h> 66 #include <vm/seg_kpm.h> 67 #include <vm/rm.h> 68 #include <sys/t_lock.h> 69 #include <sys/obpdefs.h> 70 #include <sys/vm_machparam.h> 71 #include <sys/var.h> 72 #include <sys/trap.h> 73 #include <sys/machtrap.h> 74 #include <sys/scb.h> 75 #include <sys/bitmap.h> 76 #include <sys/machlock.h> 77 #include <sys/membar.h> 78 #include <sys/atomic.h> 79 #include <sys/cpu_module.h> 80 #include <sys/prom_debug.h> 81 #include <sys/ksynch.h> 82 #include <sys/mem_config.h> 83 #include <sys/mem_cage.h> 84 #include <vm/vm_dep.h> 85 #include <sys/fpu/fpusystm.h> 86 #include <vm/mach_kpm.h> 87 #include <sys/callb.h> 88 89 #ifdef DEBUG 90 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 91 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 92 caddr_t _eaddr = (saddr) + (len); \ 93 sf_srd_t *_srdp; \ 94 sf_region_t *_rgnp; \ 95 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 96 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 97 ASSERT((hat) != ksfmmup); \ 98 _srdp = (hat)->sfmmu_srdp; \ 99 ASSERT(_srdp != NULL); \ 100 ASSERT(_srdp->srd_refcnt != 0); \ 101 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 102 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 103 ASSERT(_rgnp->rgn_refcnt != 0); \ 104 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 105 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 106 SFMMU_REGION_HME); \ 107 ASSERT((saddr) >= 
_rgnp->rgn_saddr); \ 108 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \ 109 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 110 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 111 } 112 113 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 114 { \ 115 caddr_t _hsva; \ 116 caddr_t _heva; \ 117 caddr_t _rsva; \ 118 caddr_t _reva; \ 119 int _ttesz = get_hblk_ttesz(hmeblkp); \ 120 int _flagtte; \ 121 ASSERT((srdp)->srd_refcnt != 0); \ 122 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 123 ASSERT((rgnp)->rgn_id == rid); \ 124 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 125 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 126 SFMMU_REGION_HME); \ 127 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 128 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 129 _heva = get_hblk_endaddr(hmeblkp); \ 130 _rsva = (caddr_t)P2ALIGN( \ 131 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 132 _reva = (caddr_t)P2ROUNDUP( \ 133 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 134 HBLK_MIN_BYTES); \ 135 ASSERT(_hsva >= _rsva); \ 136 ASSERT(_hsva < _reva); \ 137 ASSERT(_heva > _rsva); \ 138 ASSERT(_heva <= _reva); \ 139 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 140 _ttesz; \ 141 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 142 } 143 144 #else /* DEBUG */ 145 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 146 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 147 #endif /* DEBUG */ 148 149 #if defined(SF_ERRATA_57) 150 extern caddr_t errata57_limit; 151 #endif 152 153 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 154 (sizeof (int64_t))) 155 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 156 157 #define HBLK_RESERVE_CNT 128 158 #define HBLK_RESERVE_MIN 20 159 160 static struct hme_blk *freehblkp; 161 static kmutex_t freehblkp_lock; 162 static int freehblkcnt; 163 164 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 165 static kmutex_t hblk_reserve_lock; 166 static kthread_t *hblk_reserve_thread; 167 168 static nucleus_hblk8_info_t nucleus_hblk8; 169 static nucleus_hblk1_info_t nucleus_hblk1; 170 171 /* 172 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here 173 * after the initial phase of removing an hmeblk from the hash chain, see 174 * the detailed comment in sfmmu_hblk_hash_rm() for further details. 175 */ 176 static cpu_hme_pend_t *cpu_hme_pend; 177 static uint_t cpu_hme_pend_thresh; 178 /* 179 * SFMMU specific hat functions 180 */ 181 void hat_pagecachectl(struct page *, int); 182 183 /* flags for hat_pagecachectl */ 184 #define HAT_CACHE 0x1 185 #define HAT_UNCACHE 0x2 186 #define HAT_TMPNC 0x4 187 188 /* 189 * Flag to allow the creation of non-cacheable translations 190 * to system memory. It is off by default. At the moment this 191 * flag is used by the ecache error injector. The error injector 192 * will turn it on when creating such a translation then shut it 193 * off when it's finished. 194 */ 195 196 int sfmmu_allow_nc_trans = 0; 197 198 /* 199 * Flag to disable large page support. 200 * value of 1 => disable all large pages. 201 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 202 * 203 * For example, use the value 0x4 to disable 512K pages. 204 * 205 */ 206 #define LARGE_PAGES_OFF 0x1 207 208 /* 209 * The disable_large_pages and disable_ism_large_pages variables control 210 * hat_memload_array and the page sizes to be used by ISM and the kernel. 
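 *
 * Each bit position corresponds to a TTE size code.  As an illustrative
 * sketch only (not an existing call site), a platform wishing to disallow
 * both 512K and 4M pages could do the equivalent of:
 *
 *	disable_large_pages |= (1 << TTE512K) | (1 << TTE4M);
 *
 * which sets bits 0x4 and 0x8 respectively.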
211 * 212 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 213 * are only used to control which OOB pages to use at upper VM segment creation 214 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 215 * Their values may come from platform or CPU specific code to disable page 216 * sizes that should not be used. 217 * 218 * WARNING: 512K pages are currently not supported for ISM/DISM. 219 */ 220 uint_t disable_large_pages = 0; 221 uint_t disable_ism_large_pages = (1 << TTE512K); 222 uint_t disable_auto_data_large_pages = 0; 223 uint_t disable_auto_text_large_pages = 0; 224 225 /* 226 * Private sfmmu data structures for hat management 227 */ 228 static struct kmem_cache *sfmmuid_cache; 229 static struct kmem_cache *mmuctxdom_cache; 230 231 /* 232 * Private sfmmu data structures for tsb management 233 */ 234 static struct kmem_cache *sfmmu_tsbinfo_cache; 235 static struct kmem_cache *sfmmu_tsb8k_cache; 236 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 237 static vmem_t *kmem_bigtsb_arena; 238 static vmem_t *kmem_tsb_arena; 239 240 /* 241 * sfmmu static variables for hmeblk resource management. 242 */ 243 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 244 static struct kmem_cache *sfmmu8_cache; 245 static struct kmem_cache *sfmmu1_cache; 246 static struct kmem_cache *pa_hment_cache; 247 248 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 249 /* 250 * private data for ism 251 */ 252 static struct kmem_cache *ism_blk_cache; 253 static struct kmem_cache *ism_ment_cache; 254 #define ISMID_STARTADDR NULL 255 256 /* 257 * Region management data structures and function declarations. 258 */ 259 260 static void sfmmu_leave_srd(sfmmu_t *); 261 static int sfmmu_srdcache_constructor(void *, void *, int); 262 static void sfmmu_srdcache_destructor(void *, void *); 263 static int sfmmu_rgncache_constructor(void *, void *, int); 264 static void sfmmu_rgncache_destructor(void *, void *); 265 static int sfrgnmap_isnull(sf_region_map_t *); 266 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 267 static int sfmmu_scdcache_constructor(void *, void *, int); 268 static void sfmmu_scdcache_destructor(void *, void *); 269 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 270 size_t, void *, u_offset_t); 271 272 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 273 static sf_srd_bucket_t *srd_buckets; 274 static struct kmem_cache *srd_cache; 275 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 276 static struct kmem_cache *region_cache; 277 static struct kmem_cache *scd_cache; 278 279 #ifdef sun4v 280 int use_bigtsb_arena = 1; 281 #else 282 int use_bigtsb_arena = 0; 283 #endif 284 285 /* External /etc/system tunable, for turning on&off the shctx support */ 286 int disable_shctx = 0; 287 /* Internal variable, set by MD if the HW supports shctx feature */ 288 int shctx_on = 0; 289 290 #ifdef DEBUG 291 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 292 #endif 293 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 294 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 295 296 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 297 static void sfmmu_find_scd(sfmmu_t *); 298 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 299 static void sfmmu_finish_join_scd(sfmmu_t *); 300 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 301 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 302 static int 
sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 303 static void sfmmu_free_scd_tsbs(sfmmu_t *); 304 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 305 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 306 static void sfmmu_ism_hatflags(sfmmu_t *, int); 307 static int sfmmu_srd_lock_held(sf_srd_t *); 308 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 309 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 310 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 311 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 312 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 313 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 314 315 /* 316 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 317 * HAT flags, synchronizing TLB/TSB coherency, and context management. 318 * The lock is hashed on the sfmmup since the case where we need to lock 319 * all processes is rare but does occur (e.g. we need to unload a shared 320 * mapping from all processes using the mapping). We have a lot of buckets, 321 * and each slab of sfmmu_t's can use about a quarter of them, giving us 322 * a fairly good distribution without wasting too much space and overhead 323 * when we have to grab them all. 324 */ 325 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 326 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 327 328 /* 329 * Hash algorithm optimized for a small number of slabs. 330 * 7 is (highbit((sizeof sfmmu_t)) - 1) 331 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 332 * kmem_cache, and thus they will be sequential within that cache. In 333 * addition, each new slab will have a different "color" up to cache_maxcolor 334 * which will skew the hashing for each successive slab which is allocated. 335 * If the size of sfmmu_t changed to a larger size, this algorithm may need 336 * to be revisited. 337 */ 338 #define TSB_HASH_SHIFT_BITS (7) 339 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 340 341 #ifdef DEBUG 342 int tsb_hash_debug = 0; 343 #define TSB_HASH(sfmmup) \ 344 (tsb_hash_debug ? &hat_lock[0] : \ 345 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 346 #else /* DEBUG */ 347 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 348 #endif /* DEBUG */ 349 350 351 /* sfmmu_replace_tsb() return codes. */ 352 typedef enum tsb_replace_rc { 353 TSB_SUCCESS, 354 TSB_ALLOCFAIL, 355 TSB_LOSTRACE, 356 TSB_ALREADY_SWAPPED, 357 TSB_CANTGROW 358 } tsb_replace_rc_t; 359 360 /* 361 * Flags for TSB allocation routines. 362 */ 363 #define TSB_ALLOC 0x01 364 #define TSB_FORCEALLOC 0x02 365 #define TSB_GROW 0x04 366 #define TSB_SHRINK 0x08 367 #define TSB_SWAPIN 0x10 368 369 /* 370 * Support for HAT callbacks. 371 */ 372 #define SFMMU_MAX_RELOC_CALLBACKS 10 373 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 374 static id_t sfmmu_cb_nextid = 0; 375 static id_t sfmmu_tsb_cb_id; 376 struct sfmmu_callback *sfmmu_cb_table; 377 378 kmutex_t kpr_mutex; 379 kmutex_t kpr_suspendlock; 380 kthread_t *kreloc_thread; 381 382 /* 383 * Enable VA->PA translation sanity checking on DEBUG kernels. 384 * Disabled by default. This is incompatible with some 385 * drivers (error injector, RSM) so if it breaks you get 386 * to keep both pieces. 
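 *
 * For example, on a DEBUG kernel (and with no conflicting drivers loaded)
 * the checking can be enabled with an /etc/system entry along the lines of:
 *
 *	set hat_check_vtop = 1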
387 */ 388 int hat_check_vtop = 0; 389 390 /* 391 * Private sfmmu routines (prototypes) 392 */ 393 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 394 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 395 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 396 uint_t); 397 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 398 caddr_t, demap_range_t *, uint_t); 399 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 400 caddr_t, int); 401 static void sfmmu_hblk_free(struct hme_blk **); 402 static void sfmmu_hblks_list_purge(struct hme_blk **, int); 403 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 404 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 405 static struct hme_blk *sfmmu_hblk_steal(int); 406 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 407 struct hme_blk *, uint64_t, struct hme_blk *); 408 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 409 410 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 411 struct page **, uint_t, uint_t, uint_t); 412 static void hat_do_memload(struct hat *, caddr_t, struct page *, 413 uint_t, uint_t, uint_t); 414 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 415 uint_t, uint_t, pgcnt_t, uint_t); 416 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 417 uint_t); 418 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 419 uint_t, uint_t); 420 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 421 caddr_t, int, uint_t); 422 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 423 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 424 uint_t); 425 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 426 caddr_t, page_t **, uint_t, uint_t); 427 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 428 429 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 430 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 431 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 432 #ifdef VAC 433 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 434 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 435 int tst_tnc(page_t *pp, pgcnt_t); 436 void conv_tnc(page_t *pp, int); 437 #endif 438 439 static void sfmmu_get_ctx(sfmmu_t *); 440 static void sfmmu_free_sfmmu(sfmmu_t *); 441 442 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 443 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 444 445 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 446 static void hat_pagereload(struct page *, struct page *); 447 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 448 #ifdef VAC 449 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 450 static void sfmmu_page_cache(page_t *, int, int, int); 451 #endif 452 453 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 454 struct hme_blk *, int); 455 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 456 pfn_t, int, int, int, int); 457 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 458 pfn_t, int); 459 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 460 static void sfmmu_tlb_range_demap(demap_range_t *); 461 static void sfmmu_invalidate_ctx(sfmmu_t *); 462 static void sfmmu_sync_mmustate(sfmmu_t *); 463 464 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 
465 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 466 sfmmu_t *); 467 static void sfmmu_tsb_free(struct tsb_info *); 468 static void sfmmu_tsbinfo_free(struct tsb_info *); 469 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 470 sfmmu_t *); 471 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 472 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 473 static int sfmmu_select_tsb_szc(pgcnt_t); 474 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 475 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 476 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 477 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 478 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 479 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 480 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 481 hatlock_t *, uint_t); 482 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 483 484 #ifdef VAC 485 void sfmmu_cache_flush(pfn_t, int); 486 void sfmmu_cache_flushcolor(int, pfn_t); 487 #endif 488 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 489 caddr_t, demap_range_t *, uint_t, int); 490 491 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 492 static uint_t sfmmu_ptov_attr(tte_t *); 493 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 494 caddr_t, demap_range_t *, uint_t); 495 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 496 static int sfmmu_idcache_constructor(void *, void *, int); 497 static void sfmmu_idcache_destructor(void *, void *); 498 static int sfmmu_hblkcache_constructor(void *, void *, int); 499 static void sfmmu_hblkcache_destructor(void *, void *); 500 static void sfmmu_hblkcache_reclaim(void *); 501 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 502 struct hmehash_bucket *); 503 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *, 504 struct hme_blk *, struct hme_blk **, int); 505 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *, 506 uint64_t); 507 static struct hme_blk *sfmmu_check_pending_hblks(int); 508 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 509 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 510 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 511 int, caddr_t *); 512 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 513 514 static void sfmmu_rm_large_mappings(page_t *, int); 515 516 static void hat_lock_init(void); 517 static void hat_kstat_init(void); 518 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 519 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 520 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 521 static void sfmmu_check_page_sizes(sfmmu_t *, int); 522 int fnd_mapping_sz(page_t *); 523 static void iment_add(struct ism_ment *, struct hat *); 524 static void iment_sub(struct ism_ment *, struct hat *); 525 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 526 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 527 extern void sfmmu_clear_utsbinfo(void); 528 529 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t); 530 531 extern int vpm_enable; 532 533 /* kpm globals */ 534 #ifdef DEBUG 535 /* 536 * Enable trap level tsbmiss handling 537 */ 538 int kpm_tsbmtl = 1; 539 540 /* 541 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 542 * required TLB shootdowns in this case, so handle w/ care. Off by default. 
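 *
 * On a DEBUG kernel this can similarly be enabled with an /etc/system
 * entry such as:
 *
 *	set kpm_tlb_flush = 1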
543 */ 544 int kpm_tlb_flush; 545 #endif /* DEBUG */ 546 547 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 548 549 #ifdef DEBUG 550 static void sfmmu_check_hblk_flist(); 551 #endif 552 553 /* 554 * Semi-private sfmmu data structures. Some of them are initialize in 555 * startup or in hat_init. Some of them are private but accessed by 556 * assembly code or mach_sfmmu.c 557 */ 558 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */ 559 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */ 560 uint64_t uhme_hash_pa; /* PA of uhme_hash */ 561 uint64_t khme_hash_pa; /* PA of khme_hash */ 562 int uhmehash_num; /* # of buckets in user hash table */ 563 int khmehash_num; /* # of buckets in kernel hash table */ 564 565 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */ 566 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */ 567 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */ 568 569 #define DEFAULT_NUM_CTXS_PER_MMU 8192 570 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU; 571 572 int cache; /* describes system cache */ 573 574 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */ 575 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */ 576 int ktsb_szcode; /* kernel 8k-indexed tsb size code */ 577 int ktsb_sz; /* kernel 8k-indexed tsb size */ 578 579 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */ 580 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */ 581 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */ 582 int ktsb4m_sz; /* kernel 4m-indexed tsb size */ 583 584 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */ 585 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */ 586 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */ 587 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */ 588 589 #ifndef sun4v 590 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */ 591 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */ 592 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */ 593 caddr_t utsb_vabase; /* reserved kernel virtual memory */ 594 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */ 595 #endif /* sun4v */ 596 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */ 597 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */ 598 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */ 599 600 /* 601 * Size to use for TSB slabs. Future platforms that support page sizes 602 * larger than 4M may wish to change these values, and provide their own 603 * assembly macros for building and decoding the TSB base register contents. 604 * Note disable_large_pages will override the value set here. 605 */ 606 static uint_t tsb_slab_ttesz = TTE4M; 607 size_t tsb_slab_size = MMU_PAGESIZE4M; 608 uint_t tsb_slab_shift = MMU_PAGESHIFT4M; 609 /* PFN mask for TTE */ 610 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT; 611 612 /* 613 * Size to use for TSB slabs. These are used only when 256M tsb arenas 614 * exist. 615 */ 616 static uint_t bigtsb_slab_ttesz = TTE256M; 617 static size_t bigtsb_slab_size = MMU_PAGESIZE256M; 618 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M; 619 /* 256M page alignment for 8K pfn */ 620 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT; 621 622 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 623 static int tsb_max_growsize = 0; 624 625 /* 626 * Tunable parameters dealing with TSB policies. 
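 *
 * The tunables below cover: forcing 8K TSBs onto the kernel heap
 * (tsb_forceheap, DEBUG only), per-lgroup TSB arenas (tsb_lgrp_affinity),
 * RSS-based TSB growth (enable_tsb_rss_sizing, tsb_rss_factor), the default
 * TSB size code (default_tsb_size), the TSB memory cap
 * (tsb_alloc_hiwater_factor), TTE remapping on TSB growth (tsb_remap_ttes),
 * and the second-TSB threshold (tsb_sectsb_threshold).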
627 */ 628 629 /* 630 * This undocumented tunable forces all 8K TSBs to be allocated from 631 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 632 */ 633 #ifdef DEBUG 634 int tsb_forceheap = 0; 635 #endif /* DEBUG */ 636 637 /* 638 * Decide whether to use per-lgroup arenas, or one global set of 639 * TSB arenas. The default is not to break up per-lgroup, since 640 * most platforms don't recognize any tangible benefit from it. 641 */ 642 int tsb_lgrp_affinity = 0; 643 644 /* 645 * Used for growing the TSB based on the process RSS. 646 * tsb_rss_factor is based on the smallest TSB, and is 647 * shifted by the TSB size to determine if we need to grow. 648 * The default will grow the TSB if the number of TTEs for 649 * this page size exceeds 75% of the number of TSB entries, 650 * which should _almost_ eliminate all conflict misses 651 * (at the expense of using up lots and lots of memory). 652 */ 653 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 654 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 655 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 656 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 657 default_tsb_size) 658 #define TSB_OK_SHRINK() \ 659 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 660 #define TSB_OK_GROW() \ 661 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 662 663 int enable_tsb_rss_sizing = 1; 664 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 665 666 /* which TSB size code to use for new address spaces or if rss sizing off */ 667 int default_tsb_size = TSB_8K_SZCODE; 668 669 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 670 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 671 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 672 673 #ifdef DEBUG 674 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 675 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 676 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 677 static int tsb_alloc_fail_mtbf = 0; 678 static int tsb_alloc_count = 0; 679 #endif /* DEBUG */ 680 681 /* if set to 1, will remap valid TTEs when growing TSB. */ 682 int tsb_remap_ttes = 1; 683 684 /* 685 * If we have more than this many mappings, allocate a second TSB. 686 * This default is chosen because the I/D fully associative TLBs are 687 * assumed to have at least 8 available entries. Platforms with a 688 * larger fully-associative TLB could probably override the default. 
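 *
 * As a purely hypothetical example, a platform whose fully-associative
 * TLBs hold 16 entries might raise the threshold from platform code, or an
 * administrator might do so with an /etc/system entry such as:
 *
 *	set tsb_sectsb_threshold = 16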
689 */ 690 691 #ifdef sun4v 692 int tsb_sectsb_threshold = 0; 693 #else 694 int tsb_sectsb_threshold = 8; 695 #endif 696 697 /* 698 * kstat data 699 */ 700 struct sfmmu_global_stat sfmmu_global_stat; 701 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 702 703 /* 704 * Global data 705 */ 706 sfmmu_t *ksfmmup; /* kernel's hat id */ 707 708 #ifdef DEBUG 709 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 710 #endif 711 712 /* sfmmu locking operations */ 713 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 714 static int sfmmu_mlspl_held(struct page *, int); 715 716 kmutex_t *sfmmu_page_enter(page_t *); 717 void sfmmu_page_exit(kmutex_t *); 718 int sfmmu_page_spl_held(struct page *); 719 720 /* sfmmu internal locking operations - accessed directly */ 721 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 722 kmutex_t **, kmutex_t **); 723 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 724 static hatlock_t * 725 sfmmu_hat_enter(sfmmu_t *); 726 static hatlock_t * 727 sfmmu_hat_tryenter(sfmmu_t *); 728 static void sfmmu_hat_exit(hatlock_t *); 729 static void sfmmu_hat_lock_all(void); 730 static void sfmmu_hat_unlock_all(void); 731 static void sfmmu_ismhat_enter(sfmmu_t *, int); 732 static void sfmmu_ismhat_exit(sfmmu_t *, int); 733 734 kpm_hlk_t *kpmp_table; 735 uint_t kpmp_table_sz; /* must be a power of 2 */ 736 uchar_t kpmp_shift; 737 738 kpm_shlk_t *kpmp_stable; 739 uint_t kpmp_stable_sz; /* must be a power of 2 */ 740 741 /* 742 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128. 743 * SPL_SHIFT is log2(SPL_TABLE_SIZE). 744 */ 745 #if ((2*NCPU_P2) > 128) 746 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1)) 747 #else 748 #define SPL_SHIFT 7U 749 #endif 750 #define SPL_TABLE_SIZE (1U << SPL_SHIFT) 751 #define SPL_MASK (SPL_TABLE_SIZE - 1) 752 753 /* 754 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t 755 * and by multiples of SPL_SHIFT to get as many varied bits as we can. 756 */ 757 #define SPL_INDEX(pp) \ 758 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \ 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \ 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \ 761 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \ 762 SPL_MASK) 763 764 #define SPL_HASH(pp) \ 765 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex) 766 767 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE]; 768 769 /* Array of mutexes protecting a page's mapping list and p_nrm field. */ 770 771 #define MML_TABLE_SIZE SPL_TABLE_SIZE 772 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex) 773 774 static pad_mutex_t mml_table[MML_TABLE_SIZE]; 775 776 /* 777 * hat_unload_callback() will group together callbacks in order 778 * to avoid xt_sync() calls. This is the maximum size of the group. 779 */ 780 #define MAX_CB_ADDR 32 781 782 tte_t hw_tte; 783 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 784 785 static char *mmu_ctx_kstat_names[] = { 786 "mmu_ctx_tsb_exceptions", 787 "mmu_ctx_tsb_raise_exception", 788 "mmu_ctx_wrap_around", 789 }; 790 791 /* 792 * Wrapper for vmem_xalloc since vmem_create only allows limited 793 * parameters for vm_source_alloc functions. This function allows us 794 * to specify alignment consistent with the size of the object being 795 * allocated. 796 */ 797 static void * 798 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 799 { 800 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 801 } 802 803 /* Common code for setting tsb_alloc_hiwater. 
*/ 804 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 805 ptob(pages) / tsb_alloc_hiwater_factor 806 807 /* 808 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 809 * a single TSB. physmem is the number of physical pages so we need physmem 8K 810 * TTEs to represent all those physical pages. We round this up by using 811 * 1<<highbit(). To figure out which size code to use, remember that the size 812 * code is just an amount to shift the smallest TSB size to get the size of 813 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 814 * highbit() - 1) to get the size code for the smallest TSB that can represent 815 * all of physical memory, while erring on the side of too much. 816 * 817 * Restrict tsb_max_growsize to make sure that: 818 * 1) TSBs can't grow larger than the TSB slab size 819 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 820 */ 821 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 822 int _i, _szc, _slabszc, _tsbszc; \ 823 \ 824 _i = highbit(pages); \ 825 if ((1 << (_i - 1)) == (pages)) \ 826 _i--; /* 2^n case, round down */ \ 827 _szc = _i - TSB_START_SIZE; \ 828 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 829 _tsbszc = MIN(_szc, _slabszc); \ 830 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 831 } 832 833 /* 834 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 835 * tsb_info which handles that TTE size. 836 */ 837 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 838 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 839 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 840 sfmmu_hat_lock_held(sfmmup)); \ 841 if ((tte_szc) >= TTE4M) { \ 842 ASSERT((tsbinfop) != NULL); \ 843 (tsbinfop) = (tsbinfop)->tsb_next; \ 844 } \ 845 } 846 847 /* 848 * Macro to use to unload entries from the TSB. 849 * It has knowledge of which page sizes get replicated in the TSB 850 * and will call the appropriate unload routine for the appropriate size. 851 */ 852 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 853 { \ 854 int ttesz = get_hblk_ttesz(hmeblkp); \ 855 if (ttesz == TTE8K || ttesz == TTE4M) { \ 856 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 857 } else { \ 858 caddr_t sva = ismhat ? addr : \ 859 (caddr_t)get_hblk_base(hmeblkp); \ 860 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 861 ASSERT(addr >= sva && addr < eva); \ 862 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 863 } \ 864 } 865 866 867 /* Update tsb_alloc_hiwater after memory is configured. */ 868 /*ARGSUSED*/ 869 static void 870 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 871 { 872 /* Assumes physmem has already been updated. */ 873 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 874 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 875 } 876 877 /* 878 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here 879 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is 880 * deleted. 881 */ 882 /*ARGSUSED*/ 883 static int 884 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages) 885 { 886 return (0); 887 } 888 889 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */ 890 /*ARGSUSED*/ 891 static void 892 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 893 { 894 /* 895 * Whether the delete was cancelled or not, just go ahead and update 896 * tsb_alloc_hiwater and tsb_max_growsize. 
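 *
 * Recomputing from the current physmem is idempotent, so doing it even for
 * a cancelled delete is harmless.  As a worked example of the arithmetic in
 * SFMMU_SET_TSB_ALLOC_HIWATER(): with 8K pages, a physmem of 524288 pages
 * (4GB) and the default tsb_alloc_hiwater_factor of 32, the high water mark
 * becomes ptob(524288) / 32 = 128MB of memory reservable for TSBs.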
897 */ 898 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 899 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 900 } 901 902 static kphysm_setup_vector_t sfmmu_update_vec = { 903 KPHYSM_SETUP_VECTOR_VERSION, /* version */ 904 sfmmu_update_post_add, /* post_add */ 905 sfmmu_update_pre_del, /* pre_del */ 906 sfmmu_update_post_del /* post_del */ 907 }; 908 909 910 /* 911 * HME_BLK HASH PRIMITIVES 912 */ 913 914 /* 915 * Enter a hme on the mapping list for page pp. 916 * When large pages are more prevalent in the system we might want to 917 * keep the mapping list in ascending order by the hment size. For now, 918 * small pages are more frequent, so don't slow it down. 919 */ 920 #define HME_ADD(hme, pp) \ 921 { \ 922 ASSERT(sfmmu_mlist_held(pp)); \ 923 \ 924 hme->hme_prev = NULL; \ 925 hme->hme_next = pp->p_mapping; \ 926 hme->hme_page = pp; \ 927 if (pp->p_mapping) { \ 928 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\ 929 ASSERT(pp->p_share > 0); \ 930 } else { \ 931 /* EMPTY */ \ 932 ASSERT(pp->p_share == 0); \ 933 } \ 934 pp->p_mapping = hme; \ 935 pp->p_share++; \ 936 } 937 938 /* 939 * Remove a hme from the mapping list for page pp. 940 * If we are unmapping a large translation, we need to make sure that the 941 * change is reflected in the corresponding bit of the p_index field. 942 */ 943 #define HME_SUB(hme, pp) \ 944 { \ 945 ASSERT(sfmmu_mlist_held(pp)); \ 946 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \ 947 \ 948 if (pp->p_mapping == NULL) { \ 949 panic("hme_remove - no mappings"); \ 950 } \ 951 \ 952 membar_stst(); /* ensure previous stores finish */ \ 953 \ 954 ASSERT(pp->p_share > 0); \ 955 pp->p_share--; \ 956 \ 957 if (hme->hme_prev) { \ 958 ASSERT(pp->p_mapping != hme); \ 959 ASSERT(hme->hme_prev->hme_page == pp || \ 960 IS_PAHME(hme->hme_prev)); \ 961 hme->hme_prev->hme_next = hme->hme_next; \ 962 } else { \ 963 ASSERT(pp->p_mapping == hme); \ 964 pp->p_mapping = hme->hme_next; \ 965 ASSERT((pp->p_mapping == NULL) ? \ 966 (pp->p_share == 0) : 1); \ 967 } \ 968 \ 969 if (hme->hme_next) { \ 970 ASSERT(hme->hme_next->hme_page == pp || \ 971 IS_PAHME(hme->hme_next)); \ 972 hme->hme_next->hme_prev = hme->hme_prev; \ 973 } \ 974 \ 975 /* zero out the entry */ \ 976 hme->hme_next = NULL; \ 977 hme->hme_prev = NULL; \ 978 hme->hme_page = NULL; \ 979 \ 980 if (hme_size(hme) > TTE8K) { \ 981 /* remove mappings for remainder of large pg */ \ 982 sfmmu_rm_large_mappings(pp, hme_size(hme)); \ 983 } \ 984 } 985 986 /* 987 * This function returns the hment given the hme_blk and a vaddr. 988 * It assumes addr has already been checked to belong to hme_blk's 989 * range. 990 */ 991 #define HBLKTOHME(hment, hmeblkp, addr) \ 992 { \ 993 int index; \ 994 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \ 995 } 996 997 /* 998 * Version of HBLKTOHME that also returns the index in hmeblkp 999 * of the hment.
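 *
 * For an 8K (TTE8K) hmeblk the index is simply VA bits <15:13>, i.e. the
 * 8K page offset within the block's 64K span; for example, an addr ending
 * in 0xa000 yields ((addr >> MMU_PAGESHIFT) & (NHMENTS - 1)) == 5, so the
 * mapping lives in hblk_hme[5].  For all larger page sizes the hmeblk
 * holds a single hment and the index is 0.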
1000 */ 1001 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \ 1002 { \ 1003 ASSERT(in_hblk_range((hmeblkp), (addr))); \ 1004 \ 1005 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \ 1006 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \ 1007 } else \ 1008 idx = 0; \ 1009 \ 1010 (hment) = &(hmeblkp)->hblk_hme[idx]; \ 1011 } 1012 1013 /* 1014 * Disable any page sizes not supported by the CPU 1015 */ 1016 void 1017 hat_init_pagesizes() 1018 { 1019 int i; 1020 1021 mmu_exported_page_sizes = 0; 1022 for (i = TTE8K; i < max_mmu_page_sizes; i++) { 1023 1024 szc_2_userszc[i] = (uint_t)-1; 1025 userszc_2_szc[i] = (uint_t)-1; 1026 1027 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) { 1028 disable_large_pages |= (1 << i); 1029 } else { 1030 szc_2_userszc[i] = mmu_exported_page_sizes; 1031 userszc_2_szc[mmu_exported_page_sizes] = i; 1032 mmu_exported_page_sizes++; 1033 } 1034 } 1035 1036 disable_ism_large_pages |= disable_large_pages; 1037 disable_auto_data_large_pages = disable_large_pages; 1038 disable_auto_text_large_pages = disable_large_pages; 1039 1040 /* 1041 * Initialize mmu-specific large page sizes. 1042 */ 1043 if (&mmu_large_pages_disabled) { 1044 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD); 1045 disable_ism_large_pages |= 1046 mmu_large_pages_disabled(HAT_LOAD_SHARE); 1047 disable_auto_data_large_pages |= 1048 mmu_large_pages_disabled(HAT_AUTO_DATA); 1049 disable_auto_text_large_pages |= 1050 mmu_large_pages_disabled(HAT_AUTO_TEXT); 1051 } 1052 } 1053 1054 /* 1055 * Initialize the hardware address translation structures. 1056 */ 1057 void 1058 hat_init(void) 1059 { 1060 int i; 1061 uint_t sz; 1062 size_t size; 1063 1064 hat_lock_init(); 1065 hat_kstat_init(); 1066 1067 /* 1068 * Hardware-only bits in a TTE 1069 */ 1070 MAKE_TTE_MASK(&hw_tte); 1071 1072 hat_init_pagesizes(); 1073 1074 /* Initialize the hash locks */ 1075 for (i = 0; i < khmehash_num; i++) { 1076 mutex_init(&khme_hash[i].hmehash_mutex, NULL, 1077 MUTEX_DEFAULT, NULL); 1078 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1079 } 1080 for (i = 0; i < uhmehash_num; i++) { 1081 mutex_init(&uhme_hash[i].hmehash_mutex, NULL, 1082 MUTEX_DEFAULT, NULL); 1083 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1084 } 1085 khmehash_num--; /* make sure counter starts from 0 */ 1086 uhmehash_num--; /* make sure counter starts from 0 */ 1087 1088 /* 1089 * Allocate context domain structures. 1090 * 1091 * A platform may choose to modify max_mmu_ctxdoms in 1092 * set_platform_defaults(). If a platform does not define 1093 * a set_platform_defaults() or does not choose to modify 1094 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU. 1095 * 1096 * For all platforms that have CPUs sharing MMUs, this 1097 * value must be defined. 1098 */ 1099 if (max_mmu_ctxdoms == 0) 1100 max_mmu_ctxdoms = max_ncpus; 1101 1102 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *); 1103 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP); 1104 1105 /* mmu_ctx_t is 64 bytes aligned */ 1106 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache", 1107 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 1108 /* 1109 * MMU context domain initialization for the Boot CPU. 1110 * This needs the context domains array allocated above. 1111 */ 1112 mutex_enter(&cpu_lock); 1113 sfmmu_cpu_init(CPU); 1114 mutex_exit(&cpu_lock); 1115 1116 /* 1117 * Intialize ism mapping list lock. 
1118 */ 1119 1120 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1121 1122 /* 1123 * Each sfmmu structure carries an array of MMU context info 1124 * structures, one per context domain. The size of this array depends 1125 * on the maximum number of context domains. So, the size of the 1126 * sfmmu structure varies per platform. 1127 * 1128 * sfmmu is allocated from static arena, because trap 1129 * handler at TL > 0 is not allowed to touch kernel relocatable 1130 * memory. sfmmu's alignment is changed to 64 bytes from 1131 * default 8 bytes, as the lower 6 bits will be used to pass 1132 * pgcnt to vtag_flush_pgcnt_tl1. 1133 */ 1134 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1135 1136 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1137 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1138 NULL, NULL, static_arena, 0); 1139 1140 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1141 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1142 1143 /* 1144 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1145 * from the heap when low on memory or when TSB_FORCEALLOC is 1146 * specified, don't use magazines to cache them--we want to return 1147 * them to the system as quickly as possible. 1148 */ 1149 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1150 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1151 static_arena, KMC_NOMAGAZINE); 1152 1153 /* 1154 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1155 * memory, which corresponds to the old static reserve for TSBs. 1156 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1157 * memory we'll allocate for TSB slabs; beyond this point TSB 1158 * allocations will be taken from the kernel heap (via 1159 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1160 * consumer. 1161 */ 1162 if (tsb_alloc_hiwater_factor == 0) { 1163 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1164 } 1165 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1166 1167 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1168 if (!(disable_large_pages & (1 << sz))) 1169 break; 1170 } 1171 1172 if (sz < tsb_slab_ttesz) { 1173 tsb_slab_ttesz = sz; 1174 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1175 tsb_slab_size = 1 << tsb_slab_shift; 1176 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1177 use_bigtsb_arena = 0; 1178 } else if (use_bigtsb_arena && 1179 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1180 use_bigtsb_arena = 0; 1181 } 1182 1183 if (!use_bigtsb_arena) { 1184 bigtsb_slab_shift = tsb_slab_shift; 1185 } 1186 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1187 1188 /* 1189 * On smaller memory systems, allocate TSB memory in smaller chunks 1190 * than the default 4M slab size. We also honor disable_large_pages 1191 * here. 1192 * 1193 * The trap handlers need to be patched with the final slab shift, 1194 * since they need to be able to construct the TSB pointer at runtime. 
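 *
 * In other words, when tsb_max_growsize computes to TSB_512K_SZCODE or
 * less, no user TSB will ever exceed 512K, so there is no benefit to
 * carving 4M slabs; the code below drops to 512K slabs (TTE512K) provided
 * 512K pages have not been disabled.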
1195 */ 1196 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1197 !(disable_large_pages & (1 << TTE512K))) { 1198 tsb_slab_ttesz = TTE512K; 1199 tsb_slab_shift = MMU_PAGESHIFT512K; 1200 tsb_slab_size = MMU_PAGESIZE512K; 1201 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1202 use_bigtsb_arena = 0; 1203 } 1204 1205 if (!use_bigtsb_arena) { 1206 bigtsb_slab_ttesz = tsb_slab_ttesz; 1207 bigtsb_slab_shift = tsb_slab_shift; 1208 bigtsb_slab_size = tsb_slab_size; 1209 bigtsb_slab_mask = tsb_slab_mask; 1210 } 1211 1212 1213 /* 1214 * Set up memory callback to update tsb_alloc_hiwater and 1215 * tsb_max_growsize. 1216 */ 1217 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1218 ASSERT(i == 0); 1219 1220 /* 1221 * kmem_tsb_arena is the source from which large TSB slabs are 1222 * drawn. The quantum of this arena corresponds to the largest 1223 * TSB size we can dynamically allocate for user processes. 1224 * Currently it must also be a supported page size since we 1225 * use exactly one translation entry to map each slab page. 1226 * 1227 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1228 * which most TSBs are allocated. Since most TSB allocations are 1229 * typically 8K we have a kmem cache we stack on top of each 1230 * kmem_tsb_default_arena to speed up those allocations. 1231 * 1232 * Note the two-level scheme of arenas is required only 1233 * because vmem_create doesn't allow us to specify alignment 1234 * requirements. If this ever changes the code could be 1235 * simplified to use only one level of arenas. 1236 * 1237 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1238 * will be provided in addition to the 4M kmem_tsb_arena. 1239 */ 1240 if (use_bigtsb_arena) { 1241 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1242 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1243 vmem_xfree, heap_arena, 0, VM_SLEEP); 1244 } 1245 1246 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1247 sfmmu_vmem_xalloc_aligned_wrapper, 1248 vmem_xfree, heap_arena, 0, VM_SLEEP); 1249 1250 if (tsb_lgrp_affinity) { 1251 char s[50]; 1252 for (i = 0; i < NLGRPS_MAX; i++) { 1253 if (use_bigtsb_arena) { 1254 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1255 kmem_bigtsb_default_arena[i] = vmem_create(s, 1256 NULL, 0, 2 * tsb_slab_size, 1257 sfmmu_tsb_segkmem_alloc, 1258 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1259 0, VM_SLEEP | VM_BESTFIT); 1260 } 1261 1262 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1263 kmem_tsb_default_arena[i] = vmem_create(s, 1264 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1265 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1266 VM_SLEEP | VM_BESTFIT); 1267 1268 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1269 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1270 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1271 kmem_tsb_default_arena[i], 0); 1272 } 1273 } else { 1274 if (use_bigtsb_arena) { 1275 kmem_bigtsb_default_arena[0] = 1276 vmem_create("kmem_bigtsb_default", NULL, 0, 1277 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1278 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1279 VM_SLEEP | VM_BESTFIT); 1280 } 1281 1282 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1283 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1284 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1285 VM_SLEEP | VM_BESTFIT); 1286 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1287 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1288 kmem_tsb_default_arena[0], 0); 1289 } 1290 1291 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1292 
HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1293 sfmmu_hblkcache_destructor, 1294 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1295 hat_memload_arena, KMC_NOHASH); 1296 1297 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1298 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, 1299 VMC_DUMPSAFE | VM_SLEEP); 1300 1301 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1302 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1303 sfmmu_hblkcache_destructor, 1304 NULL, (void *)HME1BLK_SZ, 1305 hat_memload1_arena, KMC_NOHASH); 1306 1307 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1308 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1309 1310 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1311 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1312 NULL, NULL, static_arena, KMC_NOHASH); 1313 1314 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1315 sizeof (ism_ment_t), 0, NULL, NULL, 1316 NULL, NULL, NULL, 0); 1317 1318 /* 1319 * We grab the first hat for the kernel, 1320 */ 1321 AS_LOCK_ENTER(&kas, RW_WRITER); 1322 kas.a_hat = hat_alloc(&kas); 1323 AS_LOCK_EXIT(&kas); 1324 1325 /* 1326 * Initialize hblk_reserve. 1327 */ 1328 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1329 va_to_pa((caddr_t)hblk_reserve); 1330 1331 #ifndef UTSB_PHYS 1332 /* 1333 * Reserve some kernel virtual address space for the locked TTEs 1334 * that allow us to probe the TSB from TL>0. 1335 */ 1336 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1337 0, 0, NULL, NULL, VM_SLEEP); 1338 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1339 0, 0, NULL, NULL, VM_SLEEP); 1340 #endif 1341 1342 #ifdef VAC 1343 /* 1344 * The big page VAC handling code assumes VAC 1345 * will not be bigger than the smallest big 1346 * page- which is 64K. 1347 */ 1348 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1349 cmn_err(CE_PANIC, "VAC too big!"); 1350 } 1351 #endif 1352 1353 uhme_hash_pa = va_to_pa(uhme_hash); 1354 khme_hash_pa = va_to_pa(khme_hash); 1355 1356 /* 1357 * Initialize relocation locks. kpr_suspendlock is held 1358 * at PIL_MAX to prevent interrupts from pinning the holder 1359 * of a suspended TTE which may access it leading to a 1360 * deadlock condition. 1361 */ 1362 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1363 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1364 1365 /* 1366 * If Shared context support is disabled via /etc/system 1367 * set shctx_on to 0 here if it was set to 1 earlier in boot 1368 * sequence by cpu module initialization code. 1369 */ 1370 if (shctx_on && disable_shctx) { 1371 shctx_on = 0; 1372 } 1373 1374 if (shctx_on) { 1375 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1376 sizeof (srd_buckets[0]), KM_SLEEP); 1377 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1378 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1379 MUTEX_DEFAULT, NULL); 1380 } 1381 1382 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1383 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1384 NULL, NULL, NULL, 0); 1385 region_cache = kmem_cache_create("region_cache", 1386 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1387 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1388 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1389 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1390 NULL, NULL, NULL, 0); 1391 } 1392 1393 /* 1394 * Pre-allocate hrm_hashtab before enabling the collection of 1395 * refmod statistics. 
Allocating on the fly would mean us 1396 * running the risk of suffering recursive mutex enters or 1397 * deadlocks. 1398 */ 1399 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1400 KM_SLEEP); 1401 1402 /* Allocate per-cpu pending freelist of hmeblks */ 1403 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64, 1404 KM_SLEEP); 1405 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP( 1406 (uintptr_t)cpu_hme_pend, 64); 1407 1408 for (i = 0; i < NCPU; i++) { 1409 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT, 1410 NULL); 1411 } 1412 1413 if (cpu_hme_pend_thresh == 0) { 1414 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH; 1415 } 1416 } 1417 1418 /* 1419 * Initialize locking for the hat layer, called early during boot. 1420 */ 1421 static void 1422 hat_lock_init() 1423 { 1424 int i; 1425 1426 /* 1427 * initialize the array of mutexes protecting a page's mapping 1428 * list and p_nrm field. 1429 */ 1430 for (i = 0; i < MML_TABLE_SIZE; i++) 1431 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL); 1432 1433 if (kpm_enable) { 1434 for (i = 0; i < kpmp_table_sz; i++) { 1435 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1436 MUTEX_DEFAULT, NULL); 1437 } 1438 } 1439 1440 /* 1441 * Initialize array of mutex locks that protects sfmmu fields and 1442 * TSB lists. 1443 */ 1444 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1445 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1446 NULL); 1447 } 1448 1449 #define SFMMU_KERNEL_MAXVA \ 1450 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1451 1452 /* 1453 * Allocate a hat structure. 1454 * Called when an address space first uses a hat. 1455 */ 1456 struct hat * 1457 hat_alloc(struct as *as) 1458 { 1459 sfmmu_t *sfmmup; 1460 int i; 1461 uint64_t cnum; 1462 extern uint_t get_color_start(struct as *); 1463 1464 ASSERT(AS_WRITE_HELD(as)); 1465 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1466 sfmmup->sfmmu_as = as; 1467 sfmmup->sfmmu_flags = 0; 1468 sfmmup->sfmmu_tteflags = 0; 1469 sfmmup->sfmmu_rtteflags = 0; 1470 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1471 1472 if (as == &kas) { 1473 ksfmmup = sfmmup; 1474 sfmmup->sfmmu_cext = 0; 1475 cnum = KCONTEXT; 1476 1477 sfmmup->sfmmu_clrstart = 0; 1478 sfmmup->sfmmu_tsb = NULL; 1479 /* 1480 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1481 * to setup tsb_info for ksfmmup. 1482 */ 1483 } else { 1484 1485 /* 1486 * Just set to invalid ctx. When it faults, it will 1487 * get a valid ctx. This would avoid the situation 1488 * where we get a ctx, but it gets stolen and then 1489 * we fault when we try to run and so have to get 1490 * another ctx. 
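 *
 * (For example, a context allocated eagerly here could be stolen before
 * the process ever runs; it would then have to allocate another one on
 * its first fault anyway, so deferring the allocation loses nothing.)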
1491 */ 1492 sfmmup->sfmmu_cext = 0; 1493 cnum = INVALID_CONTEXT; 1494 1495 /* initialize original physical page coloring bin */ 1496 sfmmup->sfmmu_clrstart = get_color_start(as); 1497 #ifdef DEBUG 1498 if (tsb_random_size) { 1499 uint32_t randval = (uint32_t)gettick() >> 4; 1500 int size = randval % (tsb_max_growsize + 1); 1501 1502 /* chose a random tsb size for stress testing */ 1503 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1504 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1505 } else 1506 #endif /* DEBUG */ 1507 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1508 default_tsb_size, 1509 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1510 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1511 ASSERT(sfmmup->sfmmu_tsb != NULL); 1512 } 1513 1514 ASSERT(max_mmu_ctxdoms > 0); 1515 for (i = 0; i < max_mmu_ctxdoms; i++) { 1516 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1517 sfmmup->sfmmu_ctxs[i].gnum = 0; 1518 } 1519 1520 for (i = 0; i < max_mmu_page_sizes; i++) { 1521 sfmmup->sfmmu_ttecnt[i] = 0; 1522 sfmmup->sfmmu_scdrttecnt[i] = 0; 1523 sfmmup->sfmmu_ismttecnt[i] = 0; 1524 sfmmup->sfmmu_scdismttecnt[i] = 0; 1525 sfmmup->sfmmu_pgsz[i] = TTE8K; 1526 } 1527 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1528 sfmmup->sfmmu_iblk = NULL; 1529 sfmmup->sfmmu_ismhat = 0; 1530 sfmmup->sfmmu_scdhat = 0; 1531 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1532 if (sfmmup == ksfmmup) { 1533 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1534 } else { 1535 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1536 } 1537 sfmmup->sfmmu_free = 0; 1538 sfmmup->sfmmu_rmstat = 0; 1539 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1540 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1541 sfmmup->sfmmu_srdp = NULL; 1542 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1543 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1544 sfmmup->sfmmu_scdp = NULL; 1545 sfmmup->sfmmu_scd_link.next = NULL; 1546 sfmmup->sfmmu_scd_link.prev = NULL; 1547 return (sfmmup); 1548 } 1549 1550 /* 1551 * Create per-MMU context domain kstats for a given MMU ctx. 1552 */ 1553 static void 1554 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1555 { 1556 mmu_ctx_stat_t stat; 1557 kstat_t *mmu_kstat; 1558 1559 ASSERT(MUTEX_HELD(&cpu_lock)); 1560 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1561 1562 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1563 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1564 1565 if (mmu_kstat == NULL) { 1566 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1567 mmu_ctxp->mmu_idx); 1568 } else { 1569 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1570 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1571 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1572 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1573 mmu_ctxp->mmu_kstat = mmu_kstat; 1574 kstat_install(mmu_kstat); 1575 } 1576 } 1577 1578 /* 1579 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1580 * context domain information for a given CPU. If a platform does not 1581 * specify that interface, then the function below is used instead to return 1582 * default information. The defaults are as follows: 1583 * 1584 * - The number of MMU context IDs supported on any CPU in the 1585 * system is 8K. 1586 * - There is one MMU context domain per CPU. 
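 *
 * As an illustration only (hypothetical platform code, not an existing
 * consumer of the interface), a platform on which four strands share one
 * MMU might instead provide something like:
 *
 *	void
 *	plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
 *	{
 *		infop->mmu_nctxs = 8192;
 *		infop->mmu_idx = cpuid / 4;	(one domain per four strands)
 *	}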
1587 */ 1588 /*ARGSUSED*/ 1589 static void 1590 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1591 { 1592 infop->mmu_nctxs = nctxs; 1593 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1594 } 1595 1596 /* 1597 * Called during CPU initialization to set the MMU context-related information 1598 * for a CPU. 1599 * 1600 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1601 */ 1602 void 1603 sfmmu_cpu_init(cpu_t *cp) 1604 { 1605 mmu_ctx_info_t info; 1606 mmu_ctx_t *mmu_ctxp; 1607 1608 ASSERT(MUTEX_HELD(&cpu_lock)); 1609 1610 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1611 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1612 else 1613 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1614 1615 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1616 1617 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1618 /* Each mmu_ctx is cacheline aligned. */ 1619 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1620 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1621 1622 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1623 (void *)ipltospl(DISP_LEVEL)); 1624 mmu_ctxp->mmu_idx = info.mmu_idx; 1625 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1626 /* 1627 * Globally for lifetime of a system, 1628 * gnum must always increase. 1629 * mmu_saved_gnum is protected by the cpu_lock. 1630 */ 1631 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1632 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1633 1634 sfmmu_mmu_kstat_create(mmu_ctxp); 1635 1636 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1637 } else { 1638 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1639 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs); 1640 } 1641 1642 /* 1643 * The mmu_lock is acquired here to prevent races with 1644 * the wrap-around code. 1645 */ 1646 mutex_enter(&mmu_ctxp->mmu_lock); 1647 1648 1649 mmu_ctxp->mmu_ncpus++; 1650 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1651 CPU_MMU_IDX(cp) = info.mmu_idx; 1652 CPU_MMU_CTXP(cp) = mmu_ctxp; 1653 1654 mutex_exit(&mmu_ctxp->mmu_lock); 1655 } 1656 1657 static void 1658 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp) 1659 { 1660 ASSERT(MUTEX_HELD(&cpu_lock)); 1661 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock)); 1662 1663 mutex_destroy(&mmu_ctxp->mmu_lock); 1664 1665 if (mmu_ctxp->mmu_kstat) 1666 kstat_delete(mmu_ctxp->mmu_kstat); 1667 1668 /* mmu_saved_gnum is protected by the cpu_lock. */ 1669 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1670 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1671 1672 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1673 } 1674 1675 /* 1676 * Called to perform MMU context-related cleanup for a CPU. 1677 */ 1678 void 1679 sfmmu_cpu_cleanup(cpu_t *cp) 1680 { 1681 mmu_ctx_t *mmu_ctxp; 1682 1683 ASSERT(MUTEX_HELD(&cpu_lock)); 1684 1685 mmu_ctxp = CPU_MMU_CTXP(cp); 1686 ASSERT(mmu_ctxp != NULL); 1687 1688 /* 1689 * The mmu_lock is acquired here to prevent races with 1690 * the wrap-around code. 1691 */ 1692 mutex_enter(&mmu_ctxp->mmu_lock); 1693 1694 CPU_MMU_CTXP(cp) = NULL; 1695 1696 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1697 if (--mmu_ctxp->mmu_ncpus == 0) { 1698 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1699 mutex_exit(&mmu_ctxp->mmu_lock); 1700 sfmmu_ctxdom_free(mmu_ctxp); 1701 return; 1702 } 1703 1704 mutex_exit(&mmu_ctxp->mmu_lock); 1705 } 1706 1707 uint_t 1708 sfmmu_ctxdom_nctxs(int idx) 1709 { 1710 return (mmu_ctxs_tbl[idx]->mmu_nctxs); 1711 } 1712 1713 #ifdef sun4v 1714 /* 1715 * sfmmu_ctxdoms_* is an interface provided to help keep context domains 1716 * consistant after suspend/resume on system that can resume on a different 1717 * hardware than it was suspended. 
1718 * 1719 * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts 1720 * from being allocated. It acquires all hat_locks, which blocks most access to 1721 * context data, except for a few cases that are handled separately or are 1722 * harmless. It wraps each domain to increment gnum and invalidate on-CPU 1723 * contexts, and forces cnum to its max. As a result of this call all user 1724 * threads that are running on CPUs trap and try to perform wrap around but 1725 * can't because hat_locks are taken. Threads that were not on CPUs but started 1726 * by the scheduler go to sfmmu_alloc_ctx() to acquire a context without checking 1727 * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block 1728 * on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before CPUs 1729 * are paused, else it could deadlock acquiring locks held by paused CPUs. 1730 * 1731 * sfmmu_ctxdoms_remove() removes context domains from every CPU and records 1732 * the CPUs that had them. It must be called after CPUs have been paused. This 1733 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data, 1734 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx 1735 * runs with interrupts disabled. When CPUs are later resumed, they may enter 1736 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately 1737 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus 1738 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is 1739 * accessing the old context domains. 1740 * 1741 * sfmmu_ctxdoms_update(void) frees space used by old context domains and 1742 * allocates new context domains based on hardware layout. It initializes 1743 * every CPU that had a context domain before migration to have one again. 1744 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it 1745 * could deadlock acquiring locks held by paused CPUs. 1746 * 1747 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads 1748 * acquire new context ids and continue execution. 1749 * 1750 * Therefore these functions should be called in the following order: 1751 * suspend_routine() 1752 * sfmmu_ctxdoms_lock() 1753 * pause_cpus() 1754 * suspend() 1755 * if (suspend failed) 1756 * sfmmu_ctxdoms_unlock() 1757 * ... 1758 * sfmmu_ctxdoms_remove() 1759 * resume_cpus() 1760 * sfmmu_ctxdoms_update() 1761 * sfmmu_ctxdoms_unlock() 1762 */ 1763 static cpuset_t sfmmu_ctxdoms_pset; 1764 1765 void 1766 sfmmu_ctxdoms_remove() 1767 { 1768 processorid_t id; 1769 cpu_t *cp; 1770 1771 /* 1772 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can 1773 * be restored post-migration. A CPU may be powered off and not have a 1774 * domain, for example. 1775 */ 1776 CPUSET_ZERO(sfmmu_ctxdoms_pset); 1777 1778 for (id = 0; id < NCPU; id++) { 1779 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) { 1780 CPUSET_ADD(sfmmu_ctxdoms_pset, id); 1781 CPU_MMU_CTXP(cp) = NULL; 1782 } 1783 } 1784 } 1785 1786 void 1787 sfmmu_ctxdoms_lock(void) 1788 { 1789 int idx; 1790 mmu_ctx_t *mmu_ctxp; 1791 1792 sfmmu_hat_lock_all(); 1793 1794 /* 1795 * At this point, no thread can be in sfmmu_ctx_wrap_around, because 1796 * hat_lock is always taken before calling it. 1797 * 1798 * For each domain, set mmu_cnum to max so no more contexts can be 1799 * allocated, and wrap to flush on-CPU contexts and force threads to 1800 * acquire a new context when we later drop hat_lock after migration.
1801 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum, 1802 * but the latter uses CAS and will miscompare and not overwrite it. 1803 */ 1804 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */ 1805 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1806 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) { 1807 mutex_enter(&mmu_ctxp->mmu_lock); 1808 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs; 1809 /* make sure updated cnum visible */ 1810 membar_enter(); 1811 mutex_exit(&mmu_ctxp->mmu_lock); 1812 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE); 1813 } 1814 } 1815 kpreempt_enable(); 1816 } 1817 1818 void 1819 sfmmu_ctxdoms_unlock(void) 1820 { 1821 sfmmu_hat_unlock_all(); 1822 } 1823 1824 void 1825 sfmmu_ctxdoms_update(void) 1826 { 1827 processorid_t id; 1828 cpu_t *cp; 1829 uint_t idx; 1830 mmu_ctx_t *mmu_ctxp; 1831 1832 /* 1833 * Free all context domains. As side effect, this increases 1834 * mmu_saved_gnum to the maximum gnum over all domains, which is used to 1835 * init gnum in the new domains, which therefore will be larger than the 1836 * sfmmu gnum for any process, guaranteeing that every process will see 1837 * a new generation and allocate a new context regardless of what new 1838 * domain it runs in. 1839 */ 1840 mutex_enter(&cpu_lock); 1841 1842 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1843 if (mmu_ctxs_tbl[idx] != NULL) { 1844 mmu_ctxp = mmu_ctxs_tbl[idx]; 1845 mmu_ctxs_tbl[idx] = NULL; 1846 sfmmu_ctxdom_free(mmu_ctxp); 1847 } 1848 } 1849 1850 for (id = 0; id < NCPU; id++) { 1851 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) && 1852 (cp = cpu[id]) != NULL) 1853 sfmmu_cpu_init(cp); 1854 } 1855 mutex_exit(&cpu_lock); 1856 } 1857 #endif 1858 1859 /* 1860 * Hat_setup, makes an address space context the current active one. 1861 * In sfmmu this translates to setting the secondary context with the 1862 * corresponding context. 1863 */ 1864 void 1865 hat_setup(struct hat *sfmmup, int allocflag) 1866 { 1867 hatlock_t *hatlockp; 1868 1869 /* Init needs some special treatment. */ 1870 if (allocflag == HAT_INIT) { 1871 /* 1872 * Make sure that we have 1873 * 1. a TSB 1874 * 2. a valid ctx that doesn't get stolen after this point. 1875 */ 1876 hatlockp = sfmmu_hat_enter(sfmmup); 1877 1878 /* 1879 * Swap in the TSB. hat_init() allocates tsbinfos without 1880 * TSBs, but we need one for init, since the kernel does some 1881 * special things to set up its stack and needs the TSB to 1882 * resolve page faults. 1883 */ 1884 sfmmu_tsb_swapin(sfmmup, hatlockp); 1885 1886 sfmmu_get_ctx(sfmmup); 1887 1888 sfmmu_hat_exit(hatlockp); 1889 } else { 1890 ASSERT(allocflag == HAT_ALLOC); 1891 1892 hatlockp = sfmmu_hat_enter(sfmmup); 1893 kpreempt_disable(); 1894 1895 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1896 /* 1897 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1898 * pagesize bits don't matter in this case since we are passing 1899 * INVALID_CONTEXT to it. 1900 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1901 */ 1902 sfmmu_setctx_sec(INVALID_CONTEXT); 1903 sfmmu_clear_utsbinfo(); 1904 1905 kpreempt_enable(); 1906 sfmmu_hat_exit(hatlockp); 1907 } 1908 } 1909 1910 /* 1911 * Free all the translation resources for the specified address space. 1912 * Called from as_free when an address space is being destroyed. 
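 *
 * hat_free_start() only marks the hat as being freed (and detaches it
 * from its SCD, if any); the heavy lifting is done by hat_free_end()
 * after the segments have been unmapped. A sketch of the expected
 * calling sequence in as_free(), shown for illustration rather than as
 * a verbatim copy of that code:
 *
 *	hat_free_start(as->a_hat);
 *	(unmap and free every segment in the address space)
 *	hat_free_end(as->a_hat);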
1913 */ 1914 void 1915 hat_free_start(struct hat *sfmmup) 1916 { 1917 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 1918 ASSERT(sfmmup != ksfmmup); 1919 1920 sfmmup->sfmmu_free = 1; 1921 if (sfmmup->sfmmu_scdp != NULL) { 1922 sfmmu_leave_scd(sfmmup, 0); 1923 } 1924 1925 ASSERT(sfmmup->sfmmu_scdp == NULL); 1926 } 1927 1928 void 1929 hat_free_end(struct hat *sfmmup) 1930 { 1931 int i; 1932 1933 ASSERT(sfmmup->sfmmu_free == 1); 1934 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1939 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1940 1941 if (sfmmup->sfmmu_rmstat) { 1942 hat_freestat(sfmmup->sfmmu_as, NULL); 1943 } 1944 1945 while (sfmmup->sfmmu_tsb != NULL) { 1946 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1947 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1948 sfmmup->sfmmu_tsb = next; 1949 } 1950 1951 if (sfmmup->sfmmu_srdp != NULL) { 1952 sfmmu_leave_srd(sfmmup); 1953 ASSERT(sfmmup->sfmmu_srdp == NULL); 1954 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1955 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1956 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1957 SFMMU_L2_HMERLINKS_SIZE); 1958 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1959 } 1960 } 1961 } 1962 sfmmu_free_sfmmu(sfmmup); 1963 1964 #ifdef DEBUG 1965 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1966 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1967 } 1968 #endif 1969 1970 kmem_cache_free(sfmmuid_cache, sfmmup); 1971 } 1972 1973 /* 1974 * Set up any translation structures, for the specified address space, 1975 * that are needed or preferred when the process is being swapped in. 1976 */ 1977 /* ARGSUSED */ 1978 void 1979 hat_swapin(struct hat *hat) 1980 { 1981 } 1982 1983 /* 1984 * Free all of the translation resources, for the specified address space, 1985 * that can be freed while the process is swapped out. Called from as_swapout. 1986 * Also, free up the ctx that this process was using. 1987 */ 1988 void 1989 hat_swapout(struct hat *sfmmup) 1990 { 1991 struct hmehash_bucket *hmebp; 1992 struct hme_blk *hmeblkp; 1993 struct hme_blk *pr_hblk = NULL; 1994 struct hme_blk *nx_hblk; 1995 int i; 1996 struct hme_blk *list = NULL; 1997 hatlock_t *hatlockp; 1998 struct tsb_info *tsbinfop; 1999 struct free_tsb { 2000 struct free_tsb *next; 2001 struct tsb_info *tsbinfop; 2002 }; /* free list of TSBs */ 2003 struct free_tsb *freelist, *last, *next; 2004 2005 SFMMU_STAT(sf_swapout); 2006 2007 /* 2008 * There is no way to go from an as to all its translations in sfmmu. 2009 * Here is one of the times when we take the big hit and traverse 2010 * the hash looking for hme_blks to free up. Not only do we free up 2011 * this as hme_blks but all those that are free. We are obviously 2012 * swapping because we need memory so let's free up as much 2013 * as we can. 2014 * 2015 * Note that we don't flush TLB/TSB here -- it's not necessary 2016 * because: 2017 * 1) we free the ctx we're using and throw away the TSB(s); 2018 * 2) processes aren't runnable while being swapped out. 
2019 */ 2020 ASSERT(sfmmup != KHATID); 2021 for (i = 0; i <= UHMEHASH_SZ; i++) { 2022 hmebp = &uhme_hash[i]; 2023 SFMMU_HASH_LOCK(hmebp); 2024 hmeblkp = hmebp->hmeblkp; 2025 pr_hblk = NULL; 2026 while (hmeblkp) { 2027 2028 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 2029 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 2030 ASSERT(!hmeblkp->hblk_shared); 2031 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 2032 (caddr_t)get_hblk_base(hmeblkp), 2033 get_hblk_endaddr(hmeblkp), 2034 NULL, HAT_UNLOAD); 2035 } 2036 nx_hblk = hmeblkp->hblk_next; 2037 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 2038 ASSERT(!hmeblkp->hblk_lckcnt); 2039 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2040 &list, 0); 2041 } else { 2042 pr_hblk = hmeblkp; 2043 } 2044 hmeblkp = nx_hblk; 2045 } 2046 SFMMU_HASH_UNLOCK(hmebp); 2047 } 2048 2049 sfmmu_hblks_list_purge(&list, 0); 2050 2051 /* 2052 * Now free up the ctx so that others can reuse it. 2053 */ 2054 hatlockp = sfmmu_hat_enter(sfmmup); 2055 2056 sfmmu_invalidate_ctx(sfmmup); 2057 2058 /* 2059 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 2060 * If TSBs were never swapped in, just return. 2061 * This implies that we don't support partial swapping 2062 * of TSBs -- either all are swapped out, or none are. 2063 * 2064 * We must hold the HAT lock here to prevent racing with another 2065 * thread trying to unmap TTEs from the TSB or running the post- 2066 * relocator after relocating the TSB's memory. Unfortunately, we 2067 * can't free memory while holding the HAT lock or we could 2068 * deadlock, so we build a list of TSBs to be freed after marking 2069 * the tsbinfos as swapped out and free them after dropping the 2070 * lock. 2071 */ 2072 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 2073 sfmmu_hat_exit(hatlockp); 2074 return; 2075 } 2076 2077 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 2078 last = freelist = NULL; 2079 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 2080 tsbinfop = tsbinfop->tsb_next) { 2081 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 2082 2083 /* 2084 * Cast the TSB into a struct free_tsb and put it on the free 2085 * list. 2086 */ 2087 if (freelist == NULL) { 2088 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 2089 } else { 2090 last->next = (struct free_tsb *)tsbinfop->tsb_va; 2091 last = last->next; 2092 } 2093 last->next = NULL; 2094 last->tsbinfop = tsbinfop; 2095 tsbinfop->tsb_flags |= TSB_SWAPPED; 2096 /* 2097 * Zero out the TTE to clear the valid bit. 2098 * Note we can't use a value like 0xbad because we want to 2099 * ensure diagnostic bits are NEVER set on TTEs that might 2100 * be loaded. The intent is to catch any invalid access 2101 * to the swapped TSB, such as a thread running with a valid 2102 * context without first calling sfmmu_tsb_swapin() to 2103 * allocate TSB memory. 2104 */ 2105 tsbinfop->tsb_tte.ll = 0; 2106 } 2107 2108 /* Now we can drop the lock and free the TSB memory. 
*/ 2109 sfmmu_hat_exit(hatlockp); 2110 for (; freelist != NULL; freelist = next) { 2111 next = freelist->next; 2112 sfmmu_tsb_free(freelist->tsbinfop); 2113 } 2114 } 2115 2116 /* 2117 * Duplicate the translations of an as into another newas 2118 */ 2119 /* ARGSUSED */ 2120 int 2121 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 2122 uint_t flag) 2123 { 2124 sf_srd_t *srdp; 2125 sf_scd_t *scdp; 2126 int i; 2127 extern uint_t get_color_start(struct as *); 2128 2129 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 2130 (flag == HAT_DUP_SRD)); 2131 ASSERT(hat != ksfmmup); 2132 ASSERT(newhat != ksfmmup); 2133 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 2134 2135 if (flag == HAT_DUP_COW) { 2136 panic("hat_dup: HAT_DUP_COW not supported"); 2137 } 2138 2139 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2140 ASSERT(srdp->srd_evp != NULL); 2141 VN_HOLD(srdp->srd_evp); 2142 ASSERT(srdp->srd_refcnt > 0); 2143 newhat->sfmmu_srdp = srdp; 2144 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt); 2145 } 2146 2147 /* 2148 * HAT_DUP_ALL flag is used after as duplication is done. 2149 */ 2150 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2151 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2152 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2153 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2154 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2155 } 2156 2157 /* check if need to join scd */ 2158 if ((scdp = hat->sfmmu_scdp) != NULL && 2159 newhat->sfmmu_scdp != scdp) { 2160 int ret; 2161 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2162 &scdp->scd_region_map, ret); 2163 ASSERT(ret); 2164 sfmmu_join_scd(scdp, newhat); 2165 ASSERT(newhat->sfmmu_scdp == scdp && 2166 scdp->scd_refcnt >= 2); 2167 for (i = 0; i < max_mmu_page_sizes; i++) { 2168 newhat->sfmmu_ismttecnt[i] = 2169 hat->sfmmu_ismttecnt[i]; 2170 newhat->sfmmu_scdismttecnt[i] = 2171 hat->sfmmu_scdismttecnt[i]; 2172 } 2173 } 2174 2175 sfmmu_check_page_sizes(newhat, 1); 2176 } 2177 2178 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2179 update_proc_pgcolorbase_after_fork != 0) { 2180 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2181 } 2182 return (0); 2183 } 2184 2185 void 2186 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2187 uint_t attr, uint_t flags) 2188 { 2189 hat_do_memload(hat, addr, pp, attr, flags, 2190 SFMMU_INVALID_SHMERID); 2191 } 2192 2193 void 2194 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2195 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2196 { 2197 uint_t rid; 2198 if (rcookie == HAT_INVALID_REGION_COOKIE) { 2199 hat_do_memload(hat, addr, pp, attr, flags, 2200 SFMMU_INVALID_SHMERID); 2201 return; 2202 } 2203 rid = (uint_t)((uint64_t)rcookie); 2204 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2205 hat_do_memload(hat, addr, pp, attr, flags, rid); 2206 } 2207 2208 /* 2209 * Set up addr to map to page pp with protection prot. 2210 * As an optimization we also load the TSB with the 2211 * corresponding tte but it is no big deal if the tte gets kicked out. 
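 *
 * For reference, a caller such as a segment driver resolving a fault
 * would use the public wrapper; the protections and flags below are
 * only an example:
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);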
2212 */ 2213 static void 2214 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2215 uint_t attr, uint_t flags, uint_t rid) 2216 { 2217 tte_t tte; 2218 2219 2220 ASSERT(hat != NULL); 2221 ASSERT(PAGE_LOCKED(pp)); 2222 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2223 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2224 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2225 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2226 2227 if (PP_ISFREE(pp)) { 2228 panic("hat_memload: loading a mapping to free page %p", 2229 (void *)pp); 2230 } 2231 2232 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as)); 2233 2234 if (flags & ~SFMMU_LOAD_ALLFLAG) 2235 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2236 flags & ~SFMMU_LOAD_ALLFLAG); 2237 2238 if (hat->sfmmu_rmstat) 2239 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2240 2241 #if defined(SF_ERRATA_57) 2242 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2243 (addr < errata57_limit) && (attr & PROT_EXEC) && 2244 !(flags & HAT_LOAD_SHARE)) { 2245 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2246 " page executable"); 2247 attr &= ~PROT_EXEC; 2248 } 2249 #endif 2250 2251 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2252 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2253 2254 /* 2255 * Check TSB and TLB page sizes. 2256 */ 2257 if ((flags & HAT_LOAD_SHARE) == 0) { 2258 sfmmu_check_page_sizes(hat, 1); 2259 } 2260 } 2261 2262 /* 2263 * hat_devload() can be called to map real memory (e.g. 2264 * /dev/kmem). Even though hat_devload() can determine that the pfn 2265 * refers to memory, it may be unable to get a shared lock on the 2266 * page (because someone else holds it exclusively) and will then 2267 * pass pp = NULL. If tteload doesn't get a non-NULL 2268 * page pointer it can't cache the mapping.
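 *
 * For illustration only, a driver mapping one page of device registers
 * into the kernel might issue something like the following (the
 * attributes and flags shown are an example, not a requirement):
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER, HAT_LOAD_LOCK);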
2269 */ 2270 void 2271 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2272 uint_t attr, int flags) 2273 { 2274 tte_t tte; 2275 struct page *pp = NULL; 2276 int use_lgpg = 0; 2277 2278 ASSERT(hat != NULL); 2279 2280 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2281 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2282 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as)); 2283 if (len == 0) 2284 panic("hat_devload: zero len"); 2285 if (flags & ~SFMMU_LOAD_ALLFLAG) 2286 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2287 flags & ~SFMMU_LOAD_ALLFLAG); 2288 2289 #if defined(SF_ERRATA_57) 2290 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2291 (addr < errata57_limit) && (attr & PROT_EXEC) && 2292 !(flags & HAT_LOAD_SHARE)) { 2293 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2294 " page executable"); 2295 attr &= ~PROT_EXEC; 2296 } 2297 #endif 2298 2299 /* 2300 * If it's a memory page find its pp 2301 */ 2302 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2303 pp = page_numtopp_nolock(pfn); 2304 if (pp == NULL) { 2305 flags |= HAT_LOAD_NOCONSIST; 2306 } else { 2307 if (PP_ISFREE(pp)) { 2308 panic("hat_memload: loading " 2309 "a mapping to free page %p", 2310 (void *)pp); 2311 } 2312 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2313 panic("hat_memload: loading a mapping " 2314 "to unlocked relocatable page %p", 2315 (void *)pp); 2316 } 2317 ASSERT(len == MMU_PAGESIZE); 2318 } 2319 } 2320 2321 if (hat->sfmmu_rmstat) 2322 hat_resvstat(len, hat->sfmmu_as, addr); 2323 2324 if (flags & HAT_LOAD_NOCONSIST) { 2325 attr |= SFMMU_UNCACHEVTTE; 2326 use_lgpg = 1; 2327 } 2328 if (!pf_is_memory(pfn)) { 2329 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2330 use_lgpg = 1; 2331 switch (attr & HAT_ORDER_MASK) { 2332 case HAT_STRICTORDER: 2333 case HAT_UNORDERED_OK: 2334 /* 2335 * we set the side effect bit for all non 2336 * memory mappings unless merging is ok 2337 */ 2338 attr |= SFMMU_SIDEFFECT; 2339 break; 2340 case HAT_MERGING_OK: 2341 case HAT_LOADCACHING_OK: 2342 case HAT_STORECACHING_OK: 2343 break; 2344 default: 2345 panic("hat_devload: bad attr"); 2346 break; 2347 } 2348 } 2349 while (len) { 2350 if (!use_lgpg) { 2351 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2352 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2353 flags, SFMMU_INVALID_SHMERID); 2354 len -= MMU_PAGESIZE; 2355 addr += MMU_PAGESIZE; 2356 pfn++; 2357 continue; 2358 } 2359 /* 2360 * try to use large pages, check va/pa alignments 2361 * Note that 32M/256M page sizes are not (yet) supported. 
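 * For example, assuming those sizes are not disabled, an 8MB + 512KB
 * request whose va and pa both start on a 4MB boundary is satisfied
 * with two 4M ttes followed by one 512K tte, while a request that is
 * never suitably aligned simply falls through to the 8K case below.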
2362 */ 2363 if ((len >= MMU_PAGESIZE4M) && 2364 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 2365 !(disable_large_pages & (1 << TTE4M)) && 2366 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 2367 sfmmu_memtte(&tte, pfn, attr, TTE4M); 2368 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2369 flags, SFMMU_INVALID_SHMERID); 2370 len -= MMU_PAGESIZE4M; 2371 addr += MMU_PAGESIZE4M; 2372 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 2373 } else if ((len >= MMU_PAGESIZE512K) && 2374 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 2375 !(disable_large_pages & (1 << TTE512K)) && 2376 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 2377 sfmmu_memtte(&tte, pfn, attr, TTE512K); 2378 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2379 flags, SFMMU_INVALID_SHMERID); 2380 len -= MMU_PAGESIZE512K; 2381 addr += MMU_PAGESIZE512K; 2382 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 2383 } else if ((len >= MMU_PAGESIZE64K) && 2384 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 2385 !(disable_large_pages & (1 << TTE64K)) && 2386 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 2387 sfmmu_memtte(&tte, pfn, attr, TTE64K); 2388 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2389 flags, SFMMU_INVALID_SHMERID); 2390 len -= MMU_PAGESIZE64K; 2391 addr += MMU_PAGESIZE64K; 2392 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 2393 } else { 2394 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2395 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2396 flags, SFMMU_INVALID_SHMERID); 2397 len -= MMU_PAGESIZE; 2398 addr += MMU_PAGESIZE; 2399 pfn++; 2400 } 2401 } 2402 2403 /* 2404 * Check TSB and TLB page sizes. 2405 */ 2406 if ((flags & HAT_LOAD_SHARE) == 0) { 2407 sfmmu_check_page_sizes(hat, 1); 2408 } 2409 } 2410 2411 void 2412 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2413 struct page **pps, uint_t attr, uint_t flags) 2414 { 2415 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2416 SFMMU_INVALID_SHMERID); 2417 } 2418 2419 void 2420 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 2421 struct page **pps, uint_t attr, uint_t flags, 2422 hat_region_cookie_t rcookie) 2423 { 2424 uint_t rid; 2425 if (rcookie == HAT_INVALID_REGION_COOKIE) { 2426 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2427 SFMMU_INVALID_SHMERID); 2428 return; 2429 } 2430 rid = (uint_t)((uint64_t)rcookie); 2431 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2432 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid); 2433 } 2434 2435 /* 2436 * Map the largest extent possible out of the page array. The array may NOT 2437 * be in order. The largest possible mapping a page can have 2438 * is specified in the p_szc field. The p_szc field 2439 * cannot change as long as there are any mappings (large or small) 2440 * to any of the pages that make up the large page. (i.e. any 2441 * promotion/demotion of page size is not up to the hat but up to 2442 * the page free list manager). The array 2443 * should consist of properly aligned contiguous pages that are 2444 * part of a big page for a large mapping to be created.
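 *
 * As an illustrative example only (assuming an 8K base page size): a
 * caller holding eight contiguous, 64K-aligned constituent pages of a
 * 64K large page (p_szc >= 1) in pps[] could establish the mapping with
 *
 *	hat_memload_array(as->a_hat, addr, 8 * MMU_PAGESIZE, pps,
 *	    PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 * and this routine would then attempt a single 64K tte before falling
 * back to 8K ttes.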
2445 */ 2446 static void 2447 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len, 2448 struct page **pps, uint_t attr, uint_t flags, uint_t rid) 2449 { 2450 int ttesz; 2451 size_t mapsz; 2452 pgcnt_t numpg, npgs; 2453 tte_t tte; 2454 page_t *pp; 2455 uint_t large_pages_disable; 2456 2457 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2458 SFMMU_VALIDATE_HMERID(hat, rid, addr, len); 2459 2460 if (hat->sfmmu_rmstat) 2461 hat_resvstat(len, hat->sfmmu_as, addr); 2462 2463 #if defined(SF_ERRATA_57) 2464 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2465 (addr < errata57_limit) && (attr & PROT_EXEC) && 2466 !(flags & HAT_LOAD_SHARE)) { 2467 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2468 "user page executable"); 2469 attr &= ~PROT_EXEC; 2470 } 2471 #endif 2472 2473 /* Get number of pages */ 2474 npgs = len >> MMU_PAGESHIFT; 2475 2476 if (flags & HAT_LOAD_SHARE) { 2477 large_pages_disable = disable_ism_large_pages; 2478 } else { 2479 large_pages_disable = disable_large_pages; 2480 } 2481 2482 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2483 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2484 rid); 2485 return; 2486 } 2487 2488 while (npgs >= NHMENTS) { 2489 pp = *pps; 2490 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2491 /* 2492 * Check if this page size is disabled. 2493 */ 2494 if (large_pages_disable & (1 << ttesz)) 2495 continue; 2496 2497 numpg = TTEPAGES(ttesz); 2498 mapsz = numpg << MMU_PAGESHIFT; 2499 if ((npgs >= numpg) && 2500 IS_P2ALIGNED(addr, mapsz) && 2501 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2502 /* 2503 * At this point we have enough pages and 2504 * we know the virtual address and the pfn 2505 * are properly aligned. We still need 2506 * to check for physical contiguity but since 2507 * it is very likely that this is the case 2508 * we will assume they are so and undo 2509 * the request if necessary. It would 2510 * be great if we could get a hint flag 2511 * like HAT_CONTIG which would tell us 2512 * the pages are contigous for sure. 2513 */ 2514 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2515 attr, ttesz); 2516 if (!sfmmu_tteload_array(hat, &tte, addr, 2517 pps, flags, rid)) { 2518 break; 2519 } 2520 } 2521 } 2522 if (ttesz == TTE8K) { 2523 /* 2524 * We were not able to map array using a large page 2525 * batch a hmeblk or fraction at a time. 2526 */ 2527 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2528 & (NHMENTS-1); 2529 numpg = NHMENTS - numpg; 2530 ASSERT(numpg <= npgs); 2531 mapsz = numpg * MMU_PAGESIZE; 2532 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2533 numpg, rid); 2534 } 2535 addr += mapsz; 2536 npgs -= numpg; 2537 pps += numpg; 2538 } 2539 2540 if (npgs) { 2541 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2542 rid); 2543 } 2544 2545 /* 2546 * Check TSB and TLB page sizes. 2547 */ 2548 if ((flags & HAT_LOAD_SHARE) == 0) { 2549 sfmmu_check_page_sizes(hat, 1); 2550 } 2551 } 2552 2553 /* 2554 * Function tries to batch 8K pages into the same hme blk. 2555 */ 2556 static void 2557 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2558 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid) 2559 { 2560 tte_t tte; 2561 page_t *pp; 2562 struct hmehash_bucket *hmebp; 2563 struct hme_blk *hmeblkp; 2564 int index; 2565 2566 while (npgs) { 2567 /* 2568 * Acquire the hash bucket. 2569 */ 2570 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2571 rid); 2572 ASSERT(hmebp); 2573 2574 /* 2575 * Find the hment block. 
2576 */ 2577 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2578 TTE8K, flags, rid); 2579 ASSERT(hmeblkp); 2580 2581 do { 2582 /* 2583 * Make the tte. 2584 */ 2585 pp = *pps; 2586 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2587 2588 /* 2589 * Add the translation. 2590 */ 2591 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2592 vaddr, pps, flags, rid); 2593 2594 /* 2595 * Goto next page. 2596 */ 2597 pps++; 2598 npgs--; 2599 2600 /* 2601 * Goto next address. 2602 */ 2603 vaddr += MMU_PAGESIZE; 2604 2605 /* 2606 * Don't crossover into a different hmentblk. 2607 */ 2608 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2609 (NHMENTS-1)); 2610 2611 } while (index != 0 && npgs != 0); 2612 2613 /* 2614 * Release the hash bucket. 2615 */ 2616 2617 sfmmu_tteload_release_hashbucket(hmebp); 2618 } 2619 } 2620 2621 /* 2622 * Construct a tte for a page: 2623 * 2624 * tte_valid = 1 2625 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2626 * tte_size = size 2627 * tte_nfo = attr & HAT_NOFAULT 2628 * tte_ie = attr & HAT_STRUCTURE_LE 2629 * tte_hmenum = hmenum 2630 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2631 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2632 * tte_ref = 1 (optimization) 2633 * tte_wr_perm = attr & PROT_WRITE; 2634 * tte_no_sync = attr & HAT_NOSYNC 2635 * tte_lock = attr & SFMMU_LOCKTTE 2636 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2637 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2638 * tte_e = attr & SFMMU_SIDEFFECT 2639 * tte_priv = !(attr & PROT_USER) 2640 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2641 * tte_glb = 0 2642 */ 2643 void 2644 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2645 { 2646 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2647 2648 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2649 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2650 2651 if (TTE_IS_NOSYNC(ttep)) { 2652 TTE_SET_REF(ttep); 2653 if (TTE_IS_WRITABLE(ttep)) { 2654 TTE_SET_MOD(ttep); 2655 } 2656 } 2657 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2658 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2659 } 2660 } 2661 2662 /* 2663 * This function will add a translation to the hme_blk and allocate the 2664 * hme_blk if one does not exist. 2665 * If a page structure is specified then it will add the 2666 * corresponding hment to the mapping list. 2667 * It will also update the hmenum field for the tte. 2668 * 2669 * Currently this function is only used for kernel mappings. 2670 * So pass invalid region to sfmmu_tteload_array(). 2671 */ 2672 void 2673 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2674 uint_t flags) 2675 { 2676 ASSERT(sfmmup == ksfmmup); 2677 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2678 SFMMU_INVALID_SHMERID); 2679 } 2680 2681 /* 2682 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2683 * Assumes that a particular page size may only be resident in one TSB. 2684 */ 2685 static void 2686 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2687 { 2688 struct tsb_info *tsbinfop = NULL; 2689 uint64_t tag; 2690 struct tsbe *tsbe_addr; 2691 uint64_t tsb_base; 2692 uint_t tsb_size; 2693 int vpshift = MMU_PAGESHIFT; 2694 int phys = 0; 2695 2696 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2697 phys = ktsb_phys; 2698 if (ttesz >= TTE4M) { 2699 #ifndef sun4v 2700 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2701 #endif 2702 tsb_base = (phys)? 
ktsb4m_pbase : (uint64_t)ktsb4m_base; 2703 tsb_size = ktsb4m_szcode; 2704 } else { 2705 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2706 tsb_size = ktsb_szcode; 2707 } 2708 } else { 2709 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2710 2711 /* 2712 * If there isn't a TSB for this page size, or the TSB is 2713 * swapped out, there is nothing to do. Note that the latter 2714 * case seems impossible but can occur if hat_pageunload() 2715 * is called on an ISM mapping while the process is swapped 2716 * out. 2717 */ 2718 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2719 return; 2720 2721 /* 2722 * If another thread is in the middle of relocating a TSB 2723 * we can't unload the entry so set a flag so that the 2724 * TSB will be flushed before it can be accessed by the 2725 * process. 2726 */ 2727 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2728 if (ttep == NULL) 2729 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2730 return; 2731 } 2732 #if defined(UTSB_PHYS) 2733 phys = 1; 2734 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2735 #else 2736 tsb_base = (uint64_t)tsbinfop->tsb_va; 2737 #endif 2738 tsb_size = tsbinfop->tsb_szc; 2739 } 2740 if (ttesz >= TTE4M) 2741 vpshift = MMU_PAGESHIFT4M; 2742 2743 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2744 tag = sfmmu_make_tsbtag(vaddr); 2745 2746 if (ttep == NULL) { 2747 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2748 } else { 2749 if (ttesz >= TTE4M) { 2750 SFMMU_STAT(sf_tsb_load4m); 2751 } else { 2752 SFMMU_STAT(sf_tsb_load8k); 2753 } 2754 2755 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2756 } 2757 } 2758 2759 /* 2760 * Unmap all entries from [start, end) matching the given page size. 2761 * 2762 * This function is used primarily to unmap replicated 64K or 512K entries 2763 * from the TSB that are inserted using the base page size TSB pointer, but 2764 * it may also be called to unmap a range of addresses from the TSB. 2765 */ 2766 void 2767 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2768 { 2769 struct tsb_info *tsbinfop; 2770 uint64_t tag; 2771 struct tsbe *tsbe_addr; 2772 caddr_t vaddr; 2773 uint64_t tsb_base; 2774 int vpshift, vpgsz; 2775 uint_t tsb_size; 2776 int phys = 0; 2777 2778 /* 2779 * Assumptions: 2780 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2781 * at a time shooting down any valid entries we encounter. 2782 * 2783 * If ttesz >= 4M we walk the range 4M at a time shooting 2784 * down any valid mappings we find. 2785 */ 2786 if (sfmmup == ksfmmup) { 2787 phys = ktsb_phys; 2788 if (ttesz >= TTE4M) { 2789 #ifndef sun4v 2790 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2791 #endif 2792 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2793 tsb_size = ktsb4m_szcode; 2794 } else { 2795 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2796 tsb_size = ktsb_szcode; 2797 } 2798 } else { 2799 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2800 2801 /* 2802 * If there isn't a TSB for this page size, or the TSB is 2803 * swapped out, there is nothing to do. Note that the latter 2804 * case seems impossible but can occur if hat_pageunload() 2805 * is called on an ISM mapping while the process is swapped 2806 * out. 2807 */ 2808 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2809 return; 2810 2811 /* 2812 * If another thread is in the middle of relocating a TSB 2813 * we can't unload the entry so set a flag so that the 2814 * TSB will be flushed before it can be accessed by the 2815 * process. 
2816 */ 2817 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2818 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2819 return; 2820 } 2821 #if defined(UTSB_PHYS) 2822 phys = 1; 2823 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2824 #else 2825 tsb_base = (uint64_t)tsbinfop->tsb_va; 2826 #endif 2827 tsb_size = tsbinfop->tsb_szc; 2828 } 2829 if (ttesz >= TTE4M) { 2830 vpshift = MMU_PAGESHIFT4M; 2831 vpgsz = MMU_PAGESIZE4M; 2832 } else { 2833 vpshift = MMU_PAGESHIFT; 2834 vpgsz = MMU_PAGESIZE; 2835 } 2836 2837 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2838 tag = sfmmu_make_tsbtag(vaddr); 2839 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2840 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2841 } 2842 } 2843 2844 /* 2845 * Select the optimum TSB size given the number of mappings 2846 * that need to be cached. 2847 */ 2848 static int 2849 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2850 { 2851 int szc = 0; 2852 2853 #ifdef DEBUG 2854 if (tsb_grow_stress) { 2855 uint32_t randval = (uint32_t)gettick() >> 4; 2856 return (randval % (tsb_max_growsize + 1)); 2857 } 2858 #endif /* DEBUG */ 2859 2860 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2861 szc++; 2862 return (szc); 2863 } 2864 2865 /* 2866 * This function will add a translation to the hme_blk and allocate the 2867 * hme_blk if one does not exist. 2868 * If a page structure is specified then it will add the 2869 * corresponding hment to the mapping list. 2870 * It will also update the hmenum field for the tte. 2871 * Furthermore, it attempts to create a large page translation 2872 * for <addr,hat> at page array pps. It assumes addr and first 2873 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2874 */ 2875 static int 2876 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2877 page_t **pps, uint_t flags, uint_t rid) 2878 { 2879 struct hmehash_bucket *hmebp; 2880 struct hme_blk *hmeblkp; 2881 int ret; 2882 uint_t size; 2883 2884 /* 2885 * Get mapping size. 2886 */ 2887 size = TTE_CSZ(ttep); 2888 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2889 2890 /* 2891 * Acquire the hash bucket. 2892 */ 2893 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2894 ASSERT(hmebp); 2895 2896 /* 2897 * Find the hment block. 2898 */ 2899 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2900 rid); 2901 ASSERT(hmeblkp); 2902 2903 /* 2904 * Add the translation. 2905 */ 2906 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2907 rid); 2908 2909 /* 2910 * Release the hash bucket. 2911 */ 2912 sfmmu_tteload_release_hashbucket(hmebp); 2913 2914 return (ret); 2915 } 2916 2917 /* 2918 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2919 */ 2920 static struct hmehash_bucket * 2921 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size, 2922 uint_t rid) 2923 { 2924 struct hmehash_bucket *hmebp; 2925 int hmeshift; 2926 void *htagid = sfmmutohtagid(sfmmup, rid); 2927 2928 ASSERT(htagid != NULL); 2929 2930 hmeshift = HME_HASH_SHIFT(size); 2931 2932 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift); 2933 2934 SFMMU_HASH_LOCK(hmebp); 2935 2936 return (hmebp); 2937 } 2938 2939 /* 2940 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2941 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2942 * allocated. 
2943 */ 2944 static struct hme_blk * 2945 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2946 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid) 2947 { 2948 hmeblk_tag hblktag; 2949 int hmeshift; 2950 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2951 2952 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2953 2954 hblktag.htag_id = sfmmutohtagid(sfmmup, rid); 2955 ASSERT(hblktag.htag_id != NULL); 2956 hmeshift = HME_HASH_SHIFT(size); 2957 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2958 hblktag.htag_rehash = HME_HASH_REHASH(size); 2959 hblktag.htag_rid = rid; 2960 2961 ttearray_realloc: 2962 2963 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 2964 2965 /* 2966 * We block until hblk_reserve_lock is released; it's held by 2967 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2968 * replaced by a hblk from sfmmu8_cache. 2969 */ 2970 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2971 hblk_reserve_thread != curthread) { 2972 SFMMU_HASH_UNLOCK(hmebp); 2973 mutex_enter(&hblk_reserve_lock); 2974 mutex_exit(&hblk_reserve_lock); 2975 SFMMU_STAT(sf_hblk_reserve_hit); 2976 SFMMU_HASH_LOCK(hmebp); 2977 goto ttearray_realloc; 2978 } 2979 2980 if (hmeblkp == NULL) { 2981 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2982 hblktag, flags, rid); 2983 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2984 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2985 } else { 2986 /* 2987 * It is possible for 8k and 64k hblks to collide since they 2988 * have the same rehash value. This is because we 2989 * lazily free hblks and 8K/64K blks could be lingering. 2990 * If we find size mismatch we free the block and & try again. 2991 */ 2992 if (get_hblk_ttesz(hmeblkp) != size) { 2993 ASSERT(!hmeblkp->hblk_vcnt); 2994 ASSERT(!hmeblkp->hblk_hmecnt); 2995 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2996 &list, 0); 2997 goto ttearray_realloc; 2998 } 2999 if (hmeblkp->hblk_shw_bit) { 3000 /* 3001 * if the hblk was previously used as a shadow hblk then 3002 * we will change it to a normal hblk 3003 */ 3004 ASSERT(!hmeblkp->hblk_shared); 3005 if (hmeblkp->hblk_shw_mask) { 3006 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 3007 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3008 goto ttearray_realloc; 3009 } else { 3010 hmeblkp->hblk_shw_bit = 0; 3011 } 3012 } 3013 SFMMU_STAT(sf_hblk_hit); 3014 } 3015 3016 /* 3017 * hat_memload() should never call kmem_cache_free() for kernel hmeblks; 3018 * see block comment showing the stacktrace in sfmmu_hblk_alloc(); 3019 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will 3020 * just add these hmeblks to the per-cpu pending queue. 3021 */ 3022 sfmmu_hblks_list_purge(&list, 1); 3023 3024 ASSERT(get_hblk_ttesz(hmeblkp) == size); 3025 ASSERT(!hmeblkp->hblk_shw_bit); 3026 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3027 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3028 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 3029 3030 return (hmeblkp); 3031 } 3032 3033 /* 3034 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 3035 * otherwise. 
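 * Both callers, sfmmu_tteload_array() and sfmmu_memload_batchsmall(),
 * invoke it with the hme hash bucket already locked and release the
 * bucket themselves once all entries for that hmeblk have been added.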
3036 */ 3037 static int 3038 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 3039 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 3040 { 3041 page_t *pp = *pps; 3042 int hmenum, size, remap; 3043 tte_t tteold, flush_tte; 3044 #ifdef DEBUG 3045 tte_t orig_old; 3046 #endif /* DEBUG */ 3047 struct sf_hment *sfhme; 3048 kmutex_t *pml, *pmtx; 3049 hatlock_t *hatlockp; 3050 int myflt; 3051 3052 /* 3053 * remove this panic when we decide to let user virtual address 3054 * space be >= USERLIMIT. 3055 */ 3056 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 3057 panic("user addr %p in kernel space", (void *)vaddr); 3058 #if defined(TTE_IS_GLOBAL) 3059 if (TTE_IS_GLOBAL(ttep)) 3060 panic("sfmmu_tteload: creating global tte"); 3061 #endif 3062 3063 #ifdef DEBUG 3064 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 3065 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 3066 panic("sfmmu_tteload: non cacheable memory tte"); 3067 #endif /* DEBUG */ 3068 3069 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 3070 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 3071 TTE_SET_REF(ttep); 3072 TTE_SET_MOD(ttep); 3073 } 3074 3075 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 3076 !TTE_IS_MOD(ttep)) { 3077 /* 3078 * Don't load TSB for dummy as in ISM. Also don't preload 3079 * the TSB if the TTE isn't writable since we're likely to 3080 * fault on it again -- preloading can be fairly expensive. 3081 */ 3082 flags |= SFMMU_NO_TSBLOAD; 3083 } 3084 3085 size = TTE_CSZ(ttep); 3086 switch (size) { 3087 case TTE8K: 3088 SFMMU_STAT(sf_tteload8k); 3089 break; 3090 case TTE64K: 3091 SFMMU_STAT(sf_tteload64k); 3092 break; 3093 case TTE512K: 3094 SFMMU_STAT(sf_tteload512k); 3095 break; 3096 case TTE4M: 3097 SFMMU_STAT(sf_tteload4m); 3098 break; 3099 case (TTE32M): 3100 SFMMU_STAT(sf_tteload32m); 3101 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3102 break; 3103 case (TTE256M): 3104 SFMMU_STAT(sf_tteload256m); 3105 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3106 break; 3107 } 3108 3109 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 3110 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 3111 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3112 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3113 3114 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3115 3116 /* 3117 * Need to grab mlist lock here so that pageunload 3118 * will not change tte behind us. 3119 */ 3120 if (pp) { 3121 pml = sfmmu_mlist_enter(pp); 3122 } 3123 3124 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3125 /* 3126 * Look for corresponding hment and if valid verify 3127 * pfns are equal. 
3128 */ 3129 remap = TTE_IS_VALID(&tteold); 3130 if (remap) { 3131 pfn_t new_pfn, old_pfn; 3132 3133 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3134 new_pfn = TTE_TO_PFN(vaddr, ttep); 3135 3136 if (flags & HAT_LOAD_REMAP) { 3137 /* make sure we are remapping same type of pages */ 3138 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3139 panic("sfmmu_tteload - tte remap io<->memory"); 3140 } 3141 if (old_pfn != new_pfn && 3142 (pp != NULL || sfhme->hme_page != NULL)) { 3143 panic("sfmmu_tteload - tte remap pp != NULL"); 3144 } 3145 } else if (old_pfn != new_pfn) { 3146 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3147 (void *)hmeblkp); 3148 } 3149 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3150 } 3151 3152 if (pp) { 3153 if (size == TTE8K) { 3154 #ifdef VAC 3155 /* 3156 * Handle VAC consistency 3157 */ 3158 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3159 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3160 } 3161 #endif 3162 3163 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3164 pmtx = sfmmu_page_enter(pp); 3165 PP_CLRRO(pp); 3166 sfmmu_page_exit(pmtx); 3167 } else if (!PP_ISMAPPED(pp) && 3168 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3169 pmtx = sfmmu_page_enter(pp); 3170 if (!(PP_ISMOD(pp))) { 3171 PP_SETRO(pp); 3172 } 3173 sfmmu_page_exit(pmtx); 3174 } 3175 3176 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3177 /* 3178 * sfmmu_pagearray_setup failed so return 3179 */ 3180 sfmmu_mlist_exit(pml); 3181 return (1); 3182 } 3183 } 3184 3185 /* 3186 * Make sure hment is not on a mapping list. 3187 */ 3188 ASSERT(remap || (sfhme->hme_page == NULL)); 3189 3190 /* if it is not a remap then hme->next better be NULL */ 3191 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3192 3193 if (flags & HAT_LOAD_LOCK) { 3194 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3195 panic("too high lckcnt-hmeblk %p", 3196 (void *)hmeblkp); 3197 } 3198 atomic_inc_32(&hmeblkp->hblk_lckcnt); 3199 3200 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3201 } 3202 3203 #ifdef VAC 3204 if (pp && PP_ISNC(pp)) { 3205 /* 3206 * If the physical page is marked to be uncacheable, like 3207 * by a vac conflict, make sure the new mapping is also 3208 * uncacheable. 3209 */ 3210 TTE_CLR_VCACHEABLE(ttep); 3211 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3212 } 3213 #endif 3214 ttep->tte_hmenum = hmenum; 3215 3216 #ifdef DEBUG 3217 orig_old = tteold; 3218 #endif /* DEBUG */ 3219 3220 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3221 if ((sfmmup == KHATID) && 3222 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3223 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3224 } 3225 #ifdef DEBUG 3226 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3227 #endif /* DEBUG */ 3228 } 3229 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3230 3231 if (!TTE_IS_VALID(&tteold)) { 3232 3233 atomic_inc_16(&hmeblkp->hblk_vcnt); 3234 if (rid == SFMMU_INVALID_SHMERID) { 3235 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]); 3236 } else { 3237 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3238 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3239 /* 3240 * We already accounted for region ttecnt's in sfmmu 3241 * during hat_join_region() processing. Here we 3242 * only update ttecnt's in region struture. 
3243 */ 3244 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]); 3245 } 3246 } 3247 3248 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3249 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3250 sfmmup != ksfmmup) { 3251 uchar_t tteflag = 1 << size; 3252 if (rid == SFMMU_INVALID_SHMERID) { 3253 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3254 hatlockp = sfmmu_hat_enter(sfmmup); 3255 sfmmup->sfmmu_tteflags |= tteflag; 3256 sfmmu_hat_exit(hatlockp); 3257 } 3258 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3259 hatlockp = sfmmu_hat_enter(sfmmup); 3260 sfmmup->sfmmu_rtteflags |= tteflag; 3261 sfmmu_hat_exit(hatlockp); 3262 } 3263 /* 3264 * Update the current CPU tsbmiss area, so the current thread 3265 * won't need to take the tsbmiss for the new pagesize. 3266 * The other threads in the process will update their tsb 3267 * miss area lazily in sfmmu_tsbmiss_exception() when they 3268 * fail to find the translation for a newly added pagesize. 3269 */ 3270 if (size > TTE64K && myflt) { 3271 struct tsbmiss *tsbmp; 3272 kpreempt_disable(); 3273 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3274 if (rid == SFMMU_INVALID_SHMERID) { 3275 if (!(tsbmp->uhat_tteflags & tteflag)) { 3276 tsbmp->uhat_tteflags |= tteflag; 3277 } 3278 } else { 3279 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3280 tsbmp->uhat_rtteflags |= tteflag; 3281 } 3282 } 3283 kpreempt_enable(); 3284 } 3285 } 3286 3287 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3288 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3289 hatlockp = sfmmu_hat_enter(sfmmup); 3290 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3291 sfmmu_hat_exit(hatlockp); 3292 } 3293 3294 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3295 hw_tte.tte_intlo; 3296 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3297 hw_tte.tte_inthi; 3298 3299 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3300 /* 3301 * If remap and new tte differs from old tte we need 3302 * to sync the mod bit and flush TLB/TSB. We don't 3303 * need to sync ref bit because we currently always set 3304 * ref bit in tteload. 3305 */ 3306 ASSERT(TTE_IS_REF(ttep)); 3307 if (TTE_IS_MOD(&tteold)) { 3308 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3309 } 3310 /* 3311 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3312 * hmes are only used for read only text. Adding this code for 3313 * completeness and future use of shared hmeblks with writable 3314 * mappings of VMODSORT vnodes. 3315 */ 3316 if (hmeblkp->hblk_shared) { 3317 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3318 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3319 xt_sync(cpuset); 3320 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3321 } else { 3322 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3323 xt_sync(sfmmup->sfmmu_cpusran); 3324 } 3325 } 3326 3327 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3328 /* 3329 * We only preload 8K and 4M mappings into the TSB, since 3330 * 64K and 512K mappings are replicated and hence don't 3331 * have a single, unique TSB entry. Ditto for 32M/256M. 3332 */ 3333 if (size == TTE8K || size == TTE4M) { 3334 sf_scd_t *scdp; 3335 hatlockp = sfmmu_hat_enter(sfmmup); 3336 /* 3337 * Don't preload private TSB if the mapping is used 3338 * by the shctx in the SCD. 
3339 */ 3340 scdp = sfmmup->sfmmu_scdp; 3341 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3342 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3343 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3344 size); 3345 } 3346 sfmmu_hat_exit(hatlockp); 3347 } 3348 } 3349 if (pp) { 3350 if (!remap) { 3351 HME_ADD(sfhme, pp); 3352 atomic_inc_16(&hmeblkp->hblk_hmecnt); 3353 ASSERT(hmeblkp->hblk_hmecnt > 0); 3354 3355 /* 3356 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3357 * see pageunload() for comment. 3358 */ 3359 } 3360 sfmmu_mlist_exit(pml); 3361 } 3362 3363 return (0); 3364 } 3365 /* 3366 * Function unlocks hash bucket. 3367 */ 3368 static void 3369 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3370 { 3371 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3372 SFMMU_HASH_UNLOCK(hmebp); 3373 } 3374 3375 /* 3376 * function which checks and sets up page array for a large 3377 * translation. Will set p_vcolor, p_index, p_ro fields. 3378 * Assumes addr and pfnum of first page are properly aligned. 3379 * Will check for physical contiguity. If check fails it return 3380 * non null. 3381 */ 3382 static int 3383 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3384 { 3385 int i, index, ttesz; 3386 pfn_t pfnum; 3387 pgcnt_t npgs; 3388 page_t *pp, *pp1; 3389 kmutex_t *pmtx; 3390 #ifdef VAC 3391 int osz; 3392 int cflags = 0; 3393 int vac_err = 0; 3394 #endif 3395 int newidx = 0; 3396 3397 ttesz = TTE_CSZ(ttep); 3398 3399 ASSERT(ttesz > TTE8K); 3400 3401 npgs = TTEPAGES(ttesz); 3402 index = PAGESZ_TO_INDEX(ttesz); 3403 3404 pfnum = (*pps)->p_pagenum; 3405 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3406 3407 /* 3408 * Save the first pp so we can do HAT_TMPNC at the end. 3409 */ 3410 pp1 = *pps; 3411 #ifdef VAC 3412 osz = fnd_mapping_sz(pp1); 3413 #endif 3414 3415 for (i = 0; i < npgs; i++, pps++) { 3416 pp = *pps; 3417 ASSERT(PAGE_LOCKED(pp)); 3418 ASSERT(pp->p_szc >= ttesz); 3419 ASSERT(pp->p_szc == pp1->p_szc); 3420 ASSERT(sfmmu_mlist_held(pp)); 3421 3422 /* 3423 * XXX is it possible to maintain P_RO on the root only? 3424 */ 3425 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3426 pmtx = sfmmu_page_enter(pp); 3427 PP_CLRRO(pp); 3428 sfmmu_page_exit(pmtx); 3429 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 3430 !PP_ISMOD(pp)) { 3431 pmtx = sfmmu_page_enter(pp); 3432 if (!(PP_ISMOD(pp))) { 3433 PP_SETRO(pp); 3434 } 3435 sfmmu_page_exit(pmtx); 3436 } 3437 3438 /* 3439 * If this is a remap we skip vac & contiguity checks. 3440 */ 3441 if (remap) 3442 continue; 3443 3444 /* 3445 * set p_vcolor and detect any vac conflicts. 3446 */ 3447 #ifdef VAC 3448 if (vac_err == 0) { 3449 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 3450 3451 } 3452 #endif 3453 3454 /* 3455 * Save current index in case we need to undo it. 3456 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 3457 * "SFMMU_INDEX_SHIFT 6" 3458 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 3459 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 3460 * 3461 * So: index = PAGESZ_TO_INDEX(ttesz); 3462 * if ttesz == 1 then index = 0x2 3463 * 2 then index = 0x4 3464 * 3 then index = 0x8 3465 * 4 then index = 0x10 3466 * 5 then index = 0x20 3467 * The code below checks if it's a new pagesize (ie, newidx) 3468 * in case we need to take it back out of p_index, 3469 * and then or's the new index into the existing index. 
3470 */ 3471 if ((PP_MAPINDEX(pp) & index) == 0) 3472 newidx = 1; 3473 pp->p_index = (PP_MAPINDEX(pp) | index); 3474 3475 /* 3476 * contiguity check 3477 */ 3478 if (pp->p_pagenum != pfnum) { 3479 /* 3480 * If we fail the contiguity test then 3481 * the only thing we need to fix is the p_index field. 3482 * We might get a few extra flushes but since this 3483 * path is rare that is ok. The p_ro field will 3484 * get automatically fixed on the next tteload to 3485 * the page. NO TNC bit is set yet. 3486 */ 3487 while (i >= 0) { 3488 pp = *pps; 3489 if (newidx) 3490 pp->p_index = (PP_MAPINDEX(pp) & 3491 ~index); 3492 pps--; 3493 i--; 3494 } 3495 return (1); 3496 } 3497 pfnum++; 3498 addr += MMU_PAGESIZE; 3499 } 3500 3501 #ifdef VAC 3502 if (vac_err) { 3503 if (ttesz > osz) { 3504 /* 3505 * There are some smaller mappings that causes vac 3506 * conflicts. Convert all existing small mappings to 3507 * TNC. 3508 */ 3509 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3510 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3511 npgs); 3512 } else { 3513 /* EMPTY */ 3514 /* 3515 * If there exists an big page mapping, 3516 * that means the whole existing big page 3517 * has TNC setting already. No need to covert to 3518 * TNC again. 3519 */ 3520 ASSERT(PP_ISTNC(pp1)); 3521 } 3522 } 3523 #endif /* VAC */ 3524 3525 return (0); 3526 } 3527 3528 #ifdef VAC 3529 /* 3530 * Routine that detects vac consistency for a large page. It also 3531 * sets virtual color for all pp's for this big mapping. 3532 */ 3533 static int 3534 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3535 { 3536 int vcolor, ocolor; 3537 3538 ASSERT(sfmmu_mlist_held(pp)); 3539 3540 if (PP_ISNC(pp)) { 3541 return (HAT_TMPNC); 3542 } 3543 3544 vcolor = addr_to_vcolor(addr); 3545 if (PP_NEWPAGE(pp)) { 3546 PP_SET_VCOLOR(pp, vcolor); 3547 return (0); 3548 } 3549 3550 ocolor = PP_GET_VCOLOR(pp); 3551 if (ocolor == vcolor) { 3552 return (0); 3553 } 3554 3555 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 3556 /* 3557 * Previous user of page had a differnet color 3558 * but since there are no current users 3559 * we just flush the cache and change the color. 3560 * As an optimization for large pages we flush the 3561 * entire cache of that color and set a flag. 3562 */ 3563 SFMMU_STAT(sf_pgcolor_conflict); 3564 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3565 CacheColor_SetFlushed(*cflags, ocolor); 3566 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3567 } 3568 PP_SET_VCOLOR(pp, vcolor); 3569 return (0); 3570 } 3571 3572 /* 3573 * We got a real conflict with a current mapping. 3574 * set flags to start unencaching all mappings 3575 * and return failure so we restart looping 3576 * the pp array from the beginning. 3577 */ 3578 return (HAT_TMPNC); 3579 } 3580 #endif /* VAC */ 3581 3582 /* 3583 * creates a large page shadow hmeblk for a tte. 3584 * The purpose of this routine is to allow us to do quick unloads because 3585 * the vm layer can easily pass a very large but sparsely populated range. 
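 *
 * For example, loading an 8K tte also creates a 512K shadow hblk for
 * the enclosing 512K region, and loading a 512K tte creates a 4M
 * shadow, so a later unload of a large but sparsely populated range
 * can follow the shadow hblks (and their shw_mask bits) to find the
 * populated sub-ranges instead of probing every possible hblk.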
3586 */ 3587 static struct hme_blk * 3588 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3589 { 3590 struct hmehash_bucket *hmebp; 3591 hmeblk_tag hblktag; 3592 int hmeshift, size, vshift; 3593 uint_t shw_mask, newshw_mask; 3594 struct hme_blk *hmeblkp; 3595 3596 ASSERT(sfmmup != KHATID); 3597 if (mmu_page_sizes == max_mmu_page_sizes) { 3598 ASSERT(ttesz < TTE256M); 3599 } else { 3600 ASSERT(ttesz < TTE4M); 3601 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3602 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3603 } 3604 3605 if (ttesz == TTE8K) { 3606 size = TTE512K; 3607 } else { 3608 size = ++ttesz; 3609 } 3610 3611 hblktag.htag_id = sfmmup; 3612 hmeshift = HME_HASH_SHIFT(size); 3613 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3614 hblktag.htag_rehash = HME_HASH_REHASH(size); 3615 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3616 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3617 3618 SFMMU_HASH_LOCK(hmebp); 3619 3620 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3621 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3622 if (hmeblkp == NULL) { 3623 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3624 hblktag, flags, SFMMU_INVALID_SHMERID); 3625 } 3626 ASSERT(hmeblkp); 3627 if (!hmeblkp->hblk_shw_mask) { 3628 /* 3629 * if this is a unused hblk it was just allocated or could 3630 * potentially be a previous large page hblk so we need to 3631 * set the shadow bit. 3632 */ 3633 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3634 hmeblkp->hblk_shw_bit = 1; 3635 } else if (hmeblkp->hblk_shw_bit == 0) { 3636 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3637 (void *)hmeblkp); 3638 } 3639 ASSERT(hmeblkp->hblk_shw_bit == 1); 3640 ASSERT(!hmeblkp->hblk_shared); 3641 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3642 ASSERT(vshift < 8); 3643 /* 3644 * Atomically set shw mask bit 3645 */ 3646 do { 3647 shw_mask = hmeblkp->hblk_shw_mask; 3648 newshw_mask = shw_mask | (1 << vshift); 3649 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask, 3650 newshw_mask); 3651 } while (newshw_mask != shw_mask); 3652 3653 SFMMU_HASH_UNLOCK(hmebp); 3654 3655 return (hmeblkp); 3656 } 3657 3658 /* 3659 * This routine cleanup a previous shadow hmeblk and changes it to 3660 * a regular hblk. This happens rarely but it is possible 3661 * when a process wants to use large pages and there are hblks still 3662 * lying around from the previous as that used these hmeblks. 3663 * The alternative was to cleanup the shadow hblks at unload time 3664 * but since so few user processes actually use large pages, it is 3665 * better to be lazy and cleanup at this time. 
3666 */ 3667 static void 3668 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3669 struct hmehash_bucket *hmebp) 3670 { 3671 caddr_t addr, endaddr; 3672 int hashno, size; 3673 3674 ASSERT(hmeblkp->hblk_shw_bit); 3675 ASSERT(!hmeblkp->hblk_shared); 3676 3677 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3678 3679 if (!hmeblkp->hblk_shw_mask) { 3680 hmeblkp->hblk_shw_bit = 0; 3681 return; 3682 } 3683 addr = (caddr_t)get_hblk_base(hmeblkp); 3684 endaddr = get_hblk_endaddr(hmeblkp); 3685 size = get_hblk_ttesz(hmeblkp); 3686 hashno = size - 1; 3687 ASSERT(hashno > 0); 3688 SFMMU_HASH_UNLOCK(hmebp); 3689 3690 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3691 3692 SFMMU_HASH_LOCK(hmebp); 3693 } 3694 3695 static void 3696 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3697 int hashno) 3698 { 3699 int hmeshift, shadow = 0; 3700 hmeblk_tag hblktag; 3701 struct hmehash_bucket *hmebp; 3702 struct hme_blk *hmeblkp; 3703 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3704 3705 ASSERT(hashno > 0); 3706 hblktag.htag_id = sfmmup; 3707 hblktag.htag_rehash = hashno; 3708 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3709 3710 hmeshift = HME_HASH_SHIFT(hashno); 3711 3712 while (addr < endaddr) { 3713 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3714 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3715 SFMMU_HASH_LOCK(hmebp); 3716 /* inline HME_HASH_SEARCH */ 3717 hmeblkp = hmebp->hmeblkp; 3718 pr_hblk = NULL; 3719 while (hmeblkp) { 3720 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3721 /* found hme_blk */ 3722 ASSERT(!hmeblkp->hblk_shared); 3723 if (hmeblkp->hblk_shw_bit) { 3724 if (hmeblkp->hblk_shw_mask) { 3725 shadow = 1; 3726 sfmmu_shadow_hcleanup(sfmmup, 3727 hmeblkp, hmebp); 3728 break; 3729 } else { 3730 hmeblkp->hblk_shw_bit = 0; 3731 } 3732 } 3733 3734 /* 3735 * Hblk_hmecnt and hblk_vcnt could be non zero 3736 * since hblk_unload() does not gurantee that. 3737 * 3738 * XXX - this could cause tteload() to spin 3739 * where sfmmu_shadow_hcleanup() is called. 3740 */ 3741 } 3742 3743 nx_hblk = hmeblkp->hblk_next; 3744 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3745 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3746 &list, 0); 3747 } else { 3748 pr_hblk = hmeblkp; 3749 } 3750 hmeblkp = nx_hblk; 3751 } 3752 3753 SFMMU_HASH_UNLOCK(hmebp); 3754 3755 if (shadow) { 3756 /* 3757 * We found another shadow hblk so cleaned its 3758 * children. We need to go back and cleanup 3759 * the original hblk so we don't change the 3760 * addr. 3761 */ 3762 shadow = 0; 3763 } else { 3764 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3765 (1 << hmeshift)); 3766 } 3767 } 3768 sfmmu_hblks_list_purge(&list, 0); 3769 } 3770 3771 /* 3772 * This routine's job is to delete stale invalid shared hmeregions hmeblks that 3773 * may still linger on after pageunload. 
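 *
 * "Stale" here means an hmeblk whose hblk_vcnt and hblk_hmecnt have both
 * dropped to zero (pageunload tore down all of its translations) but which
 * is still linked on the hash chain; it only needs to be unlinked and
 * queued for freeing.  Finding a still-valid hmeblk here is fatal.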
3774 */ 3775 static void 3776 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3777 { 3778 int hmeshift; 3779 hmeblk_tag hblktag; 3780 struct hmehash_bucket *hmebp; 3781 struct hme_blk *hmeblkp; 3782 struct hme_blk *pr_hblk; 3783 struct hme_blk *list = NULL; 3784 3785 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3786 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3787 3788 hmeshift = HME_HASH_SHIFT(ttesz); 3789 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3790 hblktag.htag_rehash = ttesz; 3791 hblktag.htag_rid = rid; 3792 hblktag.htag_id = srdp; 3793 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3794 3795 SFMMU_HASH_LOCK(hmebp); 3796 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3797 if (hmeblkp != NULL) { 3798 ASSERT(hmeblkp->hblk_shared); 3799 ASSERT(!hmeblkp->hblk_shw_bit); 3800 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3801 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3802 } 3803 ASSERT(!hmeblkp->hblk_lckcnt); 3804 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3805 &list, 0); 3806 } 3807 SFMMU_HASH_UNLOCK(hmebp); 3808 sfmmu_hblks_list_purge(&list, 0); 3809 } 3810 3811 /* ARGSUSED */ 3812 static void 3813 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3814 size_t r_size, void *r_obj, u_offset_t r_objoff) 3815 { 3816 } 3817 3818 /* 3819 * Searches for an hmeblk which maps addr, then unloads this mapping 3820 * and updates *eaddrp, if the hmeblk is found. 3821 */ 3822 static void 3823 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3824 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3825 { 3826 int hmeshift; 3827 hmeblk_tag hblktag; 3828 struct hmehash_bucket *hmebp; 3829 struct hme_blk *hmeblkp; 3830 struct hme_blk *pr_hblk; 3831 struct hme_blk *list = NULL; 3832 3833 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3834 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3835 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3836 3837 hmeshift = HME_HASH_SHIFT(ttesz); 3838 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3839 hblktag.htag_rehash = ttesz; 3840 hblktag.htag_rid = rid; 3841 hblktag.htag_id = srdp; 3842 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3843 3844 SFMMU_HASH_LOCK(hmebp); 3845 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3846 if (hmeblkp != NULL) { 3847 ASSERT(hmeblkp->hblk_shared); 3848 ASSERT(!hmeblkp->hblk_lckcnt); 3849 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3850 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3851 eaddr, NULL, HAT_UNLOAD); 3852 ASSERT(*eaddrp > addr); 3853 } 3854 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3855 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3856 &list, 0); 3857 } 3858 SFMMU_HASH_UNLOCK(hmebp); 3859 sfmmu_hblks_list_purge(&list, 0); 3860 } 3861 3862 static void 3863 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3864 { 3865 int ttesz = rgnp->rgn_pgszc; 3866 size_t rsz = rgnp->rgn_size; 3867 caddr_t rsaddr = rgnp->rgn_saddr; 3868 caddr_t readdr = rsaddr + rsz; 3869 caddr_t rhsaddr; 3870 caddr_t va; 3871 uint_t rid = rgnp->rgn_id; 3872 caddr_t cbsaddr; 3873 caddr_t cbeaddr; 3874 hat_rgn_cb_func_t rcbfunc; 3875 ulong_t cnt; 3876 3877 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3878 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3879 3880 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3881 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3882 if (ttesz < HBLK_MIN_TTESZ) { 3883 ttesz = HBLK_MIN_TTESZ; 3884 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3885 } else { 3886 rhsaddr = rsaddr; 3887 } 3888 3889 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) 
{ 3890 rcbfunc = sfmmu_rgn_cb_noop; 3891 } 3892 3893 while (ttesz >= HBLK_MIN_TTESZ) { 3894 cbsaddr = rsaddr; 3895 cbeaddr = rsaddr; 3896 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3897 ttesz--; 3898 continue; 3899 } 3900 cnt = 0; 3901 va = rsaddr; 3902 while (va < readdr) { 3903 ASSERT(va >= rhsaddr); 3904 if (va != cbeaddr) { 3905 if (cbeaddr != cbsaddr) { 3906 ASSERT(cbeaddr > cbsaddr); 3907 (*rcbfunc)(cbsaddr, cbeaddr, 3908 rsaddr, rsz, rgnp->rgn_obj, 3909 rgnp->rgn_objoff); 3910 } 3911 cbsaddr = va; 3912 cbeaddr = va; 3913 } 3914 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3915 ttesz, &cbeaddr); 3916 cnt++; 3917 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3918 } 3919 if (cbeaddr != cbsaddr) { 3920 ASSERT(cbeaddr > cbsaddr); 3921 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3922 rsz, rgnp->rgn_obj, 3923 rgnp->rgn_objoff); 3924 } 3925 ttesz--; 3926 } 3927 } 3928 3929 /* 3930 * Release one hardware address translation lock on the given address range. 3931 */ 3932 void 3933 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3934 { 3935 struct hmehash_bucket *hmebp; 3936 hmeblk_tag hblktag; 3937 int hmeshift, hashno = 1; 3938 struct hme_blk *hmeblkp, *list = NULL; 3939 caddr_t endaddr; 3940 3941 ASSERT(sfmmup != NULL); 3942 3943 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 3944 ASSERT((len & MMU_PAGEOFFSET) == 0); 3945 endaddr = addr + len; 3946 hblktag.htag_id = sfmmup; 3947 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3948 3949 /* 3950 * Spitfire supports 4 page sizes. 3951 * Most pages are expected to be of the smallest page size (8K) and 3952 * these will not need to be rehashed. 64K pages also don't need to be 3953 * rehashed because an hmeblk spans 64K of address space. 512K pages 3954 * might need 1 rehash and and 4M pages might need 2 rehashes. 3955 */ 3956 while (addr < endaddr) { 3957 hmeshift = HME_HASH_SHIFT(hashno); 3958 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3959 hblktag.htag_rehash = hashno; 3960 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3961 3962 SFMMU_HASH_LOCK(hmebp); 3963 3964 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3965 if (hmeblkp != NULL) { 3966 ASSERT(!hmeblkp->hblk_shared); 3967 /* 3968 * If we encounter a shadow hmeblk then 3969 * we know there are no valid hmeblks mapping 3970 * this address at this size or larger. 3971 * Just increment address by the smallest 3972 * page size. 3973 */ 3974 if (hmeblkp->hblk_shw_bit) { 3975 addr += MMU_PAGESIZE; 3976 } else { 3977 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3978 endaddr); 3979 } 3980 SFMMU_HASH_UNLOCK(hmebp); 3981 hashno = 1; 3982 continue; 3983 } 3984 SFMMU_HASH_UNLOCK(hmebp); 3985 3986 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3987 /* 3988 * We have traversed the whole list and rehashed 3989 * if necessary without finding the address to unlock 3990 * which should never happen. 3991 */ 3992 panic("sfmmu_unlock: addr not found. 
" 3993 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3994 } else { 3995 hashno++; 3996 } 3997 } 3998 3999 sfmmu_hblks_list_purge(&list, 0); 4000 } 4001 4002 void 4003 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 4004 hat_region_cookie_t rcookie) 4005 { 4006 sf_srd_t *srdp; 4007 sf_region_t *rgnp; 4008 int ttesz; 4009 uint_t rid; 4010 caddr_t eaddr; 4011 caddr_t va; 4012 int hmeshift; 4013 hmeblk_tag hblktag; 4014 struct hmehash_bucket *hmebp; 4015 struct hme_blk *hmeblkp; 4016 struct hme_blk *pr_hblk; 4017 struct hme_blk *list; 4018 4019 if (rcookie == HAT_INVALID_REGION_COOKIE) { 4020 hat_unlock(sfmmup, addr, len); 4021 return; 4022 } 4023 4024 ASSERT(sfmmup != NULL); 4025 ASSERT(sfmmup != ksfmmup); 4026 4027 srdp = sfmmup->sfmmu_srdp; 4028 rid = (uint_t)((uint64_t)rcookie); 4029 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS); 4030 eaddr = addr + len; 4031 va = addr; 4032 list = NULL; 4033 rgnp = srdp->srd_hmergnp[rid]; 4034 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 4035 4036 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 4037 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 4038 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 4039 ttesz = HBLK_MIN_TTESZ; 4040 } else { 4041 ttesz = rgnp->rgn_pgszc; 4042 } 4043 while (va < eaddr) { 4044 while (ttesz < rgnp->rgn_pgszc && 4045 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 4046 ttesz++; 4047 } 4048 while (ttesz >= HBLK_MIN_TTESZ) { 4049 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 4050 ttesz--; 4051 continue; 4052 } 4053 hmeshift = HME_HASH_SHIFT(ttesz); 4054 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 4055 hblktag.htag_rehash = ttesz; 4056 hblktag.htag_rid = rid; 4057 hblktag.htag_id = srdp; 4058 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 4059 SFMMU_HASH_LOCK(hmebp); 4060 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, 4061 &list); 4062 if (hmeblkp == NULL) { 4063 SFMMU_HASH_UNLOCK(hmebp); 4064 ttesz--; 4065 continue; 4066 } 4067 ASSERT(hmeblkp->hblk_shared); 4068 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 4069 ASSERT(va >= eaddr || 4070 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 4071 SFMMU_HASH_UNLOCK(hmebp); 4072 break; 4073 } 4074 if (ttesz < HBLK_MIN_TTESZ) { 4075 panic("hat_unlock_region: addr not found " 4076 "addr %p hat %p", (void *)va, (void *)sfmmup); 4077 } 4078 } 4079 sfmmu_hblks_list_purge(&list, 0); 4080 } 4081 4082 /* 4083 * Function to unlock a range of addresses in an hmeblk. It returns the 4084 * next address that needs to be unlocked. 4085 * Should be called with the hash lock held. 
4086 */ 4087 static caddr_t 4088 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 4089 { 4090 struct sf_hment *sfhme; 4091 tte_t tteold, ttemod; 4092 int ttesz, ret; 4093 4094 ASSERT(in_hblk_range(hmeblkp, addr)); 4095 ASSERT(hmeblkp->hblk_shw_bit == 0); 4096 4097 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4098 ttesz = get_hblk_ttesz(hmeblkp); 4099 4100 HBLKTOHME(sfhme, hmeblkp, addr); 4101 while (addr < endaddr) { 4102 readtte: 4103 sfmmu_copytte(&sfhme->hme_tte, &tteold); 4104 if (TTE_IS_VALID(&tteold)) { 4105 4106 ttemod = tteold; 4107 4108 ret = sfmmu_modifytte_try(&tteold, &ttemod, 4109 &sfhme->hme_tte); 4110 4111 if (ret < 0) 4112 goto readtte; 4113 4114 if (hmeblkp->hblk_lckcnt == 0) 4115 panic("zero hblk lckcnt"); 4116 4117 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4118 (uintptr_t)endaddr) 4119 panic("can't unlock large tte"); 4120 4121 ASSERT(hmeblkp->hblk_lckcnt > 0); 4122 atomic_dec_32(&hmeblkp->hblk_lckcnt); 4123 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4124 } else { 4125 panic("sfmmu_hblk_unlock: invalid tte"); 4126 } 4127 addr += TTEBYTES(ttesz); 4128 sfhme++; 4129 } 4130 return (addr); 4131 } 4132 4133 /* 4134 * Physical Address Mapping Framework 4135 * 4136 * General rules: 4137 * 4138 * (1) Applies only to seg_kmem memory pages. To make things easier, 4139 * seg_kpm addresses are also accepted by the routines, but nothing 4140 * is done with them since by definition their PA mappings are static. 4141 * (2) hat_add_callback() may only be called while holding the page lock 4142 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4143 * or passing HAC_PAGELOCK flag. 4144 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4145 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4146 * callbacks may not sleep or acquire adaptive mutex locks. 4147 * (4) Either prehandler() or posthandler() (but not both) may be specified 4148 * as being NULL. Specifying an errhandler() is optional. 4149 * 4150 * Details of using the framework: 4151 * 4152 * registering a callback (hat_register_callback()) 4153 * 4154 * Pass prehandler, posthandler, errhandler addresses 4155 * as described below. If capture_cpus argument is nonzero, 4156 * suspend callback to the prehandler will occur with CPUs 4157 * captured and executing xc_loop() and CPUs will remain 4158 * captured until after the posthandler suspend callback 4159 * occurs. 4160 * 4161 * adding a callback (hat_add_callback()) 4162 * 4163 * as_pagelock(); 4164 * hat_add_callback(); 4165 * save returned pfn in private data structures or program registers; 4166 * as_pageunlock(); 4167 * 4168 * prehandler() 4169 * 4170 * Stop all accesses by physical address to this memory page. 4171 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4172 * adaptive locks. The second, SUSPEND, is called at high PIL with 4173 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4174 * locks must be XCALL_PIL or higher locks). 4175 * 4176 * May return the following errors: 4177 * EIO: A fatal error has occurred. This will result in panic. 4178 * EAGAIN: The page cannot be suspended. This will fail the 4179 * relocation. 4180 * 0: Success. 4181 * 4182 * posthandler() 4183 * 4184 * Save new pfn in private data structures or program registers; 4185 * not allowed to fail (non-zero return values will result in panic). 4186 * 4187 * errhandler() 4188 * 4189 * called when an error occurs related to the callback. 
Currently 4190 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4191 * a page is being freed, but there are still outstanding callback(s) 4192 * registered on the page. 4193 * 4194 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4195 * 4196 * stop using physical address 4197 * hat_delete_callback(); 4198 * 4199 */ 4200 4201 /* 4202 * Register a callback class. Each subsystem should do this once and 4203 * cache the id_t returned for use in setting up and tearing down callbacks. 4204 * 4205 * There is no facility for removing callback IDs once they are created; 4206 * the "key" should be unique for each module, so in case a module is unloaded 4207 * and subsequently re-loaded, we can recycle the module's previous entry. 4208 */ 4209 id_t 4210 hat_register_callback(int key, 4211 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4212 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4213 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4214 int capture_cpus) 4215 { 4216 id_t id; 4217 4218 /* 4219 * Search the table for a pre-existing callback associated with 4220 * the identifier "key". If one exists, we re-use that entry in 4221 * the table for this instance, otherwise we assign the next 4222 * available table slot. 4223 */ 4224 for (id = 0; id < sfmmu_max_cb_id; id++) { 4225 if (sfmmu_cb_table[id].key == key) 4226 break; 4227 } 4228 4229 if (id == sfmmu_max_cb_id) { 4230 id = sfmmu_cb_nextid++; 4231 if (id >= sfmmu_max_cb_id) 4232 panic("hat_register_callback: out of callback IDs"); 4233 } 4234 4235 ASSERT(prehandler != NULL || posthandler != NULL); 4236 4237 sfmmu_cb_table[id].key = key; 4238 sfmmu_cb_table[id].prehandler = prehandler; 4239 sfmmu_cb_table[id].posthandler = posthandler; 4240 sfmmu_cb_table[id].errhandler = errhandler; 4241 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4242 4243 return (id); 4244 } 4245 4246 #define HAC_COOKIE_NONE (void *)-1 4247 4248 /* 4249 * Add relocation callbacks to the specified addr/len which will be called 4250 * when relocating the associated page. See the description of pre and 4251 * posthandler above for more details. 4252 * 4253 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4254 * locked internally so the caller must be able to deal with the callback 4255 * running even before this function has returned. If HAC_PAGELOCK is not 4256 * set, it is assumed that the underlying memory pages are locked. 4257 * 4258 * Since the caller must track the individual page boundaries anyway, 4259 * we only allow a callback to be added to a single page (large 4260 * or small). Thus [addr, addr + len) MUST be contained within a single 4261 * page. 4262 * 4263 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4264 * _provided_that_ a unique parameter is specified for each callback. 4265 * If multiple callbacks are registered on the same range the callback will 4266 * be invoked with each unique parameter. Registering the same callback with 4267 * the same argument more than once will result in corrupted kernel state. 4268 * 4269 * Returns the pfn of the underlying kernel page in *rpfn 4270 * on success, or PFN_INVALID on failure. 4271 * 4272 * cookiep (if passed) provides storage space for an opaque cookie 4273 * to return later to hat_delete_callback(). This cookie makes the callback 4274 * deletion significantly quicker by avoiding a potentially lengthy hash 4275 * search. 
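 *
 * A minimal usage sketch (illustrative only; my_cb_id and my_pvt are
 * hypothetical and error handling is abbreviated):
 *
 *	pfn_t pfn;
 *	void *cookie;
 *
 *	if (hat_add_callback(my_cb_id, vaddr, MMU_PAGESIZE,
 *	    HAC_SLEEP | HAC_PAGELOCK, my_pvt, &pfn, &cookie) != 0)
 *		return (error);
 *	... program the device using the physical address built from pfn ...
 *	hat_delete_callback(vaddr, MMU_PAGESIZE, my_pvt, HAC_PAGELOCK,
 *	    cookie);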
4276 * 4277 * Returns values: 4278 * 0: success 4279 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4280 * EINVAL: callback ID is not valid 4281 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4282 * space 4283 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4284 */ 4285 int 4286 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4287 void *pvt, pfn_t *rpfn, void **cookiep) 4288 { 4289 struct hmehash_bucket *hmebp; 4290 hmeblk_tag hblktag; 4291 struct hme_blk *hmeblkp; 4292 int hmeshift, hashno; 4293 caddr_t saddr, eaddr, baseaddr; 4294 struct pa_hment *pahmep; 4295 struct sf_hment *sfhmep, *osfhmep; 4296 kmutex_t *pml; 4297 tte_t tte; 4298 page_t *pp; 4299 vnode_t *vp; 4300 u_offset_t off; 4301 pfn_t pfn; 4302 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4303 int locked = 0; 4304 4305 /* 4306 * For KPM mappings, just return the physical address since we 4307 * don't need to register any callbacks. 4308 */ 4309 if (IS_KPM_ADDR(vaddr)) { 4310 uint64_t paddr; 4311 SFMMU_KPM_VTOP(vaddr, paddr); 4312 *rpfn = btop(paddr); 4313 if (cookiep != NULL) 4314 *cookiep = HAC_COOKIE_NONE; 4315 return (0); 4316 } 4317 4318 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4319 *rpfn = PFN_INVALID; 4320 return (EINVAL); 4321 } 4322 4323 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4324 *rpfn = PFN_INVALID; 4325 return (ENOMEM); 4326 } 4327 4328 sfhmep = &pahmep->sfment; 4329 4330 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4331 eaddr = saddr + len; 4332 4333 rehash: 4334 /* Find the mapping(s) for this page */ 4335 for (hashno = TTE64K, hmeblkp = NULL; 4336 hmeblkp == NULL && hashno <= mmu_hashcnt; 4337 hashno++) { 4338 hmeshift = HME_HASH_SHIFT(hashno); 4339 hblktag.htag_id = ksfmmup; 4340 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4341 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4342 hblktag.htag_rehash = hashno; 4343 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4344 4345 SFMMU_HASH_LOCK(hmebp); 4346 4347 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4348 4349 if (hmeblkp == NULL) 4350 SFMMU_HASH_UNLOCK(hmebp); 4351 } 4352 4353 if (hmeblkp == NULL) { 4354 kmem_cache_free(pa_hment_cache, pahmep); 4355 *rpfn = PFN_INVALID; 4356 return (ENXIO); 4357 } 4358 4359 ASSERT(!hmeblkp->hblk_shared); 4360 4361 HBLKTOHME(osfhmep, hmeblkp, saddr); 4362 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4363 4364 if (!TTE_IS_VALID(&tte)) { 4365 SFMMU_HASH_UNLOCK(hmebp); 4366 kmem_cache_free(pa_hment_cache, pahmep); 4367 *rpfn = PFN_INVALID; 4368 return (ENXIO); 4369 } 4370 4371 /* 4372 * Make sure the boundaries for the callback fall within this 4373 * single mapping. 4374 */ 4375 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4376 ASSERT(saddr >= baseaddr); 4377 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4378 SFMMU_HASH_UNLOCK(hmebp); 4379 kmem_cache_free(pa_hment_cache, pahmep); 4380 *rpfn = PFN_INVALID; 4381 return (ERANGE); 4382 } 4383 4384 pfn = sfmmu_ttetopfn(&tte, vaddr); 4385 4386 /* 4387 * The pfn may not have a page_t underneath in which case we 4388 * just return it. This can happen if we are doing I/O to a 4389 * static portion of the kernel's address space, for instance. 
4390 */ 4391 pp = osfhmep->hme_page; 4392 if (pp == NULL) { 4393 SFMMU_HASH_UNLOCK(hmebp); 4394 kmem_cache_free(pa_hment_cache, pahmep); 4395 *rpfn = pfn; 4396 if (cookiep) 4397 *cookiep = HAC_COOKIE_NONE; 4398 return (0); 4399 } 4400 ASSERT(pp == PP_PAGEROOT(pp)); 4401 4402 vp = pp->p_vnode; 4403 off = pp->p_offset; 4404 4405 pml = sfmmu_mlist_enter(pp); 4406 4407 if (flags & HAC_PAGELOCK) { 4408 if (!page_trylock(pp, SE_SHARED)) { 4409 /* 4410 * Somebody is holding SE_EXCL lock. Might 4411 * even be hat_page_relocate(). Drop all 4412 * our locks, lookup the page in &kvp, and 4413 * retry. If it doesn't exist in &kvp and &zvp, 4414 * then we must be dealing with a kernel mapped 4415 * page which doesn't actually belong to 4416 * segkmem so we punt. 4417 */ 4418 sfmmu_mlist_exit(pml); 4419 SFMMU_HASH_UNLOCK(hmebp); 4420 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4421 4422 /* check zvp before giving up */ 4423 if (pp == NULL) 4424 pp = page_lookup(&zvp, (u_offset_t)saddr, 4425 SE_SHARED); 4426 4427 /* Okay, we didn't find it, give up */ 4428 if (pp == NULL) { 4429 kmem_cache_free(pa_hment_cache, pahmep); 4430 *rpfn = pfn; 4431 if (cookiep) 4432 *cookiep = HAC_COOKIE_NONE; 4433 return (0); 4434 } 4435 page_unlock(pp); 4436 goto rehash; 4437 } 4438 locked = 1; 4439 } 4440 4441 if (!PAGE_LOCKED(pp) && !panicstr) 4442 panic("hat_add_callback: page 0x%p not locked", (void *)pp); 4443 4444 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4445 pp->p_offset != off) { 4446 /* 4447 * The page moved before we got our hands on it. Drop 4448 * all the locks and try again. 4449 */ 4450 ASSERT((flags & HAC_PAGELOCK) != 0); 4451 sfmmu_mlist_exit(pml); 4452 SFMMU_HASH_UNLOCK(hmebp); 4453 page_unlock(pp); 4454 locked = 0; 4455 goto rehash; 4456 } 4457 4458 if (!VN_ISKAS(vp)) { 4459 /* 4460 * This is not a segkmem page but another page which 4461 * has been kernel mapped. It had better have at least 4462 * a share lock on it. Return the pfn. 4463 */ 4464 sfmmu_mlist_exit(pml); 4465 SFMMU_HASH_UNLOCK(hmebp); 4466 if (locked) 4467 page_unlock(pp); 4468 kmem_cache_free(pa_hment_cache, pahmep); 4469 ASSERT(PAGE_LOCKED(pp)); 4470 *rpfn = pfn; 4471 if (cookiep) 4472 *cookiep = HAC_COOKIE_NONE; 4473 return (0); 4474 } 4475 4476 /* 4477 * Setup this pa_hment and link its embedded dummy sf_hment into 4478 * the mapping list. 4479 */ 4480 pp->p_share++; 4481 pahmep->cb_id = callback_id; 4482 pahmep->addr = vaddr; 4483 pahmep->len = len; 4484 pahmep->refcnt = 1; 4485 pahmep->flags = 0; 4486 pahmep->pvt = pvt; 4487 4488 sfhmep->hme_tte.ll = 0; 4489 sfhmep->hme_data = pahmep; 4490 sfhmep->hme_prev = osfhmep; 4491 sfhmep->hme_next = osfhmep->hme_next; 4492 4493 if (osfhmep->hme_next) 4494 osfhmep->hme_next->hme_prev = sfhmep; 4495 4496 osfhmep->hme_next = sfhmep; 4497 4498 sfmmu_mlist_exit(pml); 4499 SFMMU_HASH_UNLOCK(hmebp); 4500 4501 if (locked) 4502 page_unlock(pp); 4503 4504 *rpfn = pfn; 4505 if (cookiep) 4506 *cookiep = (void *)pahmep; 4507 4508 return (0); 4509 } 4510 4511 /* 4512 * Remove the relocation callbacks from the specified addr/len. 
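 *
 * If the caller saved the cookie returned by hat_add_callback(), passing
 * it here removes that pa_hment directly; with a NULL cookie the page's
 * mapping list is searched for a pa_hment matching (pvt, vaddr, len),
 * which is noticeably slower.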
4513 */ 4514 void 4515 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4516 void *cookie) 4517 { 4518 struct hmehash_bucket *hmebp; 4519 hmeblk_tag hblktag; 4520 struct hme_blk *hmeblkp; 4521 int hmeshift, hashno; 4522 caddr_t saddr; 4523 struct pa_hment *pahmep; 4524 struct sf_hment *sfhmep, *osfhmep; 4525 kmutex_t *pml; 4526 tte_t tte; 4527 page_t *pp; 4528 vnode_t *vp; 4529 u_offset_t off; 4530 int locked = 0; 4531 4532 /* 4533 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4534 * remove so just return. 4535 */ 4536 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4537 return; 4538 4539 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4540 4541 rehash: 4542 /* Find the mapping(s) for this page */ 4543 for (hashno = TTE64K, hmeblkp = NULL; 4544 hmeblkp == NULL && hashno <= mmu_hashcnt; 4545 hashno++) { 4546 hmeshift = HME_HASH_SHIFT(hashno); 4547 hblktag.htag_id = ksfmmup; 4548 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4549 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4550 hblktag.htag_rehash = hashno; 4551 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4552 4553 SFMMU_HASH_LOCK(hmebp); 4554 4555 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4556 4557 if (hmeblkp == NULL) 4558 SFMMU_HASH_UNLOCK(hmebp); 4559 } 4560 4561 if (hmeblkp == NULL) 4562 return; 4563 4564 ASSERT(!hmeblkp->hblk_shared); 4565 4566 HBLKTOHME(osfhmep, hmeblkp, saddr); 4567 4568 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4569 if (!TTE_IS_VALID(&tte)) { 4570 SFMMU_HASH_UNLOCK(hmebp); 4571 return; 4572 } 4573 4574 pp = osfhmep->hme_page; 4575 if (pp == NULL) { 4576 SFMMU_HASH_UNLOCK(hmebp); 4577 ASSERT(cookie == NULL); 4578 return; 4579 } 4580 4581 vp = pp->p_vnode; 4582 off = pp->p_offset; 4583 4584 pml = sfmmu_mlist_enter(pp); 4585 4586 if (flags & HAC_PAGELOCK) { 4587 if (!page_trylock(pp, SE_SHARED)) { 4588 /* 4589 * Somebody is holding SE_EXCL lock. Might 4590 * even be hat_page_relocate(). Drop all 4591 * our locks, lookup the page in &kvp, and 4592 * retry. If it doesn't exist in &kvp and &zvp, 4593 * then we must be dealing with a kernel mapped 4594 * page which doesn't actually belong to 4595 * segkmem so we punt. 4596 */ 4597 sfmmu_mlist_exit(pml); 4598 SFMMU_HASH_UNLOCK(hmebp); 4599 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4600 /* check zvp before giving up */ 4601 if (pp == NULL) 4602 pp = page_lookup(&zvp, (u_offset_t)saddr, 4603 SE_SHARED); 4604 4605 if (pp == NULL) { 4606 ASSERT(cookie == NULL); 4607 return; 4608 } 4609 page_unlock(pp); 4610 goto rehash; 4611 } 4612 locked = 1; 4613 } 4614 4615 ASSERT(PAGE_LOCKED(pp)); 4616 4617 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4618 pp->p_offset != off) { 4619 /* 4620 * The page moved before we got our hands on it. Drop 4621 * all the locks and try again. 4622 */ 4623 ASSERT((flags & HAC_PAGELOCK) != 0); 4624 sfmmu_mlist_exit(pml); 4625 SFMMU_HASH_UNLOCK(hmebp); 4626 page_unlock(pp); 4627 locked = 0; 4628 goto rehash; 4629 } 4630 4631 if (!VN_ISKAS(vp)) { 4632 /* 4633 * This is not a segkmem page but another page which 4634 * has been kernel mapped. 
4635 */ 4636 sfmmu_mlist_exit(pml); 4637 SFMMU_HASH_UNLOCK(hmebp); 4638 if (locked) 4639 page_unlock(pp); 4640 ASSERT(cookie == NULL); 4641 return; 4642 } 4643 4644 if (cookie != NULL) { 4645 pahmep = (struct pa_hment *)cookie; 4646 sfhmep = &pahmep->sfment; 4647 } else { 4648 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4649 sfhmep = sfhmep->hme_next) { 4650 4651 /* 4652 * skip va<->pa mappings 4653 */ 4654 if (!IS_PAHME(sfhmep)) 4655 continue; 4656 4657 pahmep = sfhmep->hme_data; 4658 ASSERT(pahmep != NULL); 4659 4660 /* 4661 * if pa_hment matches, remove it 4662 */ 4663 if ((pahmep->pvt == pvt) && 4664 (pahmep->addr == vaddr) && 4665 (pahmep->len == len)) { 4666 break; 4667 } 4668 } 4669 } 4670 4671 if (sfhmep == NULL) { 4672 if (!panicstr) { 4673 panic("hat_delete_callback: pa_hment not found, pp %p", 4674 (void *)pp); 4675 } 4676 return; 4677 } 4678 4679 /* 4680 * Note: at this point a valid kernel mapping must still be 4681 * present on this page. 4682 */ 4683 pp->p_share--; 4684 if (pp->p_share <= 0) 4685 panic("hat_delete_callback: zero p_share"); 4686 4687 if (--pahmep->refcnt == 0) { 4688 if (pahmep->flags != 0) 4689 panic("hat_delete_callback: pa_hment is busy"); 4690 4691 /* 4692 * Remove sfhmep from the mapping list for the page. 4693 */ 4694 if (sfhmep->hme_prev) { 4695 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4696 } else { 4697 pp->p_mapping = sfhmep->hme_next; 4698 } 4699 4700 if (sfhmep->hme_next) 4701 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4702 4703 sfmmu_mlist_exit(pml); 4704 SFMMU_HASH_UNLOCK(hmebp); 4705 4706 if (locked) 4707 page_unlock(pp); 4708 4709 kmem_cache_free(pa_hment_cache, pahmep); 4710 return; 4711 } 4712 4713 sfmmu_mlist_exit(pml); 4714 SFMMU_HASH_UNLOCK(hmebp); 4715 if (locked) 4716 page_unlock(pp); 4717 } 4718 4719 /* 4720 * hat_probe returns 1 if the translation for the address 'addr' is 4721 * loaded, zero otherwise. 4722 * 4723 * hat_probe should be used only for advisorary purposes because it may 4724 * occasionally return the wrong value. The implementation must guarantee that 4725 * returning the wrong value is a very rare event. hat_probe is used 4726 * to implement optimizations in the segment drivers. 
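 *
 * A typical (illustrative) use in a segment driver fault path, with the
 * address space lock already held as required, is to return early when
 * the translation is very likely already loaded:
 *
 *	if (hat_probe(as->a_hat, addr))
 *		return (0);
 *	... fall through to the normal, more expensive fault handling ...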
4727 * 4728 */ 4729 int 4730 hat_probe(struct hat *sfmmup, caddr_t addr) 4731 { 4732 pfn_t pfn; 4733 tte_t tte; 4734 4735 ASSERT(sfmmup != NULL); 4736 4737 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 4738 4739 if (sfmmup == ksfmmup) { 4740 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4741 == PFN_SUSPENDED) { 4742 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4743 } 4744 } else { 4745 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4746 } 4747 4748 if (pfn != PFN_INVALID) 4749 return (1); 4750 else 4751 return (0); 4752 } 4753 4754 ssize_t 4755 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4756 { 4757 tte_t tte; 4758 4759 if (sfmmup == ksfmmup) { 4760 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4761 return (-1); 4762 } 4763 } else { 4764 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4765 return (-1); 4766 } 4767 } 4768 4769 ASSERT(TTE_IS_VALID(&tte)); 4770 return (TTEBYTES(TTE_CSZ(&tte))); 4771 } 4772 4773 uint_t 4774 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4775 { 4776 tte_t tte; 4777 4778 if (sfmmup == ksfmmup) { 4779 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4780 tte.ll = 0; 4781 } 4782 } else { 4783 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4784 tte.ll = 0; 4785 } 4786 } 4787 if (TTE_IS_VALID(&tte)) { 4788 *attr = sfmmu_ptov_attr(&tte); 4789 return (0); 4790 } 4791 *attr = 0; 4792 return ((uint_t)0xffffffff); 4793 } 4794 4795 /* 4796 * Enables more attributes on specified address range (ie. logical OR) 4797 */ 4798 void 4799 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4800 { 4801 ASSERT(hat->sfmmu_as != NULL); 4802 4803 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4804 } 4805 4806 /* 4807 * Assigns attributes to the specified address range. All the attributes 4808 * are specified. 4809 */ 4810 void 4811 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4812 { 4813 ASSERT(hat->sfmmu_as != NULL); 4814 4815 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4816 } 4817 4818 /* 4819 * Remove attributes on the specified address range (ie. loginal NAND) 4820 */ 4821 void 4822 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4823 { 4824 ASSERT(hat->sfmmu_as != NULL); 4825 4826 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4827 } 4828 4829 /* 4830 * Change attributes on an address range to that specified by attr and mode. 
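 *
 * mode is one of SFMMU_SETATTR (OR the attributes in), SFMMU_CLRATTR
 * (NAND them out) or SFMMU_CHGATTR (replace them outright), as issued by
 * hat_setattr(), hat_clrattr() and hat_chgattr() above.  For example,
 * hat_clrattr(hat, addr, len, PROT_WRITE) arrives here with
 * mode == SFMMU_CLRATTR and clears both the write-permission and the
 * hardware modify bits of every valid tte in the range.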
4831 */ 4832 static void 4833 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4834 int mode) 4835 { 4836 struct hmehash_bucket *hmebp; 4837 hmeblk_tag hblktag; 4838 int hmeshift, hashno = 1; 4839 struct hme_blk *hmeblkp, *list = NULL; 4840 caddr_t endaddr; 4841 cpuset_t cpuset; 4842 demap_range_t dmr; 4843 4844 CPUSET_ZERO(cpuset); 4845 4846 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 4847 ASSERT((len & MMU_PAGEOFFSET) == 0); 4848 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4849 4850 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4851 ((addr + len) > (caddr_t)USERLIMIT)) { 4852 panic("user addr %p in kernel space", 4853 (void *)addr); 4854 } 4855 4856 endaddr = addr + len; 4857 hblktag.htag_id = sfmmup; 4858 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4859 DEMAP_RANGE_INIT(sfmmup, &dmr); 4860 4861 while (addr < endaddr) { 4862 hmeshift = HME_HASH_SHIFT(hashno); 4863 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4864 hblktag.htag_rehash = hashno; 4865 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4866 4867 SFMMU_HASH_LOCK(hmebp); 4868 4869 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4870 if (hmeblkp != NULL) { 4871 ASSERT(!hmeblkp->hblk_shared); 4872 /* 4873 * We've encountered a shadow hmeblk so skip the range 4874 * of the next smaller mapping size. 4875 */ 4876 if (hmeblkp->hblk_shw_bit) { 4877 ASSERT(sfmmup != ksfmmup); 4878 ASSERT(hashno > 1); 4879 addr = (caddr_t)P2END((uintptr_t)addr, 4880 TTEBYTES(hashno - 1)); 4881 } else { 4882 addr = sfmmu_hblk_chgattr(sfmmup, 4883 hmeblkp, addr, endaddr, &dmr, attr, mode); 4884 } 4885 SFMMU_HASH_UNLOCK(hmebp); 4886 hashno = 1; 4887 continue; 4888 } 4889 SFMMU_HASH_UNLOCK(hmebp); 4890 4891 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4892 /* 4893 * We have traversed the whole list and rehashed 4894 * if necessary without finding the address to chgattr. 4895 * This is ok, so we increment the address by the 4896 * smallest hmeblk range for kernel mappings or for 4897 * user mappings with no large pages, and the largest 4898 * hmeblk range, to account for shadow hmeblks, for 4899 * user mappings with large pages and continue. 4900 */ 4901 if (sfmmup == ksfmmup) 4902 addr = (caddr_t)P2END((uintptr_t)addr, 4903 TTEBYTES(1)); 4904 else 4905 addr = (caddr_t)P2END((uintptr_t)addr, 4906 TTEBYTES(hashno)); 4907 hashno = 1; 4908 } else { 4909 hashno++; 4910 } 4911 } 4912 4913 sfmmu_hblks_list_purge(&list, 0); 4914 DEMAP_RANGE_FLUSH(&dmr); 4915 cpuset = sfmmup->sfmmu_cpusran; 4916 xt_sync(cpuset); 4917 } 4918 4919 /* 4920 * This function chgattr on a range of addresses in an hmeblk. It returns the 4921 * next addres that needs to be chgattr. 4922 * It should be called with the hash lock held. 4923 * XXX It should be possible to optimize chgattr by not flushing every time but 4924 * on the other hand: 4925 * 1. do one flush crosscall. 4926 * 2. 
only flush if we are increasing permissions (make sure this will work) 4927 */ 4928 static caddr_t 4929 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4930 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4931 { 4932 tte_t tte, tteattr, tteflags, ttemod; 4933 struct sf_hment *sfhmep; 4934 int ttesz; 4935 struct page *pp = NULL; 4936 kmutex_t *pml, *pmtx; 4937 int ret; 4938 int use_demap_range; 4939 #if defined(SF_ERRATA_57) 4940 int check_exec; 4941 #endif 4942 4943 ASSERT(in_hblk_range(hmeblkp, addr)); 4944 ASSERT(hmeblkp->hblk_shw_bit == 0); 4945 ASSERT(!hmeblkp->hblk_shared); 4946 4947 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4948 ttesz = get_hblk_ttesz(hmeblkp); 4949 4950 /* 4951 * Flush the current demap region if addresses have been 4952 * skipped or the page size doesn't match. 4953 */ 4954 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4955 if (use_demap_range) { 4956 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4957 } else if (dmrp != NULL) { 4958 DEMAP_RANGE_FLUSH(dmrp); 4959 } 4960 4961 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4962 #if defined(SF_ERRATA_57) 4963 check_exec = (sfmmup != ksfmmup) && 4964 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4965 TTE_IS_EXECUTABLE(&tteattr); 4966 #endif 4967 HBLKTOHME(sfhmep, hmeblkp, addr); 4968 while (addr < endaddr) { 4969 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4970 if (TTE_IS_VALID(&tte)) { 4971 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4972 /* 4973 * if the new attr is the same as old 4974 * continue 4975 */ 4976 goto next_addr; 4977 } 4978 if (!TTE_IS_WRITABLE(&tteattr)) { 4979 /* 4980 * make sure we clear hw modify bit if we 4981 * removing write protections 4982 */ 4983 tteflags.tte_intlo |= TTE_HWWR_INT; 4984 } 4985 4986 pml = NULL; 4987 pp = sfhmep->hme_page; 4988 if (pp) { 4989 pml = sfmmu_mlist_enter(pp); 4990 } 4991 4992 if (pp != sfhmep->hme_page) { 4993 /* 4994 * tte must have been unloaded. 4995 */ 4996 ASSERT(pml); 4997 sfmmu_mlist_exit(pml); 4998 continue; 4999 } 5000 5001 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5002 5003 ttemod = tte; 5004 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 5005 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 5006 5007 #if defined(SF_ERRATA_57) 5008 if (check_exec && addr < errata57_limit) 5009 ttemod.tte_exec_perm = 0; 5010 #endif 5011 ret = sfmmu_modifytte_try(&tte, &ttemod, 5012 &sfhmep->hme_tte); 5013 5014 if (ret < 0) { 5015 /* tte changed underneath us */ 5016 if (pml) { 5017 sfmmu_mlist_exit(pml); 5018 } 5019 continue; 5020 } 5021 5022 if (tteflags.tte_intlo & TTE_HWWR_INT) { 5023 /* 5024 * need to sync if we are clearing modify bit. 5025 */ 5026 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5027 } 5028 5029 if (pp && PP_ISRO(pp)) { 5030 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 5031 pmtx = sfmmu_page_enter(pp); 5032 PP_CLRRO(pp); 5033 sfmmu_page_exit(pmtx); 5034 } 5035 } 5036 5037 if (ret > 0 && use_demap_range) { 5038 DEMAP_RANGE_MARKPG(dmrp, addr); 5039 } else if (ret > 0) { 5040 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5041 } 5042 5043 if (pml) { 5044 sfmmu_mlist_exit(pml); 5045 } 5046 } 5047 next_addr: 5048 addr += TTEBYTES(ttesz); 5049 sfhmep++; 5050 DEMAP_RANGE_NEXTPG(dmrp); 5051 } 5052 return (addr); 5053 } 5054 5055 /* 5056 * This routine converts virtual attributes to physical ones. It will 5057 * update the tteflags field with the tte mask corresponding to the attributes 5058 * affected and it returns the new attributes. 
It will also clear the modify 5059 * bit if we are taking away write permission. This is necessary since the 5060 * modify bit is the hardware permission bit and we need to clear it in order 5061 * to detect write faults. 5062 */ 5063 static uint64_t 5064 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 5065 { 5066 tte_t ttevalue; 5067 5068 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5069 5070 switch (mode) { 5071 case SFMMU_CHGATTR: 5072 /* all attributes specified */ 5073 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5074 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5075 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5076 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5077 break; 5078 case SFMMU_SETATTR: 5079 ASSERT(!(attr & ~HAT_PROT_MASK)); 5080 ttemaskp->ll = 0; 5081 ttevalue.ll = 0; 5082 /* 5083 * a valid tte implies exec and read for sfmmu 5084 * so no need to do anything about them. 5085 * since priviledged access implies user access 5086 * PROT_USER doesn't make sense either. 5087 */ 5088 if (attr & PROT_WRITE) { 5089 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5090 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5091 } 5092 break; 5093 case SFMMU_CLRATTR: 5094 /* attributes will be nand with current ones */ 5095 if (attr & ~(PROT_WRITE | PROT_USER)) { 5096 panic("sfmmu: attr %x not supported", attr); 5097 } 5098 ttemaskp->ll = 0; 5099 ttevalue.ll = 0; 5100 if (attr & PROT_WRITE) { 5101 /* clear both writable and modify bit */ 5102 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5103 } 5104 if (attr & PROT_USER) { 5105 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5106 ttevalue.tte_intlo |= TTE_PRIV_INT; 5107 } 5108 break; 5109 default: 5110 panic("sfmmu_vtop_attr: bad mode %x", mode); 5111 } 5112 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5113 return (ttevalue.ll); 5114 } 5115 5116 static uint_t 5117 sfmmu_ptov_attr(tte_t *ttep) 5118 { 5119 uint_t attr; 5120 5121 ASSERT(TTE_IS_VALID(ttep)); 5122 5123 attr = PROT_READ; 5124 5125 if (TTE_IS_WRITABLE(ttep)) { 5126 attr |= PROT_WRITE; 5127 } 5128 if (TTE_IS_EXECUTABLE(ttep)) { 5129 attr |= PROT_EXEC; 5130 } 5131 if (!TTE_IS_PRIVILEGED(ttep)) { 5132 attr |= PROT_USER; 5133 } 5134 if (TTE_IS_NFO(ttep)) { 5135 attr |= HAT_NOFAULT; 5136 } 5137 if (TTE_IS_NOSYNC(ttep)) { 5138 attr |= HAT_NOSYNC; 5139 } 5140 if (TTE_IS_SIDEFFECT(ttep)) { 5141 attr |= SFMMU_SIDEFFECT; 5142 } 5143 if (!TTE_IS_VCACHEABLE(ttep)) { 5144 attr |= SFMMU_UNCACHEVTTE; 5145 } 5146 if (!TTE_IS_PCACHEABLE(ttep)) { 5147 attr |= SFMMU_UNCACHEPTTE; 5148 } 5149 return (attr); 5150 } 5151 5152 /* 5153 * hat_chgprot is a deprecated hat call. New segment drivers 5154 * should store all attributes and use hat_*attr calls. 5155 * 5156 * Change the protections in the virtual address range 5157 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5158 * then remove write permission, leaving the other 5159 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 
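 *
 * For example (illustrative), an old-style driver revoking write access
 * from a range would call:
 *
 *	hat_chgprot(as->a_hat, addr, len, (uint_t)~PROT_WRITE);
 *
 * New code should express the same thing with
 * hat_clrattr(hat, addr, len, PROT_WRITE) instead, as noted above.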
5160 * 5161 */ 5162 void 5163 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5164 { 5165 struct hmehash_bucket *hmebp; 5166 hmeblk_tag hblktag; 5167 int hmeshift, hashno = 1; 5168 struct hme_blk *hmeblkp, *list = NULL; 5169 caddr_t endaddr; 5170 cpuset_t cpuset; 5171 demap_range_t dmr; 5172 5173 ASSERT((len & MMU_PAGEOFFSET) == 0); 5174 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5175 5176 ASSERT(sfmmup->sfmmu_as != NULL); 5177 5178 CPUSET_ZERO(cpuset); 5179 5180 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 5181 ((addr + len) > (caddr_t)USERLIMIT)) { 5182 panic("user addr %p vprot %x in kernel space", 5183 (void *)addr, vprot); 5184 } 5185 endaddr = addr + len; 5186 hblktag.htag_id = sfmmup; 5187 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5188 DEMAP_RANGE_INIT(sfmmup, &dmr); 5189 5190 while (addr < endaddr) { 5191 hmeshift = HME_HASH_SHIFT(hashno); 5192 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5193 hblktag.htag_rehash = hashno; 5194 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5195 5196 SFMMU_HASH_LOCK(hmebp); 5197 5198 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5199 if (hmeblkp != NULL) { 5200 ASSERT(!hmeblkp->hblk_shared); 5201 /* 5202 * We've encountered a shadow hmeblk so skip the range 5203 * of the next smaller mapping size. 5204 */ 5205 if (hmeblkp->hblk_shw_bit) { 5206 ASSERT(sfmmup != ksfmmup); 5207 ASSERT(hashno > 1); 5208 addr = (caddr_t)P2END((uintptr_t)addr, 5209 TTEBYTES(hashno - 1)); 5210 } else { 5211 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 5212 addr, endaddr, &dmr, vprot); 5213 } 5214 SFMMU_HASH_UNLOCK(hmebp); 5215 hashno = 1; 5216 continue; 5217 } 5218 SFMMU_HASH_UNLOCK(hmebp); 5219 5220 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5221 /* 5222 * We have traversed the whole list and rehashed 5223 * if necessary without finding the address to chgprot. 5224 * This is ok so we increment the address by the 5225 * smallest hmeblk range for kernel mappings and the 5226 * largest hmeblk range, to account for shadow hmeblks, 5227 * for user mappings and continue. 5228 */ 5229 if (sfmmup == ksfmmup) 5230 addr = (caddr_t)P2END((uintptr_t)addr, 5231 TTEBYTES(1)); 5232 else 5233 addr = (caddr_t)P2END((uintptr_t)addr, 5234 TTEBYTES(hashno)); 5235 hashno = 1; 5236 } else { 5237 hashno++; 5238 } 5239 } 5240 5241 sfmmu_hblks_list_purge(&list, 0); 5242 DEMAP_RANGE_FLUSH(&dmr); 5243 cpuset = sfmmup->sfmmu_cpusran; 5244 xt_sync(cpuset); 5245 } 5246 5247 /* 5248 * This function chgprots a range of addresses in an hmeblk. It returns the 5249 * next addres that needs to be chgprot. 5250 * It should be called with the hash lock held. 5251 * XXX It shold be possible to optimize chgprot by not flushing every time but 5252 * on the other hand: 5253 * 1. do one flush crosscall. 5254 * 2. 
only flush if we are increasing permissions (make sure this will work) 5255 */ 5256 static caddr_t 5257 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5258 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5259 { 5260 uint_t pprot; 5261 tte_t tte, ttemod; 5262 struct sf_hment *sfhmep; 5263 uint_t tteflags; 5264 int ttesz; 5265 struct page *pp = NULL; 5266 kmutex_t *pml, *pmtx; 5267 int ret; 5268 int use_demap_range; 5269 #if defined(SF_ERRATA_57) 5270 int check_exec; 5271 #endif 5272 5273 ASSERT(in_hblk_range(hmeblkp, addr)); 5274 ASSERT(hmeblkp->hblk_shw_bit == 0); 5275 ASSERT(!hmeblkp->hblk_shared); 5276 5277 #ifdef DEBUG 5278 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5279 (endaddr < get_hblk_endaddr(hmeblkp))) { 5280 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5281 } 5282 #endif /* DEBUG */ 5283 5284 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5285 ttesz = get_hblk_ttesz(hmeblkp); 5286 5287 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5288 #if defined(SF_ERRATA_57) 5289 check_exec = (sfmmup != ksfmmup) && 5290 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5291 ((vprot & PROT_EXEC) == PROT_EXEC); 5292 #endif 5293 HBLKTOHME(sfhmep, hmeblkp, addr); 5294 5295 /* 5296 * Flush the current demap region if addresses have been 5297 * skipped or the page size doesn't match. 5298 */ 5299 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5300 if (use_demap_range) { 5301 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5302 } else if (dmrp != NULL) { 5303 DEMAP_RANGE_FLUSH(dmrp); 5304 } 5305 5306 while (addr < endaddr) { 5307 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5308 if (TTE_IS_VALID(&tte)) { 5309 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5310 /* 5311 * if the new protection is the same as old 5312 * continue 5313 */ 5314 goto next_addr; 5315 } 5316 pml = NULL; 5317 pp = sfhmep->hme_page; 5318 if (pp) { 5319 pml = sfmmu_mlist_enter(pp); 5320 } 5321 if (pp != sfhmep->hme_page) { 5322 /* 5323 * tte most have been unloaded 5324 * underneath us. Recheck 5325 */ 5326 ASSERT(pml); 5327 sfmmu_mlist_exit(pml); 5328 continue; 5329 } 5330 5331 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5332 5333 ttemod = tte; 5334 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5335 #if defined(SF_ERRATA_57) 5336 if (check_exec && addr < errata57_limit) 5337 ttemod.tte_exec_perm = 0; 5338 #endif 5339 ret = sfmmu_modifytte_try(&tte, &ttemod, 5340 &sfhmep->hme_tte); 5341 5342 if (ret < 0) { 5343 /* tte changed underneath us */ 5344 if (pml) { 5345 sfmmu_mlist_exit(pml); 5346 } 5347 continue; 5348 } 5349 5350 if (tteflags & TTE_HWWR_INT) { 5351 /* 5352 * need to sync if we are clearing modify bit. 5353 */ 5354 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5355 } 5356 5357 if (pp && PP_ISRO(pp)) { 5358 if (pprot & TTE_WRPRM_INT) { 5359 pmtx = sfmmu_page_enter(pp); 5360 PP_CLRRO(pp); 5361 sfmmu_page_exit(pmtx); 5362 } 5363 } 5364 5365 if (ret > 0 && use_demap_range) { 5366 DEMAP_RANGE_MARKPG(dmrp, addr); 5367 } else if (ret > 0) { 5368 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5369 } 5370 5371 if (pml) { 5372 sfmmu_mlist_exit(pml); 5373 } 5374 } 5375 next_addr: 5376 addr += TTEBYTES(ttesz); 5377 sfhmep++; 5378 DEMAP_RANGE_NEXTPG(dmrp); 5379 } 5380 return (addr); 5381 } 5382 5383 /* 5384 * This routine is deprecated and should only be used by hat_chgprot. 5385 * The correct routine is sfmmu_vtop_attr. 5386 * This routine converts virtual page protections to physical ones. 
It will 5387 * update the tteflags field with the tte mask corresponding to the protections 5388 * affected and it returns the new protections. It will also clear the modify 5389 * bit if we are taking away write permission. This is necessary since the 5390 * modify bit is the hardware permission bit and we need to clear it in order 5391 * to detect write faults. 5392 * It accepts the following special protections: 5393 * ~PROT_WRITE = remove write permissions. 5394 * ~PROT_USER = remove user permissions. 5395 */ 5396 static uint_t 5397 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5398 { 5399 if (vprot == (uint_t)~PROT_WRITE) { 5400 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5401 return (0); /* will cause wrprm to be cleared */ 5402 } 5403 if (vprot == (uint_t)~PROT_USER) { 5404 *tteflagsp = TTE_PRIV_INT; 5405 return (0); /* will cause privprm to be cleared */ 5406 } 5407 if ((vprot == 0) || (vprot == PROT_USER) || 5408 ((vprot & PROT_ALL) != vprot)) { 5409 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5410 } 5411 5412 switch (vprot) { 5413 case (PROT_READ): 5414 case (PROT_EXEC): 5415 case (PROT_EXEC | PROT_READ): 5416 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5417 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5418 case (PROT_WRITE): 5419 case (PROT_WRITE | PROT_READ): 5420 case (PROT_EXEC | PROT_WRITE): 5421 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5422 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5423 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5424 case (PROT_USER | PROT_READ): 5425 case (PROT_USER | PROT_EXEC): 5426 case (PROT_USER | PROT_EXEC | PROT_READ): 5427 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5428 return (0); /* clr prv and wrt */ 5429 case (PROT_USER | PROT_WRITE): 5430 case (PROT_USER | PROT_WRITE | PROT_READ): 5431 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5432 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5433 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5434 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5435 default: 5436 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5437 } 5438 return (0); 5439 } 5440 5441 /* 5442 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5443 * the normal algorithm would take too long for a very large VA range with 5444 * few real mappings. This routine just walks thru all HMEs in the global 5445 * hash table to find and remove mappings. 5446 */ 5447 static void 5448 hat_unload_large_virtual(struct hat *sfmmup, caddr_t startaddr, size_t len, 5449 uint_t flags, hat_callback_t *callback) 5450 { 5451 struct hmehash_bucket *hmebp; 5452 struct hme_blk *hmeblkp; 5453 struct hme_blk *pr_hblk = NULL; 5454 struct hme_blk *nx_hblk; 5455 struct hme_blk *list = NULL; 5456 int i; 5457 demap_range_t dmr, *dmrp; 5458 cpuset_t cpuset; 5459 caddr_t endaddr = startaddr + len; 5460 caddr_t sa; 5461 caddr_t ea; 5462 caddr_t cb_sa[MAX_CB_ADDR]; 5463 caddr_t cb_ea[MAX_CB_ADDR]; 5464 int addr_cnt = 0; 5465 int a = 0; 5466 5467 if (sfmmup->sfmmu_free) { 5468 dmrp = NULL; 5469 } else { 5470 dmrp = &dmr; 5471 DEMAP_RANGE_INIT(sfmmup, dmrp); 5472 } 5473 5474 /* 5475 * Loop through all the hash buckets of HME blocks looking for matches. 
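 *
 * With a sparse range spanning most of a 64-bit VA, stepping the normal
 * probe loop in 4M increments would cost far more hash lookups than there
 * are buckets, so a single pass over all UHMEHASH_SZ buckets is cheaper.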
5476 */ 5477 for (i = 0; i <= UHMEHASH_SZ; i++) { 5478 hmebp = &uhme_hash[i]; 5479 SFMMU_HASH_LOCK(hmebp); 5480 hmeblkp = hmebp->hmeblkp; 5481 pr_hblk = NULL; 5482 while (hmeblkp) { 5483 nx_hblk = hmeblkp->hblk_next; 5484 5485 /* 5486 * skip if not this context, if a shadow block or 5487 * if the mapping is not in the requested range 5488 */ 5489 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5490 hmeblkp->hblk_shw_bit || 5491 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5492 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5493 pr_hblk = hmeblkp; 5494 goto next_block; 5495 } 5496 5497 ASSERT(!hmeblkp->hblk_shared); 5498 /* 5499 * unload if there are any current valid mappings 5500 */ 5501 if (hmeblkp->hblk_vcnt != 0 || 5502 hmeblkp->hblk_hmecnt != 0) 5503 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5504 sa, ea, dmrp, flags); 5505 5506 /* 5507 * on unmap we also release the HME block itself, once 5508 * all mappings are gone. 5509 */ 5510 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5511 !hmeblkp->hblk_vcnt && 5512 !hmeblkp->hblk_hmecnt) { 5513 ASSERT(!hmeblkp->hblk_lckcnt); 5514 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5515 &list, 0); 5516 } else { 5517 pr_hblk = hmeblkp; 5518 } 5519 5520 if (callback == NULL) 5521 goto next_block; 5522 5523 /* 5524 * HME blocks may span more than one page, but we may be 5525 * unmapping only one page, so check for a smaller range 5526 * for the callback 5527 */ 5528 if (sa < startaddr) 5529 sa = startaddr; 5530 if (--ea > endaddr) 5531 ea = endaddr - 1; 5532 5533 cb_sa[addr_cnt] = sa; 5534 cb_ea[addr_cnt] = ea; 5535 if (++addr_cnt == MAX_CB_ADDR) { 5536 if (dmrp != NULL) { 5537 DEMAP_RANGE_FLUSH(dmrp); 5538 cpuset = sfmmup->sfmmu_cpusran; 5539 xt_sync(cpuset); 5540 } 5541 5542 for (a = 0; a < MAX_CB_ADDR; ++a) { 5543 callback->hcb_start_addr = cb_sa[a]; 5544 callback->hcb_end_addr = cb_ea[a]; 5545 callback->hcb_function(callback); 5546 } 5547 addr_cnt = 0; 5548 } 5549 5550 next_block: 5551 hmeblkp = nx_hblk; 5552 } 5553 SFMMU_HASH_UNLOCK(hmebp); 5554 } 5555 5556 sfmmu_hblks_list_purge(&list, 0); 5557 if (dmrp != NULL) { 5558 DEMAP_RANGE_FLUSH(dmrp); 5559 cpuset = sfmmup->sfmmu_cpusran; 5560 xt_sync(cpuset); 5561 } 5562 5563 for (a = 0; a < addr_cnt; ++a) { 5564 callback->hcb_start_addr = cb_sa[a]; 5565 callback->hcb_end_addr = cb_ea[a]; 5566 callback->hcb_function(callback); 5567 } 5568 5569 /* 5570 * Check TSB and TLB page sizes if the process isn't exiting. 5571 */ 5572 if (!sfmmup->sfmmu_free) 5573 sfmmu_check_page_sizes(sfmmup, 0); 5574 } 5575 5576 /* 5577 * Unload all the mappings in the range [addr..addr+len). addr and len must 5578 * be MMU_PAGESIZE aligned. 
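 *
 * When a callback is supplied, the sub-ranges actually unloaded are
 * reported back in clumps of up to MAX_CB_ADDR contiguous runs so that
 * only a few xt_sync() cross calls are needed.  Illustrative caller
 * sketch (my_unload_notify is hypothetical):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 *
 * my_unload_notify() then sees hcb_start_addr and hcb_end_addr set for
 * each contiguous range that was actually unloaded.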
5579 */ 5580 5581 extern struct seg *segkmap; 5582 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5583 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5584 5585 5586 void 5587 hat_unload_callback(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags, 5588 hat_callback_t *callback) 5589 { 5590 struct hmehash_bucket *hmebp; 5591 hmeblk_tag hblktag; 5592 int hmeshift, hashno, iskernel; 5593 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5594 caddr_t endaddr; 5595 cpuset_t cpuset; 5596 int addr_count = 0; 5597 int a; 5598 caddr_t cb_start_addr[MAX_CB_ADDR]; 5599 caddr_t cb_end_addr[MAX_CB_ADDR]; 5600 int issegkmap = ISSEGKMAP(sfmmup, addr); 5601 demap_range_t dmr, *dmrp; 5602 5603 ASSERT(sfmmup->sfmmu_as != NULL); 5604 5605 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5606 AS_LOCK_HELD(sfmmup->sfmmu_as)); 5607 5608 ASSERT(sfmmup != NULL); 5609 ASSERT((len & MMU_PAGEOFFSET) == 0); 5610 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5611 5612 /* 5613 * Probing through a large VA range (say 63 bits) will be slow, even 5614 * at 4 Meg steps between the probes. So, when the virtual address range 5615 * is very large, search the HME entries for what to unload. 5616 * 5617 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5618 * 5619 * UHMEHASH_SZ is number of hash buckets to examine 5620 * 5621 */ 5622 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5623 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5624 return; 5625 } 5626 5627 CPUSET_ZERO(cpuset); 5628 5629 /* 5630 * If the process is exiting, we can save a lot of fuss since 5631 * we'll flush the TLB when we free the ctx anyway. 5632 */ 5633 if (sfmmup->sfmmu_free) { 5634 dmrp = NULL; 5635 } else { 5636 dmrp = &dmr; 5637 DEMAP_RANGE_INIT(sfmmup, dmrp); 5638 } 5639 5640 endaddr = addr + len; 5641 hblktag.htag_id = sfmmup; 5642 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5643 5644 /* 5645 * It is likely for the vm to call unload over a wide range of 5646 * addresses that are actually very sparsely populated by 5647 * translations. In order to speed this up the sfmmu hat supports 5648 * the concept of shadow hmeblks. Dummy large page hmeblks that 5649 * correspond to actual small translations are allocated at tteload 5650 * time and are referred to as shadow hmeblks. Now, during unload 5651 * time, we first check if we have a shadow hmeblk for that 5652 * translation. The absence of one means the corresponding address 5653 * range is empty and can be skipped. 5654 * 5655 * The kernel is an exception to above statement and that is why 5656 * we don't use shadow hmeblks and hash starting from the smallest 5657 * page size. 5658 */ 5659 if (sfmmup == KHATID) { 5660 iskernel = 1; 5661 hashno = TTE64K; 5662 } else { 5663 iskernel = 0; 5664 if (mmu_page_sizes == max_mmu_page_sizes) { 5665 hashno = TTE256M; 5666 } else { 5667 hashno = TTE4M; 5668 } 5669 } 5670 while (addr < endaddr) { 5671 hmeshift = HME_HASH_SHIFT(hashno); 5672 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5673 hblktag.htag_rehash = hashno; 5674 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5675 5676 SFMMU_HASH_LOCK(hmebp); 5677 5678 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 5679 if (hmeblkp == NULL) { 5680 /* 5681 * didn't find an hmeblk. skip the appropiate 5682 * address range. 
5683 */ 5684 SFMMU_HASH_UNLOCK(hmebp); 5685 if (iskernel) { 5686 if (hashno < mmu_hashcnt) { 5687 hashno++; 5688 continue; 5689 } else { 5690 hashno = TTE64K; 5691 addr = (caddr_t)roundup((uintptr_t)addr 5692 + 1, MMU_PAGESIZE64K); 5693 continue; 5694 } 5695 } 5696 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5697 (1 << hmeshift)); 5698 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5699 ASSERT(hashno == TTE64K); 5700 continue; 5701 } 5702 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5703 hashno = TTE512K; 5704 continue; 5705 } 5706 if (mmu_page_sizes == max_mmu_page_sizes) { 5707 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5708 hashno = TTE4M; 5709 continue; 5710 } 5711 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5712 hashno = TTE32M; 5713 continue; 5714 } 5715 hashno = TTE256M; 5716 continue; 5717 } else { 5718 hashno = TTE4M; 5719 continue; 5720 } 5721 } 5722 ASSERT(hmeblkp); 5723 ASSERT(!hmeblkp->hblk_shared); 5724 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5725 /* 5726 * If the valid count is zero we can skip the range 5727 * mapped by this hmeblk. 5728 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5729 * is used by segment drivers as a hint 5730 * that the mapping resource won't be used any longer. 5731 * The best example of this is during exit(). 5732 */ 5733 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5734 get_hblk_span(hmeblkp)); 5735 if ((flags & HAT_UNLOAD_UNMAP) || 5736 (iskernel && !issegkmap)) { 5737 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5738 &list, 0); 5739 } 5740 SFMMU_HASH_UNLOCK(hmebp); 5741 5742 if (iskernel) { 5743 hashno = TTE64K; 5744 continue; 5745 } 5746 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5747 ASSERT(hashno == TTE64K); 5748 continue; 5749 } 5750 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5751 hashno = TTE512K; 5752 continue; 5753 } 5754 if (mmu_page_sizes == max_mmu_page_sizes) { 5755 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5756 hashno = TTE4M; 5757 continue; 5758 } 5759 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5760 hashno = TTE32M; 5761 continue; 5762 } 5763 hashno = TTE256M; 5764 continue; 5765 } else { 5766 hashno = TTE4M; 5767 continue; 5768 } 5769 } 5770 if (hmeblkp->hblk_shw_bit) { 5771 /* 5772 * If we encounter a shadow hmeblk we know there is 5773 * smaller sized hmeblks mapping the same address space. 5774 * Decrement the hash size and rehash. 5775 */ 5776 ASSERT(sfmmup != KHATID); 5777 hashno--; 5778 SFMMU_HASH_UNLOCK(hmebp); 5779 continue; 5780 } 5781 5782 /* 5783 * track callback address ranges. 5784 * only start a new range when it's not contiguous 5785 */ 5786 if (callback != NULL) { 5787 if (addr_count > 0 && 5788 addr == cb_end_addr[addr_count - 1]) 5789 --addr_count; 5790 else 5791 cb_start_addr[addr_count] = addr; 5792 } 5793 5794 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5795 dmrp, flags); 5796 5797 if (callback != NULL) 5798 cb_end_addr[addr_count++] = addr; 5799 5800 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5801 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5802 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0); 5803 } 5804 SFMMU_HASH_UNLOCK(hmebp); 5805 5806 /* 5807 * Notify our caller as to exactly which pages 5808 * have been unloaded. We do these in clumps, 5809 * to minimize the number of xt_sync()s that need to occur. 
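		 * (The ranges are accumulated in cb_start_addr[] and
		 * cb_end_addr[] and handed to the callback MAX_CB_ADDR
		 * entries at a time; when a demap range is active, a single
		 * DEMAP_RANGE_FLUSH()/xt_sync() precedes each batch so one
		 * cross-call covers many callback invocations.)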
5810 */ 5811 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5812 if (dmrp != NULL) { 5813 DEMAP_RANGE_FLUSH(dmrp); 5814 cpuset = sfmmup->sfmmu_cpusran; 5815 xt_sync(cpuset); 5816 } 5817 5818 for (a = 0; a < MAX_CB_ADDR; ++a) { 5819 callback->hcb_start_addr = cb_start_addr[a]; 5820 callback->hcb_end_addr = cb_end_addr[a]; 5821 callback->hcb_function(callback); 5822 } 5823 addr_count = 0; 5824 } 5825 if (iskernel) { 5826 hashno = TTE64K; 5827 continue; 5828 } 5829 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5830 ASSERT(hashno == TTE64K); 5831 continue; 5832 } 5833 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5834 hashno = TTE512K; 5835 continue; 5836 } 5837 if (mmu_page_sizes == max_mmu_page_sizes) { 5838 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5839 hashno = TTE4M; 5840 continue; 5841 } 5842 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5843 hashno = TTE32M; 5844 continue; 5845 } 5846 hashno = TTE256M; 5847 } else { 5848 hashno = TTE4M; 5849 } 5850 } 5851 5852 sfmmu_hblks_list_purge(&list, 0); 5853 if (dmrp != NULL) { 5854 DEMAP_RANGE_FLUSH(dmrp); 5855 cpuset = sfmmup->sfmmu_cpusran; 5856 xt_sync(cpuset); 5857 } 5858 if (callback && addr_count != 0) { 5859 for (a = 0; a < addr_count; ++a) { 5860 callback->hcb_start_addr = cb_start_addr[a]; 5861 callback->hcb_end_addr = cb_end_addr[a]; 5862 callback->hcb_function(callback); 5863 } 5864 } 5865 5866 /* 5867 * Check TSB and TLB page sizes if the process isn't exiting. 5868 */ 5869 if (!sfmmup->sfmmu_free) 5870 sfmmu_check_page_sizes(sfmmup, 0); 5871 } 5872 5873 /* 5874 * Unload all the mappings in the range [addr..addr+len). addr and len must 5875 * be MMU_PAGESIZE aligned. 5876 */ 5877 void 5878 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5879 { 5880 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5881 } 5882 5883 5884 /* 5885 * Find the largest mapping size for this page. 5886 */ 5887 int 5888 fnd_mapping_sz(page_t *pp) 5889 { 5890 int sz; 5891 int p_index; 5892 5893 p_index = PP_MAPINDEX(pp); 5894 5895 sz = 0; 5896 p_index >>= 1; /* don't care about 8K bit */ 5897 for (; p_index; p_index >>= 1) { 5898 sz++; 5899 } 5900 5901 return (sz); 5902 } 5903 5904 /* 5905 * This function unloads a range of addresses for an hmeblk. 5906 * It returns the next address to be unloaded. 5907 * It should be called with the hash lock held. 
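 * Illustrative caller pattern (this is how hat_unload_callback() above
 * uses it):
 *
 *	addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
 *	    dmrp, flags);
 *
 * i.e. the return value advances the scan past the portion of the range
 * mapped by this hmeblk, clamped to get_hblk_endaddr(hmeblkp).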
5908 */ 5909 static caddr_t 5910 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5911 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5912 { 5913 tte_t tte, ttemod; 5914 struct sf_hment *sfhmep; 5915 int ttesz; 5916 long ttecnt; 5917 page_t *pp; 5918 kmutex_t *pml; 5919 int ret; 5920 int use_demap_range; 5921 5922 ASSERT(in_hblk_range(hmeblkp, addr)); 5923 ASSERT(!hmeblkp->hblk_shw_bit); 5924 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 5925 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 5926 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 5927 5928 #ifdef DEBUG 5929 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5930 (endaddr < get_hblk_endaddr(hmeblkp))) { 5931 panic("sfmmu_hblk_unload: partial unload of large page"); 5932 } 5933 #endif /* DEBUG */ 5934 5935 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5936 ttesz = get_hblk_ttesz(hmeblkp); 5937 5938 use_demap_range = ((dmrp == NULL) || 5939 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5940 5941 if (use_demap_range) { 5942 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5943 } else if (dmrp != NULL) { 5944 DEMAP_RANGE_FLUSH(dmrp); 5945 } 5946 ttecnt = 0; 5947 HBLKTOHME(sfhmep, hmeblkp, addr); 5948 5949 while (addr < endaddr) { 5950 pml = NULL; 5951 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5952 if (TTE_IS_VALID(&tte)) { 5953 pp = sfhmep->hme_page; 5954 if (pp != NULL) { 5955 pml = sfmmu_mlist_enter(pp); 5956 } 5957 5958 /* 5959 * Verify if hme still points to 'pp' now that 5960 * we have p_mapping lock. 5961 */ 5962 if (sfhmep->hme_page != pp) { 5963 if (pp != NULL && sfhmep->hme_page != NULL) { 5964 ASSERT(pml != NULL); 5965 sfmmu_mlist_exit(pml); 5966 /* Re-start this iteration. */ 5967 continue; 5968 } 5969 ASSERT((pp != NULL) && 5970 (sfhmep->hme_page == NULL)); 5971 goto tte_unloaded; 5972 } 5973 5974 /* 5975 * This point on we have both HASH and p_mapping 5976 * lock. 5977 */ 5978 ASSERT(pp == sfhmep->hme_page); 5979 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5980 5981 /* 5982 * We need to loop on modify tte because it is 5983 * possible for pagesync to come along and 5984 * change the software bits beneath us. 5985 * 5986 * Page_unload can also invalidate the tte after 5987 * we read tte outside of p_mapping lock. 5988 */ 5989 again: 5990 ttemod = tte; 5991 5992 TTE_SET_INVALID(&ttemod); 5993 ret = sfmmu_modifytte_try(&tte, &ttemod, 5994 &sfhmep->hme_tte); 5995 5996 if (ret <= 0) { 5997 if (TTE_IS_VALID(&tte)) { 5998 ASSERT(ret < 0); 5999 goto again; 6000 } 6001 if (pp != NULL) { 6002 panic("sfmmu_hblk_unload: pp = 0x%p " 6003 "tte became invalid under mlist" 6004 " lock = 0x%p", (void *)pp, 6005 (void *)pml); 6006 } 6007 continue; 6008 } 6009 6010 if (!(flags & HAT_UNLOAD_NOSYNC)) { 6011 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6012 } 6013 6014 /* 6015 * Ok- we invalidated the tte. Do the rest of the job. 6016 */ 6017 ttecnt++; 6018 6019 if (flags & HAT_UNLOAD_UNLOCK) { 6020 ASSERT(hmeblkp->hblk_lckcnt > 0); 6021 atomic_dec_32(&hmeblkp->hblk_lckcnt); 6022 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 6023 } 6024 6025 /* 6026 * Normally we would need to flush the page 6027 * from the virtual cache at this point in 6028 * order to prevent a potential cache alias 6029 * inconsistency. 6030 * The particular scenario we need to worry 6031 * about is: 6032 * Given: va1 and va2 are two virtual address 6033 * that alias and map the same physical 6034 * address. 6035 * 1. mapping exists from va1 to pa and data 6036 * has been read into the cache. 6037 * 2. unload va1. 6038 * 3. load va2 and modify data using va2. 
6039 * 4 unload va2. 6040 * 5. load va1 and reference data. Unless we 6041 * flush the data cache when we unload we will 6042 * get stale data. 6043 * Fortunately, page coloring eliminates the 6044 * above scenario by remembering the color a 6045 * physical page was last or is currently 6046 * mapped to. Now, we delay the flush until 6047 * the loading of translations. Only when the 6048 * new translation is of a different color 6049 * are we forced to flush. 6050 */ 6051 if (use_demap_range) { 6052 /* 6053 * Mark this page as needing a demap. 6054 */ 6055 DEMAP_RANGE_MARKPG(dmrp, addr); 6056 } else { 6057 ASSERT(sfmmup != NULL); 6058 ASSERT(!hmeblkp->hblk_shared); 6059 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6060 sfmmup->sfmmu_free, 0); 6061 } 6062 6063 if (pp) { 6064 /* 6065 * Remove the hment from the mapping list 6066 */ 6067 ASSERT(hmeblkp->hblk_hmecnt > 0); 6068 6069 /* 6070 * Again, we cannot 6071 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6072 */ 6073 HME_SUB(sfhmep, pp); 6074 membar_stst(); 6075 atomic_dec_16(&hmeblkp->hblk_hmecnt); 6076 } 6077 6078 ASSERT(hmeblkp->hblk_vcnt > 0); 6079 atomic_dec_16(&hmeblkp->hblk_vcnt); 6080 6081 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6082 !hmeblkp->hblk_lckcnt); 6083 6084 #ifdef VAC 6085 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6086 if (PP_ISTNC(pp)) { 6087 /* 6088 * If page was temporary 6089 * uncached, try to recache 6090 * it. Note that HME_SUB() was 6091 * called above so p_index and 6092 * mlist had been updated. 6093 */ 6094 conv_tnc(pp, ttesz); 6095 } else if (pp->p_mapping == NULL) { 6096 ASSERT(kpm_enable); 6097 /* 6098 * Page is marked to be in VAC conflict 6099 * to an existing kpm mapping and/or is 6100 * kpm mapped using only the regular 6101 * pagesize. 6102 */ 6103 sfmmu_kpm_hme_unload(pp); 6104 } 6105 } 6106 #endif /* VAC */ 6107 } else if ((pp = sfhmep->hme_page) != NULL) { 6108 /* 6109 * TTE is invalid but the hme 6110 * still exists. let pageunload 6111 * complete its job. 6112 */ 6113 ASSERT(pml == NULL); 6114 pml = sfmmu_mlist_enter(pp); 6115 if (sfhmep->hme_page != NULL) { 6116 sfmmu_mlist_exit(pml); 6117 continue; 6118 } 6119 ASSERT(sfhmep->hme_page == NULL); 6120 } else if (hmeblkp->hblk_hmecnt != 0) { 6121 /* 6122 * pageunload may have not finished decrementing 6123 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6124 * wait for pageunload to finish. Rely on pageunload 6125 * to decrement hblk_hmecnt after hblk_vcnt. 6126 */ 6127 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6128 ASSERT(pml == NULL); 6129 if (pf_is_memory(pfn)) { 6130 pp = page_numtopp_nolock(pfn); 6131 if (pp != NULL) { 6132 pml = sfmmu_mlist_enter(pp); 6133 sfmmu_mlist_exit(pml); 6134 pml = NULL; 6135 } 6136 } 6137 } 6138 6139 tte_unloaded: 6140 /* 6141 * At this point, the tte we are looking at 6142 * should be unloaded, and hme has been unlinked 6143 * from page too. This is important because in 6144 * pageunload, it does ttesync() then HME_SUB. 6145 * We need to make sure HME_SUB has been completed 6146 * so we know ttesync() has been completed. Otherwise, 6147 * at exit time, after return from hat layer, VM will 6148 * release as structure which hat_setstat() (called 6149 * by ttesync()) needs. 
6150 */ 6151 #ifdef DEBUG 6152 { 6153 tte_t dtte; 6154 6155 ASSERT(sfhmep->hme_page == NULL); 6156 6157 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6158 ASSERT(!TTE_IS_VALID(&dtte)); 6159 } 6160 #endif 6161 6162 if (pml) { 6163 sfmmu_mlist_exit(pml); 6164 } 6165 6166 addr += TTEBYTES(ttesz); 6167 sfhmep++; 6168 DEMAP_RANGE_NEXTPG(dmrp); 6169 } 6170 /* 6171 * For shared hmeblks this routine is only called when region is freed 6172 * and no longer referenced. So no need to decrement ttecnt 6173 * in the region structure here. 6174 */ 6175 if (ttecnt > 0 && sfmmup != NULL) { 6176 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6177 } 6178 return (addr); 6179 } 6180 6181 /* 6182 * Invalidate a virtual address range for the local CPU. 6183 * For best performance ensure that the va range is completely 6184 * mapped, otherwise the entire TLB will be flushed. 6185 */ 6186 void 6187 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size) 6188 { 6189 ssize_t sz; 6190 caddr_t endva = va + size; 6191 6192 while (va < endva) { 6193 sz = hat_getpagesize(sfmmup, va); 6194 if (sz < 0) { 6195 vtag_flushall(); 6196 break; 6197 } 6198 vtag_flushpage(va, (uint64_t)sfmmup); 6199 va += sz; 6200 } 6201 } 6202 6203 /* 6204 * Synchronize all the mappings in the range [addr..addr+len). 6205 * Can be called with clearflag having two states: 6206 * HAT_SYNC_DONTZERO means just return the rm stats 6207 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6208 */ 6209 void 6210 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6211 { 6212 struct hmehash_bucket *hmebp; 6213 hmeblk_tag hblktag; 6214 int hmeshift, hashno = 1; 6215 struct hme_blk *hmeblkp, *list = NULL; 6216 caddr_t endaddr; 6217 cpuset_t cpuset; 6218 6219 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); 6220 ASSERT((len & MMU_PAGEOFFSET) == 0); 6221 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6222 (clearflag == HAT_SYNC_ZERORM)); 6223 6224 CPUSET_ZERO(cpuset); 6225 6226 endaddr = addr + len; 6227 hblktag.htag_id = sfmmup; 6228 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6229 6230 /* 6231 * Spitfire supports 4 page sizes. 6232 * Most pages are expected to be of the smallest page 6233 * size (8K) and these will not need to be rehashed. 64K 6234 * pages also don't need to be rehashed because the an hmeblk 6235 * spans 64K of address space. 512K pages might need 1 rehash and 6236 * and 4M pages 2 rehashes. 6237 */ 6238 while (addr < endaddr) { 6239 hmeshift = HME_HASH_SHIFT(hashno); 6240 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6241 hblktag.htag_rehash = hashno; 6242 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6243 6244 SFMMU_HASH_LOCK(hmebp); 6245 6246 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6247 if (hmeblkp != NULL) { 6248 ASSERT(!hmeblkp->hblk_shared); 6249 /* 6250 * We've encountered a shadow hmeblk so skip the range 6251 * of the next smaller mapping size. 6252 */ 6253 if (hmeblkp->hblk_shw_bit) { 6254 ASSERT(sfmmup != ksfmmup); 6255 ASSERT(hashno > 1); 6256 addr = (caddr_t)P2END((uintptr_t)addr, 6257 TTEBYTES(hashno - 1)); 6258 } else { 6259 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6260 addr, endaddr, clearflag); 6261 } 6262 SFMMU_HASH_UNLOCK(hmebp); 6263 hashno = 1; 6264 continue; 6265 } 6266 SFMMU_HASH_UNLOCK(hmebp); 6267 6268 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6269 /* 6270 * We have traversed the whole list and rehashed 6271 * if necessary without finding the address to sync. 
6272 * This is ok so we increment the address by the 6273 * smallest hmeblk range for kernel mappings and the 6274 * largest hmeblk range, to account for shadow hmeblks, 6275 * for user mappings and continue. 6276 */ 6277 if (sfmmup == ksfmmup) 6278 addr = (caddr_t)P2END((uintptr_t)addr, 6279 TTEBYTES(1)); 6280 else 6281 addr = (caddr_t)P2END((uintptr_t)addr, 6282 TTEBYTES(hashno)); 6283 hashno = 1; 6284 } else { 6285 hashno++; 6286 } 6287 } 6288 sfmmu_hblks_list_purge(&list, 0); 6289 cpuset = sfmmup->sfmmu_cpusran; 6290 xt_sync(cpuset); 6291 } 6292 6293 static caddr_t 6294 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6295 caddr_t endaddr, int clearflag) 6296 { 6297 tte_t tte, ttemod; 6298 struct sf_hment *sfhmep; 6299 int ttesz; 6300 struct page *pp; 6301 kmutex_t *pml; 6302 int ret; 6303 6304 ASSERT(hmeblkp->hblk_shw_bit == 0); 6305 ASSERT(!hmeblkp->hblk_shared); 6306 6307 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6308 6309 ttesz = get_hblk_ttesz(hmeblkp); 6310 HBLKTOHME(sfhmep, hmeblkp, addr); 6311 6312 while (addr < endaddr) { 6313 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6314 if (TTE_IS_VALID(&tte)) { 6315 pml = NULL; 6316 pp = sfhmep->hme_page; 6317 if (pp) { 6318 pml = sfmmu_mlist_enter(pp); 6319 } 6320 if (pp != sfhmep->hme_page) { 6321 /* 6322 * tte most have been unloaded 6323 * underneath us. Recheck 6324 */ 6325 ASSERT(pml); 6326 sfmmu_mlist_exit(pml); 6327 continue; 6328 } 6329 6330 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6331 6332 if (clearflag == HAT_SYNC_ZERORM) { 6333 ttemod = tte; 6334 TTE_CLR_RM(&ttemod); 6335 ret = sfmmu_modifytte_try(&tte, &ttemod, 6336 &sfhmep->hme_tte); 6337 if (ret < 0) { 6338 if (pml) { 6339 sfmmu_mlist_exit(pml); 6340 } 6341 continue; 6342 } 6343 6344 if (ret > 0) { 6345 sfmmu_tlb_demap(addr, sfmmup, 6346 hmeblkp, 0, 0); 6347 } 6348 } 6349 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6350 if (pml) { 6351 sfmmu_mlist_exit(pml); 6352 } 6353 } 6354 addr += TTEBYTES(ttesz); 6355 sfhmep++; 6356 } 6357 return (addr); 6358 } 6359 6360 /* 6361 * This function will sync a tte to the page struct and it will 6362 * update the hat stats. Currently it allows us to pass a NULL pp 6363 * and we will simply update the stats. We may want to change this 6364 * so we only keep stats for pages backed by pp's. 6365 */ 6366 static void 6367 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6368 { 6369 uint_t rm = 0; 6370 int sz; 6371 pgcnt_t npgs; 6372 6373 ASSERT(TTE_IS_VALID(ttep)); 6374 6375 if (TTE_IS_NOSYNC(ttep)) { 6376 return; 6377 } 6378 6379 if (TTE_IS_REF(ttep)) { 6380 rm = P_REF; 6381 } 6382 if (TTE_IS_MOD(ttep)) { 6383 rm |= P_MOD; 6384 } 6385 6386 if (rm == 0) { 6387 return; 6388 } 6389 6390 sz = TTE_CSZ(ttep); 6391 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6392 int i; 6393 caddr_t vaddr = addr; 6394 6395 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 6396 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 6397 } 6398 6399 } 6400 6401 /* 6402 * XXX I want to use cas to update nrm bits but they 6403 * currently belong in common/vm and not in hat where 6404 * they should be. 6405 * The nrm bits are protected by the same mutex as 6406 * the one that protects the page's mapping list. 6407 */ 6408 if (!pp) 6409 return; 6410 ASSERT(sfmmu_mlist_held(pp)); 6411 /* 6412 * If the tte is for a large page, we need to sync all the 6413 * pages covered by the tte. 
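	 * For example, a 64K tte covers TTEPAGES(TTE64K) == 8 constituent
	 * 8K page_t's; we move to the group leader below and then walk
	 * through all eight of them.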
6414 */ 6415 if (sz != TTE8K) { 6416 ASSERT(pp->p_szc != 0); 6417 pp = PP_GROUPLEADER(pp, sz); 6418 ASSERT(sfmmu_mlist_held(pp)); 6419 } 6420 6421 /* Get number of pages from tte size. */ 6422 npgs = TTEPAGES(sz); 6423 6424 do { 6425 ASSERT(pp); 6426 ASSERT(sfmmu_mlist_held(pp)); 6427 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6428 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 6429 hat_page_setattr(pp, rm); 6430 6431 /* 6432 * Are we done? If not, we must have a large mapping. 6433 * For large mappings we need to sync the rest of the pages 6434 * covered by this tte; goto the next page. 6435 */ 6436 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6437 } 6438 6439 /* 6440 * Execute pre-callback handler of each pa_hment linked to pp 6441 * 6442 * Inputs: 6443 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6444 * capture_cpus: pointer to return value (below) 6445 * 6446 * Returns: 6447 * Propagates the subsystem callback return values back to the caller; 6448 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6449 * is zero if all of the pa_hments are of a type that do not require 6450 * capturing CPUs prior to suspending the mapping, else it is 1. 6451 */ 6452 static int 6453 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6454 { 6455 struct sf_hment *sfhmep; 6456 struct pa_hment *pahmep; 6457 int (*f)(caddr_t, uint_t, uint_t, void *); 6458 int ret; 6459 id_t id; 6460 int locked = 0; 6461 kmutex_t *pml; 6462 6463 ASSERT(PAGE_EXCL(pp)); 6464 if (!sfmmu_mlist_held(pp)) { 6465 pml = sfmmu_mlist_enter(pp); 6466 locked = 1; 6467 } 6468 6469 if (capture_cpus) 6470 *capture_cpus = 0; 6471 6472 top: 6473 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6474 /* 6475 * skip sf_hments corresponding to VA<->PA mappings; 6476 * for pa_hment's, hme_tte.ll is zero 6477 */ 6478 if (!IS_PAHME(sfhmep)) 6479 continue; 6480 6481 pahmep = sfhmep->hme_data; 6482 ASSERT(pahmep != NULL); 6483 6484 /* 6485 * skip if pre-handler has been called earlier in this loop 6486 */ 6487 if (pahmep->flags & flag) 6488 continue; 6489 6490 id = pahmep->cb_id; 6491 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6492 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6493 *capture_cpus = 1; 6494 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6495 pahmep->flags |= flag; 6496 continue; 6497 } 6498 6499 /* 6500 * Drop the mapping list lock to avoid locking order issues. 6501 */ 6502 if (locked) 6503 sfmmu_mlist_exit(pml); 6504 6505 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6506 if (ret != 0) 6507 return (ret); /* caller must do the cleanup */ 6508 6509 if (locked) { 6510 pml = sfmmu_mlist_enter(pp); 6511 pahmep->flags |= flag; 6512 goto top; 6513 } 6514 6515 pahmep->flags |= flag; 6516 } 6517 6518 if (locked) 6519 sfmmu_mlist_exit(pml); 6520 6521 return (0); 6522 } 6523 6524 /* 6525 * Execute post-callback handler of each pa_hment linked to pp 6526 * 6527 * Same overall assumptions and restrictions apply as for 6528 * hat_pageprocess_precallbacks(). 
6529 */ 6530 static void 6531 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6532 { 6533 pfn_t pgpfn = pp->p_pagenum; 6534 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6535 pfn_t newpfn; 6536 struct sf_hment *sfhmep; 6537 struct pa_hment *pahmep; 6538 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6539 id_t id; 6540 int locked = 0; 6541 kmutex_t *pml; 6542 6543 ASSERT(PAGE_EXCL(pp)); 6544 if (!sfmmu_mlist_held(pp)) { 6545 pml = sfmmu_mlist_enter(pp); 6546 locked = 1; 6547 } 6548 6549 top: 6550 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6551 /* 6552 * skip sf_hments corresponding to VA<->PA mappings; 6553 * for pa_hment's, hme_tte.ll is zero 6554 */ 6555 if (!IS_PAHME(sfhmep)) 6556 continue; 6557 6558 pahmep = sfhmep->hme_data; 6559 ASSERT(pahmep != NULL); 6560 6561 if ((pahmep->flags & flag) == 0) 6562 continue; 6563 6564 pahmep->flags &= ~flag; 6565 6566 id = pahmep->cb_id; 6567 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6568 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6569 continue; 6570 6571 /* 6572 * Convert the base page PFN into the constituent PFN 6573 * which is needed by the callback handler. 6574 */ 6575 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6576 6577 /* 6578 * Drop the mapping list lock to avoid locking order issues. 6579 */ 6580 if (locked) 6581 sfmmu_mlist_exit(pml); 6582 6583 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6584 != 0) 6585 panic("sfmmu: posthandler failed"); 6586 6587 if (locked) { 6588 pml = sfmmu_mlist_enter(pp); 6589 goto top; 6590 } 6591 } 6592 6593 if (locked) 6594 sfmmu_mlist_exit(pml); 6595 } 6596 6597 /* 6598 * Suspend locked kernel mapping 6599 */ 6600 void 6601 hat_pagesuspend(struct page *pp) 6602 { 6603 struct sf_hment *sfhmep; 6604 sfmmu_t *sfmmup; 6605 tte_t tte, ttemod; 6606 struct hme_blk *hmeblkp; 6607 caddr_t addr; 6608 int index, cons; 6609 cpuset_t cpuset; 6610 6611 ASSERT(PAGE_EXCL(pp)); 6612 ASSERT(sfmmu_mlist_held(pp)); 6613 6614 mutex_enter(&kpr_suspendlock); 6615 6616 /* 6617 * We're about to suspend a kernel mapping so mark this thread as 6618 * non-traceable by DTrace. This prevents us from running into issues 6619 * with probe context trying to touch a suspended page 6620 * in the relocation codepath itself. 6621 */ 6622 curthread->t_flag |= T_DONTDTRACE; 6623 6624 index = PP_MAPINDEX(pp); 6625 cons = TTE8K; 6626 6627 retry: 6628 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6629 6630 if (IS_PAHME(sfhmep)) 6631 continue; 6632 6633 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6634 continue; 6635 6636 /* 6637 * Loop until we successfully set the suspend bit in 6638 * the TTE. 6639 */ 6640 again: 6641 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6642 ASSERT(TTE_IS_VALID(&tte)); 6643 6644 ttemod = tte; 6645 TTE_SET_SUSPEND(&ttemod); 6646 if (sfmmu_modifytte_try(&tte, &ttemod, 6647 &sfhmep->hme_tte) < 0) 6648 goto again; 6649 6650 /* 6651 * Invalidate TSB entry 6652 */ 6653 hmeblkp = sfmmu_hmetohblk(sfhmep); 6654 6655 sfmmup = hblktosfmmu(hmeblkp); 6656 ASSERT(sfmmup == ksfmmup); 6657 ASSERT(!hmeblkp->hblk_shared); 6658 6659 addr = tte_to_vaddr(hmeblkp, tte); 6660 6661 /* 6662 * No need to make sure that the TSB for this sfmmu is 6663 * not being relocated since it is ksfmmup and thus it 6664 * will never be relocated. 
6665 */ 6666 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6667 6668 /* 6669 * Update xcall stats 6670 */ 6671 cpuset = cpu_ready_set; 6672 CPUSET_DEL(cpuset, CPU->cpu_id); 6673 6674 /* LINTED: constant in conditional context */ 6675 SFMMU_XCALL_STATS(ksfmmup); 6676 6677 /* 6678 * Flush TLB entry on remote CPU's 6679 */ 6680 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6681 (uint64_t)ksfmmup); 6682 xt_sync(cpuset); 6683 6684 /* 6685 * Flush TLB entry on local CPU 6686 */ 6687 vtag_flushpage(addr, (uint64_t)ksfmmup); 6688 } 6689 6690 while (index != 0) { 6691 index = index >> 1; 6692 if (index != 0) 6693 cons++; 6694 if (index & 0x1) { 6695 pp = PP_GROUPLEADER(pp, cons); 6696 goto retry; 6697 } 6698 } 6699 } 6700 6701 #ifdef DEBUG 6702 6703 #define N_PRLE 1024 6704 struct prle { 6705 page_t *targ; 6706 page_t *repl; 6707 int status; 6708 int pausecpus; 6709 hrtime_t whence; 6710 }; 6711 6712 static struct prle page_relocate_log[N_PRLE]; 6713 static int prl_entry; 6714 static kmutex_t prl_mutex; 6715 6716 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6717 mutex_enter(&prl_mutex); \ 6718 page_relocate_log[prl_entry].targ = *(t); \ 6719 page_relocate_log[prl_entry].repl = *(r); \ 6720 page_relocate_log[prl_entry].status = (s); \ 6721 page_relocate_log[prl_entry].pausecpus = (p); \ 6722 page_relocate_log[prl_entry].whence = gethrtime(); \ 6723 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6724 mutex_exit(&prl_mutex); 6725 6726 #else /* !DEBUG */ 6727 #define PAGE_RELOCATE_LOG(t, r, s, p) 6728 #endif 6729 6730 /* 6731 * Core Kernel Page Relocation Algorithm 6732 * 6733 * Input: 6734 * 6735 * target : constituent pages are SE_EXCL locked. 6736 * replacement: constituent pages are SE_EXCL locked. 6737 * 6738 * Output: 6739 * 6740 * nrelocp: number of pages relocated 6741 */ 6742 int 6743 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6744 { 6745 page_t *targ, *repl; 6746 page_t *tpp, *rpp; 6747 kmutex_t *low, *high; 6748 spgcnt_t npages, i; 6749 page_t *pl = NULL; 6750 int old_pil; 6751 cpuset_t cpuset; 6752 int cap_cpus; 6753 int ret; 6754 #ifdef VAC 6755 int cflags = 0; 6756 #endif 6757 6758 if (!kcage_on || PP_ISNORELOC(*target)) { 6759 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6760 return (EAGAIN); 6761 } 6762 6763 mutex_enter(&kpr_mutex); 6764 kreloc_thread = curthread; 6765 6766 targ = *target; 6767 repl = *replacement; 6768 ASSERT(repl != NULL); 6769 ASSERT(targ->p_szc == repl->p_szc); 6770 6771 npages = page_get_pagecnt(targ->p_szc); 6772 6773 /* 6774 * unload VA<->PA mappings that are not locked 6775 */ 6776 tpp = targ; 6777 for (i = 0; i < npages; i++) { 6778 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6779 tpp++; 6780 } 6781 6782 /* 6783 * Do "presuspend" callbacks, in a context from which we can still 6784 * block as needed. Note that we don't hold the mapping list lock 6785 * of "targ" at this point due to potential locking order issues; 6786 * we assume that between the hat_pageunload() above and holding 6787 * the SE_EXCL lock that the mapping list *cannot* change at this 6788 * point. 6789 */ 6790 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6791 if (ret != 0) { 6792 /* 6793 * EIO translates to fatal error, for all others cleanup 6794 * and return EAGAIN. 
6795 */ 6796 ASSERT(ret != EIO); 6797 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6798 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6799 kreloc_thread = NULL; 6800 mutex_exit(&kpr_mutex); 6801 return (EAGAIN); 6802 } 6803 6804 /* 6805 * acquire p_mapping list lock for both the target and replacement 6806 * root pages. 6807 * 6808 * low and high refer to the need to grab the mlist locks in a 6809 * specific order in order to prevent race conditions. Thus the 6810 * lower lock must be grabbed before the higher lock. 6811 * 6812 * This will block hat_unload's accessing p_mapping list. Since 6813 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6814 * blocked. Thus, no one else will be accessing the p_mapping list 6815 * while we suspend and reload the locked mapping below. 6816 */ 6817 tpp = targ; 6818 rpp = repl; 6819 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6820 6821 kpreempt_disable(); 6822 6823 /* 6824 * We raise our PIL to 13 so that we don't get captured by 6825 * another CPU or pinned by an interrupt thread. We can't go to 6826 * PIL 14 since the nexus driver(s) may need to interrupt at 6827 * that level in the case of IOMMU pseudo mappings. 6828 */ 6829 cpuset = cpu_ready_set; 6830 CPUSET_DEL(cpuset, CPU->cpu_id); 6831 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6832 old_pil = splr(XCALL_PIL); 6833 } else { 6834 old_pil = -1; 6835 xc_attention(cpuset); 6836 } 6837 ASSERT(getpil() == XCALL_PIL); 6838 6839 /* 6840 * Now do suspend callbacks. In the case of an IOMMU mapping 6841 * this will suspend all DMA activity to the page while it is 6842 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6843 * may be captured at this point we should have acquired any needed 6844 * locks in the presuspend callback. 6845 */ 6846 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6847 if (ret != 0) { 6848 repl = targ; 6849 goto suspend_fail; 6850 } 6851 6852 /* 6853 * Raise the PIL yet again, this time to block all high-level 6854 * interrupts on this CPU. This is necessary to prevent an 6855 * interrupt routine from pinning the thread which holds the 6856 * mapping suspended and then touching the suspended page. 6857 * 6858 * Once the page is suspended we also need to be careful to 6859 * avoid calling any functions which touch any seg_kmem memory 6860 * since that memory may be backed by the very page we are 6861 * relocating in here! 6862 */ 6863 hat_pagesuspend(targ); 6864 6865 /* 6866 * Now that we are confident everybody has stopped using this page, 6867 * copy the page contents. Note we use a physical copy to prevent 6868 * locking issues and to avoid fpRAS because we can't handle it in 6869 * this context. 6870 */ 6871 for (i = 0; i < npages; i++, tpp++, rpp++) { 6872 #ifdef VAC 6873 /* 6874 * If the replacement has a different vcolor than 6875 * the one being replacd, we need to handle VAC 6876 * consistency for it just as we were setting up 6877 * a new mapping to it. 6878 */ 6879 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) && 6880 (tpp->p_vcolor != rpp->p_vcolor) && 6881 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) { 6882 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp)); 6883 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6884 rpp->p_pagenum); 6885 } 6886 #endif 6887 /* 6888 * Copy the contents of the page. 6889 */ 6890 ppcopy_kernel(tpp, rpp); 6891 } 6892 6893 tpp = targ; 6894 rpp = repl; 6895 for (i = 0; i < npages; i++, tpp++, rpp++) { 6896 /* 6897 * Copy attributes. VAC consistency was handled above, 6898 * if required. 
6899 */ 6900 rpp->p_nrm = tpp->p_nrm; 6901 tpp->p_nrm = 0; 6902 rpp->p_index = tpp->p_index; 6903 tpp->p_index = 0; 6904 #ifdef VAC 6905 rpp->p_vcolor = tpp->p_vcolor; 6906 #endif 6907 } 6908 6909 /* 6910 * First, unsuspend the page, if we set the suspend bit, and transfer 6911 * the mapping list from the target page to the replacement page. 6912 * Next process postcallbacks; since pa_hment's are linked only to the 6913 * p_mapping list of root page, we don't iterate over the constituent 6914 * pages. 6915 */ 6916 hat_pagereload(targ, repl); 6917 6918 suspend_fail: 6919 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6920 6921 /* 6922 * Now lower our PIL and release any captured CPUs since we 6923 * are out of the "danger zone". After this it will again be 6924 * safe to acquire adaptive mutex locks, or to drop them... 6925 */ 6926 if (old_pil != -1) { 6927 splx(old_pil); 6928 } else { 6929 xc_dismissed(cpuset); 6930 } 6931 6932 kpreempt_enable(); 6933 6934 sfmmu_mlist_reloc_exit(low, high); 6935 6936 /* 6937 * Postsuspend callbacks should drop any locks held across 6938 * the suspend callbacks. As before, we don't hold the mapping 6939 * list lock at this point.. our assumption is that the mapping 6940 * list still can't change due to our holding SE_EXCL lock and 6941 * there being no unlocked mappings left. Hence the restriction 6942 * on calling context to hat_delete_callback() 6943 */ 6944 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6945 if (ret != 0) { 6946 /* 6947 * The second presuspend call failed: we got here through 6948 * the suspend_fail label above. 6949 */ 6950 ASSERT(ret != EIO); 6951 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6952 kreloc_thread = NULL; 6953 mutex_exit(&kpr_mutex); 6954 return (EAGAIN); 6955 } 6956 6957 /* 6958 * Now that we're out of the performance critical section we can 6959 * take care of updating the hash table, since we still 6960 * hold all the pages locked SE_EXCL at this point we 6961 * needn't worry about things changing out from under us. 6962 */ 6963 tpp = targ; 6964 rpp = repl; 6965 for (i = 0; i < npages; i++, tpp++, rpp++) { 6966 6967 /* 6968 * replace targ with replacement in page_hash table 6969 */ 6970 targ = tpp; 6971 page_relocate_hash(rpp, targ); 6972 6973 /* 6974 * concatenate target; caller of platform_page_relocate() 6975 * expects target to be concatenated after returning. 6976 */ 6977 ASSERT(targ->p_next == targ); 6978 ASSERT(targ->p_prev == targ); 6979 page_list_concat(&pl, &targ); 6980 } 6981 6982 ASSERT(*target == pl); 6983 *nrelocp = npages; 6984 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6985 kreloc_thread = NULL; 6986 mutex_exit(&kpr_mutex); 6987 return (0); 6988 } 6989 6990 /* 6991 * Called when stray pa_hments are found attached to a page which is 6992 * being freed. Notify the subsystem which attached the pa_hment of 6993 * the error if it registered a suitable handler, else panic. 6994 */ 6995 static void 6996 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6997 { 6998 id_t cb_id = pahmep->cb_id; 6999 7000 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 7001 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 7002 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 7003 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 7004 return; /* non-fatal */ 7005 } 7006 panic("pa_hment leaked: 0x%p", (void *)pahmep); 7007 } 7008 7009 /* 7010 * Remove all mappings to page 'pp'. 
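 * If 'forceflag' is SFMMU_KERNEL_RELOC, locked kernel mappings are left
 * in place here; hat_page_relocate() suspends them afterwards via
 * hat_pagesuspend().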
7011 */ 7012 int 7013 hat_pageunload(struct page *pp, uint_t forceflag) 7014 { 7015 struct page *origpp = pp; 7016 struct sf_hment *sfhme, *tmphme; 7017 struct hme_blk *hmeblkp; 7018 kmutex_t *pml; 7019 #ifdef VAC 7020 kmutex_t *pmtx; 7021 #endif 7022 cpuset_t cpuset, tset; 7023 int index, cons; 7024 int pa_hments; 7025 7026 ASSERT(PAGE_EXCL(pp)); 7027 7028 tmphme = NULL; 7029 pa_hments = 0; 7030 CPUSET_ZERO(cpuset); 7031 7032 pml = sfmmu_mlist_enter(pp); 7033 7034 #ifdef VAC 7035 if (pp->p_kpmref) 7036 sfmmu_kpm_pageunload(pp); 7037 ASSERT(!PP_ISMAPPED_KPM(pp)); 7038 #endif 7039 /* 7040 * Clear vpm reference. Since the page is exclusively locked 7041 * vpm cannot be referencing it. 7042 */ 7043 if (vpm_enable) { 7044 pp->p_vpmref = 0; 7045 } 7046 7047 index = PP_MAPINDEX(pp); 7048 cons = TTE8K; 7049 retry: 7050 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7051 tmphme = sfhme->hme_next; 7052 7053 if (IS_PAHME(sfhme)) { 7054 ASSERT(sfhme->hme_data != NULL); 7055 pa_hments++; 7056 continue; 7057 } 7058 7059 hmeblkp = sfmmu_hmetohblk(sfhme); 7060 7061 /* 7062 * If there are kernel mappings don't unload them, they will 7063 * be suspended. 7064 */ 7065 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7066 hmeblkp->hblk_tag.htag_id == ksfmmup) 7067 continue; 7068 7069 tset = sfmmu_pageunload(pp, sfhme, cons); 7070 CPUSET_OR(cpuset, tset); 7071 } 7072 7073 while (index != 0) { 7074 index = index >> 1; 7075 if (index != 0) 7076 cons++; 7077 if (index & 0x1) { 7078 /* Go to leading page */ 7079 pp = PP_GROUPLEADER(pp, cons); 7080 ASSERT(sfmmu_mlist_held(pp)); 7081 goto retry; 7082 } 7083 } 7084 7085 /* 7086 * cpuset may be empty if the page was only mapped by segkpm, 7087 * in which case we won't actually cross-trap. 7088 */ 7089 xt_sync(cpuset); 7090 7091 /* 7092 * The page should have no mappings at this point, unless 7093 * we were called from hat_page_relocate() in which case we 7094 * leave the locked mappings which will be suspended later. 7095 */ 7096 ASSERT(!PP_ISMAPPED(origpp) || pa_hments || 7097 (forceflag == SFMMU_KERNEL_RELOC)); 7098 7099 #ifdef VAC 7100 if (PP_ISTNC(pp)) { 7101 if (cons == TTE8K) { 7102 pmtx = sfmmu_page_enter(pp); 7103 PP_CLRTNC(pp); 7104 sfmmu_page_exit(pmtx); 7105 } else { 7106 conv_tnc(pp, cons); 7107 } 7108 } 7109 #endif /* VAC */ 7110 7111 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7112 /* 7113 * Unlink any pa_hments and free them, calling back 7114 * the responsible subsystem to notify it of the error. 7115 * This can occur in situations such as drivers leaking 7116 * DMA handles: naughty, but common enough that we'd like 7117 * to keep the system running rather than bringing it 7118 * down with an obscure error like "pa_hment leaked" 7119 * which doesn't aid the user in debugging their driver. 
7120 */ 7121 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7122 tmphme = sfhme->hme_next; 7123 if (IS_PAHME(sfhme)) { 7124 struct pa_hment *pahmep = sfhme->hme_data; 7125 sfmmu_pahment_leaked(pahmep); 7126 HME_SUB(sfhme, pp); 7127 kmem_cache_free(pa_hment_cache, pahmep); 7128 } 7129 } 7130 7131 ASSERT(!PP_ISMAPPED(origpp)); 7132 } 7133 7134 sfmmu_mlist_exit(pml); 7135 7136 return (0); 7137 } 7138 7139 cpuset_t 7140 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7141 { 7142 struct hme_blk *hmeblkp; 7143 sfmmu_t *sfmmup; 7144 tte_t tte, ttemod; 7145 #ifdef DEBUG 7146 tte_t orig_old; 7147 #endif /* DEBUG */ 7148 caddr_t addr; 7149 int ttesz; 7150 int ret; 7151 cpuset_t cpuset; 7152 7153 ASSERT(pp != NULL); 7154 ASSERT(sfmmu_mlist_held(pp)); 7155 ASSERT(!PP_ISKAS(pp)); 7156 7157 CPUSET_ZERO(cpuset); 7158 7159 hmeblkp = sfmmu_hmetohblk(sfhme); 7160 7161 readtte: 7162 sfmmu_copytte(&sfhme->hme_tte, &tte); 7163 if (TTE_IS_VALID(&tte)) { 7164 sfmmup = hblktosfmmu(hmeblkp); 7165 ttesz = get_hblk_ttesz(hmeblkp); 7166 /* 7167 * Only unload mappings of 'cons' size. 7168 */ 7169 if (ttesz != cons) 7170 return (cpuset); 7171 7172 /* 7173 * Note that we have p_mapping lock, but no hash lock here. 7174 * hblk_unload() has to have both hash lock AND p_mapping 7175 * lock before it tries to modify tte. So, the tte could 7176 * not become invalid in the sfmmu_modifytte_try() below. 7177 */ 7178 ttemod = tte; 7179 #ifdef DEBUG 7180 orig_old = tte; 7181 #endif /* DEBUG */ 7182 7183 TTE_SET_INVALID(&ttemod); 7184 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7185 if (ret < 0) { 7186 #ifdef DEBUG 7187 /* only R/M bits can change. */ 7188 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7189 #endif /* DEBUG */ 7190 goto readtte; 7191 } 7192 7193 if (ret == 0) { 7194 panic("pageunload: cas failed?"); 7195 } 7196 7197 addr = tte_to_vaddr(hmeblkp, tte); 7198 7199 if (hmeblkp->hblk_shared) { 7200 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7201 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7202 sf_region_t *rgnp; 7203 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7204 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7205 ASSERT(srdp != NULL); 7206 rgnp = srdp->srd_hmergnp[rid]; 7207 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7208 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7209 sfmmu_ttesync(NULL, addr, &tte, pp); 7210 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7211 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]); 7212 } else { 7213 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7214 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]); 7215 7216 /* 7217 * We need to flush the page from the virtual cache 7218 * in order to prevent a virtual cache alias 7219 * inconsistency. The particular scenario we need 7220 * to worry about is: 7221 * Given: va1 and va2 are two virtual address that 7222 * alias and will map the same physical address. 7223 * 1. mapping exists from va1 to pa and data has 7224 * been read into the cache. 7225 * 2. unload va1. 7226 * 3. load va2 and modify data using va2. 7227 * 4 unload va2. 7228 * 5. load va1 and reference data. Unless we flush 7229 * the data cache when we unload we will get 7230 * stale data. 7231 * This scenario is taken care of by using virtual 7232 * page coloring. 7233 */ 7234 if (sfmmup->sfmmu_ismhat) { 7235 /* 7236 * Flush TSBs, TLBs and caches 7237 * of every process 7238 * sharing this ism segment. 
7239 */ 7240 sfmmu_hat_lock_all(); 7241 mutex_enter(&ism_mlist_lock); 7242 kpreempt_disable(); 7243 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7244 pp->p_pagenum, CACHE_NO_FLUSH); 7245 kpreempt_enable(); 7246 mutex_exit(&ism_mlist_lock); 7247 sfmmu_hat_unlock_all(); 7248 cpuset = cpu_ready_set; 7249 } else { 7250 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7251 cpuset = sfmmup->sfmmu_cpusran; 7252 } 7253 } 7254 7255 /* 7256 * Hme_sub has to run after ttesync() and a_rss update. 7257 * See hblk_unload(). 7258 */ 7259 HME_SUB(sfhme, pp); 7260 membar_stst(); 7261 7262 /* 7263 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7264 * since pteload may have done a HME_ADD() right after 7265 * we did the HME_SUB() above. Hmecnt is now maintained 7266 * by cas only. no lock guranteed its value. The only 7267 * gurantee we have is the hmecnt should not be less than 7268 * what it should be so the hblk will not be taken away. 7269 * It's also important that we decremented the hmecnt after 7270 * we are done with hmeblkp so that this hmeblk won't be 7271 * stolen. 7272 */ 7273 ASSERT(hmeblkp->hblk_hmecnt > 0); 7274 ASSERT(hmeblkp->hblk_vcnt > 0); 7275 atomic_dec_16(&hmeblkp->hblk_vcnt); 7276 atomic_dec_16(&hmeblkp->hblk_hmecnt); 7277 /* 7278 * This is bug 4063182. 7279 * XXX: fixme 7280 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7281 * !hmeblkp->hblk_lckcnt); 7282 */ 7283 } else { 7284 panic("invalid tte? pp %p &tte %p", 7285 (void *)pp, (void *)&tte); 7286 } 7287 7288 return (cpuset); 7289 } 7290 7291 /* 7292 * While relocating a kernel page, this function will move the mappings 7293 * from tpp to dpp and modify any associated data with these mappings. 7294 * It also unsuspends the suspended kernel mapping. 7295 */ 7296 static void 7297 hat_pagereload(struct page *tpp, struct page *dpp) 7298 { 7299 struct sf_hment *sfhme; 7300 tte_t tte, ttemod; 7301 int index, cons; 7302 7303 ASSERT(getpil() == PIL_MAX); 7304 ASSERT(sfmmu_mlist_held(tpp)); 7305 ASSERT(sfmmu_mlist_held(dpp)); 7306 7307 index = PP_MAPINDEX(tpp); 7308 cons = TTE8K; 7309 7310 /* Update real mappings to the page */ 7311 retry: 7312 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7313 if (IS_PAHME(sfhme)) 7314 continue; 7315 sfmmu_copytte(&sfhme->hme_tte, &tte); 7316 ttemod = tte; 7317 7318 /* 7319 * replace old pfn with new pfn in TTE 7320 */ 7321 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7322 7323 /* 7324 * clear suspend bit 7325 */ 7326 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7327 TTE_CLR_SUSPEND(&ttemod); 7328 7329 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7330 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7331 7332 /* 7333 * set hme_page point to new page 7334 */ 7335 sfhme->hme_page = dpp; 7336 } 7337 7338 /* 7339 * move p_mapping list from old page to new page 7340 */ 7341 dpp->p_mapping = tpp->p_mapping; 7342 tpp->p_mapping = NULL; 7343 dpp->p_share = tpp->p_share; 7344 tpp->p_share = 0; 7345 7346 while (index != 0) { 7347 index = index >> 1; 7348 if (index != 0) 7349 cons++; 7350 if (index & 0x1) { 7351 tpp = PP_GROUPLEADER(tpp, cons); 7352 dpp = PP_GROUPLEADER(dpp, cons); 7353 goto retry; 7354 } 7355 } 7356 7357 curthread->t_flag &= ~T_DONTDTRACE; 7358 mutex_exit(&kpr_suspendlock); 7359 } 7360 7361 uint_t 7362 hat_pagesync(struct page *pp, uint_t clearflag) 7363 { 7364 struct sf_hment *sfhme, *tmphme = NULL; 7365 struct hme_blk *hmeblkp; 7366 kmutex_t *pml; 7367 cpuset_t cpuset, tset; 7368 int index, cons; 7369 extern ulong_t po_share; 7370 page_t *save_pp = pp; 
7371 int stop_on_sh = 0; 7372 uint_t shcnt; 7373 7374 CPUSET_ZERO(cpuset); 7375 7376 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7377 return (PP_GENERIC_ATTR(pp)); 7378 } 7379 7380 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7381 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7382 return (PP_GENERIC_ATTR(pp)); 7383 } 7384 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7385 return (PP_GENERIC_ATTR(pp)); 7386 } 7387 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7388 if (pp->p_share > po_share) { 7389 hat_page_setattr(pp, P_REF); 7390 return (PP_GENERIC_ATTR(pp)); 7391 } 7392 stop_on_sh = 1; 7393 shcnt = 0; 7394 } 7395 } 7396 7397 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7398 pml = sfmmu_mlist_enter(pp); 7399 index = PP_MAPINDEX(pp); 7400 cons = TTE8K; 7401 retry: 7402 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7403 /* 7404 * We need to save the next hment on the list since 7405 * it is possible for pagesync to remove an invalid hment 7406 * from the list. 7407 */ 7408 tmphme = sfhme->hme_next; 7409 if (IS_PAHME(sfhme)) 7410 continue; 7411 /* 7412 * If we are looking for large mappings and this hme doesn't 7413 * reach the range we are seeking, just ignore it. 7414 */ 7415 hmeblkp = sfmmu_hmetohblk(sfhme); 7416 7417 if (hme_size(sfhme) < cons) 7418 continue; 7419 7420 if (stop_on_sh) { 7421 if (hmeblkp->hblk_shared) { 7422 sf_srd_t *srdp = hblktosrd(hmeblkp); 7423 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7424 sf_region_t *rgnp; 7425 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7426 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7427 ASSERT(srdp != NULL); 7428 rgnp = srdp->srd_hmergnp[rid]; 7429 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7430 rgnp, rid); 7431 shcnt += rgnp->rgn_refcnt; 7432 } else { 7433 shcnt++; 7434 } 7435 if (shcnt > po_share) { 7436 /* 7437 * tell the pager to spare the page this time 7438 * around. 7439 */ 7440 hat_page_setattr(save_pp, P_REF); 7441 index = 0; 7442 break; 7443 } 7444 } 7445 tset = sfmmu_pagesync(pp, sfhme, 7446 clearflag & ~HAT_SYNC_STOPON_RM); 7447 CPUSET_OR(cpuset, tset); 7448 7449 /* 7450 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7451 * as the "ref" or "mod" is set or share cnt exceeds po_share. 
7452 */ 7453 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7454 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7455 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7456 index = 0; 7457 break; 7458 } 7459 } 7460 7461 while (index) { 7462 index = index >> 1; 7463 cons++; 7464 if (index & 0x1) { 7465 /* Go to leading page */ 7466 pp = PP_GROUPLEADER(pp, cons); 7467 goto retry; 7468 } 7469 } 7470 7471 xt_sync(cpuset); 7472 sfmmu_mlist_exit(pml); 7473 return (PP_GENERIC_ATTR(save_pp)); 7474 } 7475 7476 /* 7477 * Get all the hardware dependent attributes for a page struct 7478 */ 7479 static cpuset_t 7480 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7481 uint_t clearflag) 7482 { 7483 caddr_t addr; 7484 tte_t tte, ttemod; 7485 struct hme_blk *hmeblkp; 7486 int ret; 7487 sfmmu_t *sfmmup; 7488 cpuset_t cpuset; 7489 7490 ASSERT(pp != NULL); 7491 ASSERT(sfmmu_mlist_held(pp)); 7492 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7493 (clearflag == HAT_SYNC_ZERORM)); 7494 7495 SFMMU_STAT(sf_pagesync); 7496 7497 CPUSET_ZERO(cpuset); 7498 7499 sfmmu_pagesync_retry: 7500 7501 sfmmu_copytte(&sfhme->hme_tte, &tte); 7502 if (TTE_IS_VALID(&tte)) { 7503 hmeblkp = sfmmu_hmetohblk(sfhme); 7504 sfmmup = hblktosfmmu(hmeblkp); 7505 addr = tte_to_vaddr(hmeblkp, tte); 7506 if (clearflag == HAT_SYNC_ZERORM) { 7507 ttemod = tte; 7508 TTE_CLR_RM(&ttemod); 7509 ret = sfmmu_modifytte_try(&tte, &ttemod, 7510 &sfhme->hme_tte); 7511 if (ret < 0) { 7512 /* 7513 * cas failed and the new value is not what 7514 * we want. 7515 */ 7516 goto sfmmu_pagesync_retry; 7517 } 7518 7519 if (ret > 0) { 7520 /* we win the cas */ 7521 if (hmeblkp->hblk_shared) { 7522 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7523 uint_t rid = 7524 hmeblkp->hblk_tag.htag_rid; 7525 sf_region_t *rgnp; 7526 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7527 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7528 ASSERT(srdp != NULL); 7529 rgnp = srdp->srd_hmergnp[rid]; 7530 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7531 srdp, rgnp, rid); 7532 cpuset = sfmmu_rgntlb_demap(addr, 7533 rgnp, hmeblkp, 1); 7534 } else { 7535 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7536 0, 0); 7537 cpuset = sfmmup->sfmmu_cpusran; 7538 } 7539 } 7540 } 7541 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7542 &tte, pp); 7543 } 7544 return (cpuset); 7545 } 7546 7547 /* 7548 * Remove write permission from a mappings to a page, so that 7549 * we can detect the next modification of it. This requires modifying 7550 * the TTE then invalidating (demap) any TLB entry using that TTE. 7551 * This code is similar to sfmmu_pagesync(). 
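 * The update itself follows the usual copy/modify/cas pattern, e.g.:
 *
 *	ttemod = tte;
 *	TTE_CLR_WRT(&ttemod);
 *	TTE_CLR_MOD(&ttemod);
 *	ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
 *
 * retrying if the cas loses and demapping the TLB entry if it wins.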
7552 */ 7553 static cpuset_t 7554 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7555 { 7556 caddr_t addr; 7557 tte_t tte; 7558 tte_t ttemod; 7559 struct hme_blk *hmeblkp; 7560 int ret; 7561 sfmmu_t *sfmmup; 7562 cpuset_t cpuset; 7563 7564 ASSERT(pp != NULL); 7565 ASSERT(sfmmu_mlist_held(pp)); 7566 7567 CPUSET_ZERO(cpuset); 7568 SFMMU_STAT(sf_clrwrt); 7569 7570 retry: 7571 7572 sfmmu_copytte(&sfhme->hme_tte, &tte); 7573 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7574 hmeblkp = sfmmu_hmetohblk(sfhme); 7575 sfmmup = hblktosfmmu(hmeblkp); 7576 addr = tte_to_vaddr(hmeblkp, tte); 7577 7578 ttemod = tte; 7579 TTE_CLR_WRT(&ttemod); 7580 TTE_CLR_MOD(&ttemod); 7581 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7582 7583 /* 7584 * if cas failed and the new value is not what 7585 * we want retry 7586 */ 7587 if (ret < 0) 7588 goto retry; 7589 7590 /* we win the cas */ 7591 if (ret > 0) { 7592 if (hmeblkp->hblk_shared) { 7593 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7594 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7595 sf_region_t *rgnp; 7596 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7597 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7598 ASSERT(srdp != NULL); 7599 rgnp = srdp->srd_hmergnp[rid]; 7600 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7601 srdp, rgnp, rid); 7602 cpuset = sfmmu_rgntlb_demap(addr, 7603 rgnp, hmeblkp, 1); 7604 } else { 7605 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7606 cpuset = sfmmup->sfmmu_cpusran; 7607 } 7608 } 7609 } 7610 7611 return (cpuset); 7612 } 7613 7614 /* 7615 * Walk all mappings of a page, removing write permission and clearing the 7616 * ref/mod bits. This code is similar to hat_pagesync() 7617 */ 7618 static void 7619 hat_page_clrwrt(page_t *pp) 7620 { 7621 struct sf_hment *sfhme; 7622 struct sf_hment *tmphme = NULL; 7623 kmutex_t *pml; 7624 cpuset_t cpuset; 7625 cpuset_t tset; 7626 int index; 7627 int cons; 7628 7629 CPUSET_ZERO(cpuset); 7630 7631 pml = sfmmu_mlist_enter(pp); 7632 index = PP_MAPINDEX(pp); 7633 cons = TTE8K; 7634 retry: 7635 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7636 tmphme = sfhme->hme_next; 7637 7638 /* 7639 * If we are looking for large mappings and this hme doesn't 7640 * reach the range we are seeking, just ignore its. 7641 */ 7642 7643 if (hme_size(sfhme) < cons) 7644 continue; 7645 7646 tset = sfmmu_pageclrwrt(pp, sfhme); 7647 CPUSET_OR(cpuset, tset); 7648 } 7649 7650 while (index) { 7651 index = index >> 1; 7652 cons++; 7653 if (index & 0x1) { 7654 /* Go to leading page */ 7655 pp = PP_GROUPLEADER(pp, cons); 7656 goto retry; 7657 } 7658 } 7659 7660 xt_sync(cpuset); 7661 sfmmu_mlist_exit(pml); 7662 } 7663 7664 /* 7665 * Set the given REF/MOD/RO bits for the given page. 7666 * For a vnode with a sorted v_pages list, we need to change 7667 * the attributes and the v_pages list together under page_vnode_mutex. 
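 * Typical use in this file is e.g. hat_page_setattr(pp, P_REF) from the
 * pagesync paths; callers may also or in P_NSH to suppress the v_pages
 * reordering that is otherwise done for VMODSORT vnodes.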
7668 */ 7669 void 7670 hat_page_setattr(page_t *pp, uint_t flag) 7671 { 7672 vnode_t *vp = pp->p_vnode; 7673 page_t **listp; 7674 kmutex_t *pmtx; 7675 kmutex_t *vphm = NULL; 7676 int noshuffle; 7677 7678 noshuffle = flag & P_NSH; 7679 flag &= ~P_NSH; 7680 7681 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7682 7683 /* 7684 * nothing to do if attribute already set 7685 */ 7686 if ((pp->p_nrm & flag) == flag) 7687 return; 7688 7689 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7690 !noshuffle) { 7691 vphm = page_vnode_mutex(vp); 7692 mutex_enter(vphm); 7693 } 7694 7695 pmtx = sfmmu_page_enter(pp); 7696 pp->p_nrm |= flag; 7697 sfmmu_page_exit(pmtx); 7698 7699 if (vphm != NULL) { 7700 /* 7701 * Some File Systems examine v_pages for NULL w/o 7702 * grabbing the vphm mutex. Must not let it become NULL when 7703 * pp is the only page on the list. 7704 */ 7705 if (pp->p_vpnext != pp) { 7706 page_vpsub(&vp->v_pages, pp); 7707 if (vp->v_pages != NULL) 7708 listp = &vp->v_pages->p_vpprev->p_vpnext; 7709 else 7710 listp = &vp->v_pages; 7711 page_vpadd(listp, pp); 7712 } 7713 mutex_exit(vphm); 7714 } 7715 } 7716 7717 void 7718 hat_page_clrattr(page_t *pp, uint_t flag) 7719 { 7720 vnode_t *vp = pp->p_vnode; 7721 kmutex_t *pmtx; 7722 7723 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7724 7725 pmtx = sfmmu_page_enter(pp); 7726 7727 /* 7728 * Caller is expected to hold page's io lock for VMODSORT to work 7729 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7730 * bit is cleared. 7731 * We don't have assert to avoid tripping some existing third party 7732 * code. The dirty page is moved back to top of the v_page list 7733 * after IO is done in pvn_write_done(). 7734 */ 7735 pp->p_nrm &= ~flag; 7736 sfmmu_page_exit(pmtx); 7737 7738 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7739 7740 /* 7741 * VMODSORT works by removing write permissions and getting 7742 * a fault when a page is made dirty. At this point 7743 * we need to remove write permission from all mappings 7744 * to this page. 7745 */ 7746 hat_page_clrwrt(pp); 7747 } 7748 } 7749 7750 uint_t 7751 hat_page_getattr(page_t *pp, uint_t flag) 7752 { 7753 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7754 return ((uint_t)(pp->p_nrm & flag)); 7755 } 7756 7757 /* 7758 * DEBUG kernels: verify that a kernel va<->pa translation 7759 * is safe by checking the underlying page_t is in a page 7760 * relocation-safe state. 7761 */ 7762 #ifdef DEBUG 7763 void 7764 sfmmu_check_kpfn(pfn_t pfn) 7765 { 7766 page_t *pp; 7767 int index, cons; 7768 7769 if (hat_check_vtop == 0) 7770 return; 7771 7772 if (kvseg.s_base == NULL || panicstr) 7773 return; 7774 7775 pp = page_numtopp_nolock(pfn); 7776 if (!pp) 7777 return; 7778 7779 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7780 return; 7781 7782 /* 7783 * Handed a large kernel page, we dig up the root page since we 7784 * know the root page might have the lock also. 7785 */ 7786 if (pp->p_szc != 0) { 7787 index = PP_MAPINDEX(pp); 7788 cons = TTE8K; 7789 again: 7790 while (index != 0) { 7791 index >>= 1; 7792 if (index != 0) 7793 cons++; 7794 if (index & 0x1) { 7795 pp = PP_GROUPLEADER(pp, cons); 7796 goto again; 7797 } 7798 } 7799 } 7800 7801 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7802 return; 7803 7804 /* 7805 * Pages need to be locked or allocated "permanent" (either from 7806 * static_arena arena or explicitly setting PG_NORELOC when calling 7807 * page_create_va()) for VA->PA translations to be valid. 
7808 */ 7809 if (!PP_ISNORELOC(pp)) 7810 panic("Illegal VA->PA translation, pp 0x%p not permanent", 7811 (void *)pp); 7812 else 7813 panic("Illegal VA->PA translation, pp 0x%p not locked", 7814 (void *)pp); 7815 } 7816 #endif /* DEBUG */ 7817 7818 /* 7819 * Returns a page frame number for a given virtual address. 7820 * Returns PFN_INVALID to indicate an invalid mapping 7821 */ 7822 pfn_t 7823 hat_getpfnum(struct hat *hat, caddr_t addr) 7824 { 7825 pfn_t pfn; 7826 tte_t tte; 7827 7828 /* 7829 * We would like to 7830 * ASSERT(AS_LOCK_HELD(as)); 7831 * but we can't because the iommu driver will call this 7832 * routine at interrupt time and it can't grab the as lock 7833 * or it will deadlock: A thread could have the as lock 7834 * and be waiting for io. The io can't complete 7835 * because the interrupt thread is blocked trying to grab 7836 * the as lock. 7837 */ 7838 7839 if (hat == ksfmmup) { 7840 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7841 ASSERT(segkmem_lpszc > 0); 7842 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7843 if (pfn != PFN_INVALID) { 7844 sfmmu_check_kpfn(pfn); 7845 return (pfn); 7846 } 7847 } else if (segkpm && IS_KPM_ADDR(addr)) { 7848 return (sfmmu_kpm_vatopfn(addr)); 7849 } 7850 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7851 == PFN_SUSPENDED) { 7852 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7853 } 7854 sfmmu_check_kpfn(pfn); 7855 return (pfn); 7856 } else { 7857 return (sfmmu_uvatopfn(addr, hat, NULL)); 7858 } 7859 } 7860 7861 /* 7862 * This routine will return both pfn and tte for the vaddr. 7863 */ 7864 static pfn_t 7865 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 7866 { 7867 struct hmehash_bucket *hmebp; 7868 hmeblk_tag hblktag; 7869 int hmeshift, hashno = 1; 7870 struct hme_blk *hmeblkp = NULL; 7871 tte_t tte; 7872 7873 struct sf_hment *sfhmep; 7874 pfn_t pfn; 7875 7876 /* support for ISM */ 7877 ism_map_t *ism_map; 7878 ism_blk_t *ism_blkp; 7879 int i; 7880 sfmmu_t *ism_hatid = NULL; 7881 sfmmu_t *locked_hatid = NULL; 7882 sfmmu_t *sv_sfmmup = sfmmup; 7883 caddr_t sv_vaddr = vaddr; 7884 sf_srd_t *srdp; 7885 7886 if (ttep == NULL) { 7887 ttep = &tte; 7888 } else { 7889 ttep->ll = 0; 7890 } 7891 7892 ASSERT(sfmmup != ksfmmup); 7893 SFMMU_STAT(sf_user_vtop); 7894 /* 7895 * Set ism_hatid if vaddr falls in a ISM segment. 
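	 * If it does, the lookup below is redirected to the ISM hat and
	 * vaddr is rebased to its offset within the ISM segment, i.e.
	 * vaddr - ism_start(ism_map[i]).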
7896 */ 7897 ism_blkp = sfmmup->sfmmu_iblk; 7898 if (ism_blkp != NULL) { 7899 sfmmu_ismhat_enter(sfmmup, 0); 7900 locked_hatid = sfmmup; 7901 } 7902 while (ism_blkp != NULL && ism_hatid == NULL) { 7903 ism_map = ism_blkp->iblk_maps; 7904 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7905 if (vaddr >= ism_start(ism_map[i]) && 7906 vaddr < ism_end(ism_map[i])) { 7907 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7908 vaddr = (caddr_t)(vaddr - 7909 ism_start(ism_map[i])); 7910 break; 7911 } 7912 } 7913 ism_blkp = ism_blkp->iblk_next; 7914 } 7915 if (locked_hatid) { 7916 sfmmu_ismhat_exit(locked_hatid, 0); 7917 } 7918 7919 hblktag.htag_id = sfmmup; 7920 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 7921 do { 7922 hmeshift = HME_HASH_SHIFT(hashno); 7923 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7924 hblktag.htag_rehash = hashno; 7925 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7926 7927 SFMMU_HASH_LOCK(hmebp); 7928 7929 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7930 if (hmeblkp != NULL) { 7931 ASSERT(!hmeblkp->hblk_shared); 7932 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7933 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7934 SFMMU_HASH_UNLOCK(hmebp); 7935 if (TTE_IS_VALID(ttep)) { 7936 pfn = TTE_TO_PFN(vaddr, ttep); 7937 return (pfn); 7938 } 7939 break; 7940 } 7941 SFMMU_HASH_UNLOCK(hmebp); 7942 hashno++; 7943 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7944 7945 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 7946 return (PFN_INVALID); 7947 } 7948 srdp = sv_sfmmup->sfmmu_srdp; 7949 ASSERT(srdp != NULL); 7950 ASSERT(srdp->srd_refcnt != 0); 7951 hblktag.htag_id = srdp; 7952 hashno = 1; 7953 do { 7954 hmeshift = HME_HASH_SHIFT(hashno); 7955 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 7956 hblktag.htag_rehash = hashno; 7957 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 7958 7959 SFMMU_HASH_LOCK(hmebp); 7960 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 7961 hmeblkp = hmeblkp->hblk_next) { 7962 uint_t rid; 7963 sf_region_t *rgnp; 7964 caddr_t rsaddr; 7965 caddr_t readdr; 7966 7967 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 7968 sv_sfmmup->sfmmu_hmeregion_map)) { 7969 continue; 7970 } 7971 ASSERT(hmeblkp->hblk_shared); 7972 rid = hmeblkp->hblk_tag.htag_rid; 7973 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7974 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7975 rgnp = srdp->srd_hmergnp[rid]; 7976 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7977 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 7978 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7979 rsaddr = rgnp->rgn_saddr; 7980 readdr = rsaddr + rgnp->rgn_size; 7981 #ifdef DEBUG 7982 if (TTE_IS_VALID(ttep) || 7983 get_hblk_ttesz(hmeblkp) > TTE8K) { 7984 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 7985 ASSERT(eva > sv_vaddr); 7986 ASSERT(sv_vaddr >= rsaddr); 7987 ASSERT(sv_vaddr < readdr); 7988 ASSERT(eva <= readdr); 7989 } 7990 #endif /* DEBUG */ 7991 /* 7992 * Continue the search if we 7993 * found an invalid 8K tte outside of the area 7994 * covered by this hmeblk's region. 
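 *
 * In sketch form, the outcome of a hash hit below is:
 *
 *	valid tte			-> return TTE_TO_PFN(sv_vaddr, ttep)
 *	invalid tte in a large hmeblk,
 *	or invalid 8K tte with sv_vaddr
 *	inside the region		-> return PFN_INVALID
 *	invalid 8K tte outside region	-> keep searching this hash chain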
7995 */ 7996 if (TTE_IS_VALID(ttep)) { 7997 SFMMU_HASH_UNLOCK(hmebp); 7998 pfn = TTE_TO_PFN(sv_vaddr, ttep); 7999 return (pfn); 8000 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8001 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8002 SFMMU_HASH_UNLOCK(hmebp); 8003 pfn = PFN_INVALID; 8004 return (pfn); 8005 } 8006 } 8007 SFMMU_HASH_UNLOCK(hmebp); 8008 hashno++; 8009 } while (hashno <= mmu_hashcnt); 8010 return (PFN_INVALID); 8011 } 8012 8013 8014 /* 8015 * For compatability with AT&T and later optimizations 8016 */ 8017 /* ARGSUSED */ 8018 void 8019 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8020 { 8021 ASSERT(hat != NULL); 8022 } 8023 8024 /* 8025 * Return the number of mappings to a particular page. This number is an 8026 * approximation of the number of people sharing the page. 8027 * 8028 * shared hmeblks or ism hmeblks are counted as 1 mapping here. 8029 * hat_page_checkshare() can be used to compare threshold to share 8030 * count that reflects the number of region sharers albeit at higher cost. 8031 */ 8032 ulong_t 8033 hat_page_getshare(page_t *pp) 8034 { 8035 page_t *spp = pp; /* start page */ 8036 kmutex_t *pml; 8037 ulong_t cnt; 8038 int index, sz = TTE64K; 8039 8040 /* 8041 * We need to grab the mlist lock to make sure any outstanding 8042 * load/unloads complete. Otherwise we could return zero 8043 * even though the unload(s) hasn't finished yet. 8044 */ 8045 pml = sfmmu_mlist_enter(spp); 8046 cnt = spp->p_share; 8047 8048 #ifdef VAC 8049 if (kpm_enable) 8050 cnt += spp->p_kpmref; 8051 #endif 8052 if (vpm_enable && pp->p_vpmref) { 8053 cnt += 1; 8054 } 8055 8056 /* 8057 * If we have any large mappings, we count the number of 8058 * mappings that this large page is part of. 8059 */ 8060 index = PP_MAPINDEX(spp); 8061 index >>= 1; 8062 while (index) { 8063 pp = PP_GROUPLEADER(spp, sz); 8064 if ((index & 0x1) && pp != spp) { 8065 cnt += pp->p_share; 8066 spp = pp; 8067 } 8068 index >>= 1; 8069 sz++; 8070 } 8071 sfmmu_mlist_exit(pml); 8072 return (cnt); 8073 } 8074 8075 /* 8076 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8077 * otherwise. Count shared hmeblks by region's refcnt. 
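 *
 * As a purely hypothetical caller sketch (the threshold and the use of
 * the result are illustrative, not taken from this file), a policy that
 * treats a page as heavily shared once more than ten sharers exist
 * might do:
 *
 *	if (hat_page_checkshare(pp, 10))
 *		... treat pp as heavily shared ...
 *
 * The routine returns as soon as the running count exceeds sh_thresh,
 * so it does not always compute the full count.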
8078 */ 8079 int 8080 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 8081 { 8082 kmutex_t *pml; 8083 ulong_t cnt = 0; 8084 int index, sz = TTE8K; 8085 struct sf_hment *sfhme, *tmphme = NULL; 8086 struct hme_blk *hmeblkp; 8087 8088 pml = sfmmu_mlist_enter(pp); 8089 8090 #ifdef VAC 8091 if (kpm_enable) 8092 cnt = pp->p_kpmref; 8093 #endif 8094 8095 if (vpm_enable && pp->p_vpmref) { 8096 cnt += 1; 8097 } 8098 8099 if (pp->p_share + cnt > sh_thresh) { 8100 sfmmu_mlist_exit(pml); 8101 return (1); 8102 } 8103 8104 index = PP_MAPINDEX(pp); 8105 8106 again: 8107 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 8108 tmphme = sfhme->hme_next; 8109 if (IS_PAHME(sfhme)) { 8110 continue; 8111 } 8112 8113 hmeblkp = sfmmu_hmetohblk(sfhme); 8114 if (hme_size(sfhme) != sz) { 8115 continue; 8116 } 8117 8118 if (hmeblkp->hblk_shared) { 8119 sf_srd_t *srdp = hblktosrd(hmeblkp); 8120 uint_t rid = hmeblkp->hblk_tag.htag_rid; 8121 sf_region_t *rgnp; 8122 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8123 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8124 ASSERT(srdp != NULL); 8125 rgnp = srdp->srd_hmergnp[rid]; 8126 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 8127 rgnp, rid); 8128 cnt += rgnp->rgn_refcnt; 8129 } else { 8130 cnt++; 8131 } 8132 if (cnt > sh_thresh) { 8133 sfmmu_mlist_exit(pml); 8134 return (1); 8135 } 8136 } 8137 8138 index >>= 1; 8139 sz++; 8140 while (index) { 8141 pp = PP_GROUPLEADER(pp, sz); 8142 ASSERT(sfmmu_mlist_held(pp)); 8143 if (index & 0x1) { 8144 goto again; 8145 } 8146 index >>= 1; 8147 sz++; 8148 } 8149 sfmmu_mlist_exit(pml); 8150 return (0); 8151 } 8152 8153 /* 8154 * Unload all large mappings to the pp and reset the p_szc field of every 8155 * constituent page according to the remaining mappings. 8156 * 8157 * pp must be locked SE_EXCL. Even though no other constituent pages are 8158 * locked it's legal to unload the large mappings to the pp because all 8159 * constituent pages of large locked mappings have to be locked SE_SHARED. 8160 * This means if we have SE_EXCL lock on one of constituent pages none of the 8161 * large mappings to pp are locked. 8162 * 8163 * Decrease p_szc field starting from the last constituent page and ending 8164 * with the root page. This method is used because other threads rely on the 8165 * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc 8166 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 8167 * ensures that p_szc changes of the constituent pages appears atomic for all 8168 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 8169 * 8170 * This mechanism is only used for file system pages where it's not always 8171 * possible to get SE_EXCL locks on all constituent pages to demote the size 8172 * code (as is done for anonymous or kernel large pages). 8173 * 8174 * See more comments in front of sfmmu_mlspl_enter(). 
8175 */ 8176 void 8177 hat_page_demote(page_t *pp) 8178 { 8179 int index; 8180 int sz; 8181 cpuset_t cpuset; 8182 int sync = 0; 8183 page_t *rootpp; 8184 struct sf_hment *sfhme; 8185 struct sf_hment *tmphme = NULL; 8186 struct hme_blk *hmeblkp; 8187 uint_t pszc; 8188 page_t *lastpp; 8189 cpuset_t tset; 8190 pgcnt_t npgs; 8191 kmutex_t *pml; 8192 kmutex_t *pmtx = NULL; 8193 8194 ASSERT(PAGE_EXCL(pp)); 8195 ASSERT(!PP_ISFREE(pp)); 8196 ASSERT(!PP_ISKAS(pp)); 8197 ASSERT(page_szc_lock_assert(pp)); 8198 pml = sfmmu_mlist_enter(pp); 8199 8200 pszc = pp->p_szc; 8201 if (pszc == 0) { 8202 goto out; 8203 } 8204 8205 index = PP_MAPINDEX(pp) >> 1; 8206 8207 if (index) { 8208 CPUSET_ZERO(cpuset); 8209 sz = TTE64K; 8210 sync = 1; 8211 } 8212 8213 while (index) { 8214 if (!(index & 0x1)) { 8215 index >>= 1; 8216 sz++; 8217 continue; 8218 } 8219 ASSERT(sz <= pszc); 8220 rootpp = PP_GROUPLEADER(pp, sz); 8221 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8222 tmphme = sfhme->hme_next; 8223 ASSERT(!IS_PAHME(sfhme)); 8224 hmeblkp = sfmmu_hmetohblk(sfhme); 8225 if (hme_size(sfhme) != sz) { 8226 continue; 8227 } 8228 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8229 CPUSET_OR(cpuset, tset); 8230 } 8231 if (index >>= 1) { 8232 sz++; 8233 } 8234 } 8235 8236 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8237 8238 if (sync) { 8239 xt_sync(cpuset); 8240 #ifdef VAC 8241 if (PP_ISTNC(pp)) { 8242 conv_tnc(rootpp, sz); 8243 } 8244 #endif /* VAC */ 8245 } 8246 8247 pmtx = sfmmu_page_enter(pp); 8248 8249 ASSERT(pp->p_szc == pszc); 8250 rootpp = PP_PAGEROOT(pp); 8251 ASSERT(rootpp->p_szc == pszc); 8252 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8253 8254 while (lastpp != rootpp) { 8255 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8256 ASSERT(sz < pszc); 8257 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8258 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8259 while (--npgs > 0) { 8260 lastpp->p_szc = (uchar_t)sz; 8261 lastpp = PP_PAGEPREV(lastpp); 8262 } 8263 if (sz) { 8264 /* 8265 * make sure before current root's pszc 8266 * is updated all updates to constituent pages pszc 8267 * fields are globally visible. 8268 */ 8269 membar_producer(); 8270 } 8271 lastpp->p_szc = sz; 8272 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8273 if (lastpp != rootpp) { 8274 lastpp = PP_PAGEPREV(lastpp); 8275 } 8276 } 8277 if (sz == 0) { 8278 /* the loop above doesn't cover this case */ 8279 rootpp->p_szc = 0; 8280 } 8281 out: 8282 ASSERT(pp->p_szc == 0); 8283 if (pmtx != NULL) { 8284 sfmmu_page_exit(pmtx); 8285 } 8286 sfmmu_mlist_exit(pml); 8287 } 8288 8289 /* 8290 * Refresh the HAT ismttecnt[] element for size szc. 8291 * Caller must have set ISM busy flag to prevent mapping 8292 * lists from changing while we're traversing them. 
8293 */ 8294 pgcnt_t 8295 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8296 { 8297 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8298 ism_map_t *ism_map; 8299 pgcnt_t npgs = 0; 8300 pgcnt_t npgs_scd = 0; 8301 int j; 8302 sf_scd_t *scdp; 8303 uchar_t rid; 8304 8305 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8306 scdp = sfmmup->sfmmu_scdp; 8307 8308 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8309 ism_map = ism_blkp->iblk_maps; 8310 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8311 rid = ism_map[j].imap_rid; 8312 ASSERT(rid == SFMMU_INVALID_ISMRID || 8313 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8314 8315 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8316 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8317 /* ISM is in sfmmup's SCD */ 8318 npgs_scd += 8319 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8320 } else { 8321 /* ISMs is not in SCD */ 8322 npgs += 8323 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8324 } 8325 } 8326 } 8327 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8328 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8329 return (npgs); 8330 } 8331 8332 /* 8333 * Yield the memory claim requirement for an address space. 8334 * 8335 * This is currently implemented as the number of bytes that have active 8336 * hardware translations that have page structures. Therefore, it can 8337 * underestimate the traditional resident set size, eg, if the 8338 * physical page is present and the hardware translation is missing; 8339 * and it can overestimate the rss, eg, if there are active 8340 * translations to a frame buffer with page structs. 8341 * Also, it does not take sharing into account. 8342 * 8343 * Note that we don't acquire locks here since this function is most often 8344 * called from the clock thread. 8345 */ 8346 size_t 8347 hat_get_mapped_size(struct hat *hat) 8348 { 8349 size_t assize = 0; 8350 int i; 8351 8352 if (hat == NULL) 8353 return (0); 8354 8355 for (i = 0; i < mmu_page_sizes; i++) 8356 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8357 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8358 8359 if (hat->sfmmu_iblk == NULL) 8360 return (assize); 8361 8362 for (i = 0; i < mmu_page_sizes; i++) 8363 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8364 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8365 8366 return (assize); 8367 } 8368 8369 int 8370 hat_stats_enable(struct hat *hat) 8371 { 8372 hatlock_t *hatlockp; 8373 8374 hatlockp = sfmmu_hat_enter(hat); 8375 hat->sfmmu_rmstat++; 8376 sfmmu_hat_exit(hatlockp); 8377 return (1); 8378 } 8379 8380 void 8381 hat_stats_disable(struct hat *hat) 8382 { 8383 hatlock_t *hatlockp; 8384 8385 hatlockp = sfmmu_hat_enter(hat); 8386 hat->sfmmu_rmstat--; 8387 sfmmu_hat_exit(hatlockp); 8388 } 8389 8390 /* 8391 * Routines for entering or removing ourselves from the 8392 * ism_hat's mapping list. This is used for both private and 8393 * SCD hats. 
8394 */ 8395 static void 8396 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8397 { 8398 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8399 8400 iment->iment_prev = NULL; 8401 iment->iment_next = ism_hat->sfmmu_iment; 8402 if (ism_hat->sfmmu_iment) { 8403 ism_hat->sfmmu_iment->iment_prev = iment; 8404 } 8405 ism_hat->sfmmu_iment = iment; 8406 } 8407 8408 static void 8409 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8410 { 8411 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8412 8413 if (ism_hat->sfmmu_iment == NULL) { 8414 panic("ism map entry remove - no entries"); 8415 } 8416 8417 if (iment->iment_prev) { 8418 ASSERT(ism_hat->sfmmu_iment != iment); 8419 iment->iment_prev->iment_next = iment->iment_next; 8420 } else { 8421 ASSERT(ism_hat->sfmmu_iment == iment); 8422 ism_hat->sfmmu_iment = iment->iment_next; 8423 } 8424 8425 if (iment->iment_next) { 8426 iment->iment_next->iment_prev = iment->iment_prev; 8427 } 8428 8429 /* 8430 * zero out the entry 8431 */ 8432 iment->iment_next = NULL; 8433 iment->iment_prev = NULL; 8434 iment->iment_hat = NULL; 8435 iment->iment_base_va = 0; 8436 } 8437 8438 /* 8439 * Hat_share()/unshare() return an (non-zero) error 8440 * when saddr and daddr are not properly aligned. 8441 * 8442 * The top level mapping element determines the alignment 8443 * requirement for saddr and daddr, depending on different 8444 * architectures. 8445 * 8446 * When hat_share()/unshare() are not supported, 8447 * HATOP_SHARE()/UNSHARE() return 0 8448 */ 8449 int 8450 hat_share(struct hat *sfmmup, caddr_t addr, struct hat *ism_hatid, 8451 caddr_t sptaddr, size_t len, uint_t ismszc) 8452 { 8453 ism_blk_t *ism_blkp; 8454 ism_blk_t *new_iblk; 8455 ism_map_t *ism_map; 8456 ism_ment_t *ism_ment; 8457 int i, added; 8458 hatlock_t *hatlockp; 8459 int reload_mmu = 0; 8460 uint_t ismshift = page_get_shift(ismszc); 8461 size_t ismpgsz = page_get_pagesize(ismszc); 8462 uint_t ismmask = (uint_t)ismpgsz - 1; 8463 size_t sh_size = ISM_SHIFT(ismshift, len); 8464 ushort_t ismhatflag; 8465 hat_region_cookie_t rcookie; 8466 sf_scd_t *old_scdp; 8467 8468 #ifdef DEBUG 8469 caddr_t eaddr = addr + len; 8470 #endif /* DEBUG */ 8471 8472 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8473 ASSERT(sptaddr == ISMID_STARTADDR); 8474 /* 8475 * Check the alignment. 8476 */ 8477 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8478 return (EINVAL); 8479 8480 /* 8481 * Check size alignment. 8482 */ 8483 if (!ISM_ALIGNED(ismshift, len)) 8484 return (EINVAL); 8485 8486 /* 8487 * Allocate ism_ment for the ism_hat's mapping list, and an 8488 * ism map blk in case we need one. We must do our 8489 * allocations before acquiring locks to prevent a deadlock 8490 * in the kmem allocator on the mapping list lock. 8491 */ 8492 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8493 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8494 8495 /* 8496 * Serialize ISM mappings with the ISM busy flag, and also the 8497 * trap handlers. 8498 */ 8499 sfmmu_ismhat_enter(sfmmup, 0); 8500 8501 /* 8502 * Allocate an ism map blk if necessary. 8503 */ 8504 if (sfmmup->sfmmu_iblk == NULL) { 8505 sfmmup->sfmmu_iblk = new_iblk; 8506 bzero(new_iblk, sizeof (*new_iblk)); 8507 new_iblk->iblk_nextpa = (uint64_t)-1; 8508 membar_stst(); /* make sure next ptr visible to all CPUs */ 8509 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8510 reload_mmu = 1; 8511 new_iblk = NULL; 8512 } 8513 8514 #ifdef DEBUG 8515 /* 8516 * Make sure mapping does not already exist. 
8517 */ 8518 ism_blkp = sfmmup->sfmmu_iblk; 8519 while (ism_blkp != NULL) { 8520 ism_map = ism_blkp->iblk_maps; 8521 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8522 if ((addr >= ism_start(ism_map[i]) && 8523 addr < ism_end(ism_map[i])) || 8524 eaddr > ism_start(ism_map[i]) && 8525 eaddr <= ism_end(ism_map[i])) { 8526 panic("sfmmu_share: Already mapped!"); 8527 } 8528 } 8529 ism_blkp = ism_blkp->iblk_next; 8530 } 8531 #endif /* DEBUG */ 8532 8533 ASSERT(ismszc >= TTE4M); 8534 if (ismszc == TTE4M) { 8535 ismhatflag = HAT_4M_FLAG; 8536 } else if (ismszc == TTE32M) { 8537 ismhatflag = HAT_32M_FLAG; 8538 } else if (ismszc == TTE256M) { 8539 ismhatflag = HAT_256M_FLAG; 8540 } 8541 /* 8542 * Add mapping to first available mapping slot. 8543 */ 8544 ism_blkp = sfmmup->sfmmu_iblk; 8545 added = 0; 8546 while (!added) { 8547 ism_map = ism_blkp->iblk_maps; 8548 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8549 if (ism_map[i].imap_ismhat == NULL) { 8550 8551 ism_map[i].imap_ismhat = ism_hatid; 8552 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8553 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8554 ism_map[i].imap_hatflags = ismhatflag; 8555 ism_map[i].imap_sz_mask = ismmask; 8556 /* 8557 * imap_seg is checked in ISM_CHECK to see if 8558 * non-NULL, then other info assumed valid. 8559 */ 8560 membar_stst(); 8561 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8562 ism_map[i].imap_ment = ism_ment; 8563 8564 /* 8565 * Now add ourselves to the ism_hat's 8566 * mapping list. 8567 */ 8568 ism_ment->iment_hat = sfmmup; 8569 ism_ment->iment_base_va = addr; 8570 ism_hatid->sfmmu_ismhat = 1; 8571 mutex_enter(&ism_mlist_lock); 8572 iment_add(ism_ment, ism_hatid); 8573 mutex_exit(&ism_mlist_lock); 8574 added = 1; 8575 break; 8576 } 8577 } 8578 if (!added && ism_blkp->iblk_next == NULL) { 8579 ism_blkp->iblk_next = new_iblk; 8580 new_iblk = NULL; 8581 bzero(ism_blkp->iblk_next, 8582 sizeof (*ism_blkp->iblk_next)); 8583 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8584 membar_stst(); 8585 ism_blkp->iblk_nextpa = 8586 va_to_pa((caddr_t)ism_blkp->iblk_next); 8587 } 8588 ism_blkp = ism_blkp->iblk_next; 8589 } 8590 8591 /* 8592 * After calling hat_join_region, sfmmup may join a new SCD or 8593 * move from the old scd to a new scd, in which case, we want to 8594 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8595 * sfmmu_check_page_sizes at the end of this routine. 8596 */ 8597 old_scdp = sfmmup->sfmmu_scdp; 8598 8599 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8600 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8601 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8602 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8603 } 8604 /* 8605 * Update our counters for this sfmmup's ism mappings. 8606 */ 8607 for (i = 0; i <= ismszc; i++) { 8608 if (!(disable_ism_large_pages & (1 << i))) 8609 (void) ism_tsb_entries(sfmmup, i); 8610 } 8611 8612 /* 8613 * For ISM and DISM we do not support 512K pages, so we only only 8614 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8615 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8616 * 8617 * Need to set 32M/256M ISM flags to make sure 8618 * sfmmu_check_page_sizes() enables them on Panther. 
8619 */ 8620 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8621 8622 switch (ismszc) { 8623 case TTE256M: 8624 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8625 hatlockp = sfmmu_hat_enter(sfmmup); 8626 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8627 sfmmu_hat_exit(hatlockp); 8628 } 8629 break; 8630 case TTE32M: 8631 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8632 hatlockp = sfmmu_hat_enter(sfmmup); 8633 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8634 sfmmu_hat_exit(hatlockp); 8635 } 8636 break; 8637 default: 8638 break; 8639 } 8640 8641 /* 8642 * If we updated the ismblkpa for this HAT we must make 8643 * sure all CPUs running this process reload their tsbmiss area. 8644 * Otherwise they will fail to load the mappings in the tsbmiss 8645 * handler and will loop calling pagefault(). 8646 */ 8647 if (reload_mmu) { 8648 hatlockp = sfmmu_hat_enter(sfmmup); 8649 sfmmu_sync_mmustate(sfmmup); 8650 sfmmu_hat_exit(hatlockp); 8651 } 8652 8653 sfmmu_ismhat_exit(sfmmup, 0); 8654 8655 /* 8656 * Free up ismblk if we didn't use it. 8657 */ 8658 if (new_iblk != NULL) 8659 kmem_cache_free(ism_blk_cache, new_iblk); 8660 8661 /* 8662 * Check TSB and TLB page sizes. 8663 */ 8664 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8665 sfmmu_check_page_sizes(sfmmup, 0); 8666 } else { 8667 sfmmu_check_page_sizes(sfmmup, 1); 8668 } 8669 return (0); 8670 } 8671 8672 /* 8673 * hat_unshare removes exactly one ism_map from 8674 * this process's as. It expects multiple calls 8675 * to hat_unshare for multiple shm segments. 8676 */ 8677 void 8678 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8679 { 8680 ism_map_t *ism_map; 8681 ism_ment_t *free_ment = NULL; 8682 ism_blk_t *ism_blkp; 8683 struct hat *ism_hatid; 8684 int found, i; 8685 hatlock_t *hatlockp; 8686 struct tsb_info *tsbinfo; 8687 uint_t ismshift = page_get_shift(ismszc); 8688 size_t sh_size = ISM_SHIFT(ismshift, len); 8689 uchar_t ism_rid; 8690 sf_scd_t *old_scdp; 8691 8692 ASSERT(ISM_ALIGNED(ismshift, addr)); 8693 ASSERT(ISM_ALIGNED(ismshift, len)); 8694 ASSERT(sfmmup != NULL); 8695 ASSERT(sfmmup != ksfmmup); 8696 8697 ASSERT(sfmmup->sfmmu_as != NULL); 8698 8699 /* 8700 * Make sure that during the entire time ISM mappings are removed, 8701 * the trap handlers serialize behind us, and that no one else 8702 * can be mucking with ISM mappings. This also lets us get away 8703 * with not doing expensive cross calls to flush the TLB -- we 8704 * just discard the context, flush the entire TSB, and call it 8705 * a day. 8706 */ 8707 sfmmu_ismhat_enter(sfmmup, 0); 8708 8709 /* 8710 * Remove the mapping. 8711 * 8712 * We can't have any holes in the ism map. 8713 * The tsb miss code while searching the ism map will 8714 * stop on an empty map slot. So we must move 8715 * everyone past the hole up 1 if any. 8716 * 8717 * Also empty ism map blks are not freed until the 8718 * process exits. This is to prevent a MT race condition 8719 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 
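 *
 * As a worked example of the copy-down done below: if a block's slots
 * hold { A, B, C, <empty>, ... } and B is the mapping being unshared,
 * the loop leaves { A, C, <empty>, <empty>, ... }, and when a following
 * ism_blk exists its first slot is pulled into the last slot of this
 * block, so the tsb miss code never sees a hole before the end.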
8720 */ 8721 found = 0; 8722 ism_blkp = sfmmup->sfmmu_iblk; 8723 while (!found && ism_blkp != NULL) { 8724 ism_map = ism_blkp->iblk_maps; 8725 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8726 if (addr == ism_start(ism_map[i]) && 8727 sh_size == (size_t)(ism_size(ism_map[i]))) { 8728 found = 1; 8729 break; 8730 } 8731 } 8732 if (!found) 8733 ism_blkp = ism_blkp->iblk_next; 8734 } 8735 8736 if (found) { 8737 ism_hatid = ism_map[i].imap_ismhat; 8738 ism_rid = ism_map[i].imap_rid; 8739 ASSERT(ism_hatid != NULL); 8740 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8741 8742 /* 8743 * After hat_leave_region, the sfmmup may leave SCD, 8744 * in which case, we want to grow the private tsb size when 8745 * calling sfmmu_check_page_sizes at the end of the routine. 8746 */ 8747 old_scdp = sfmmup->sfmmu_scdp; 8748 /* 8749 * Then remove ourselves from the region. 8750 */ 8751 if (ism_rid != SFMMU_INVALID_ISMRID) { 8752 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8753 HAT_REGION_ISM); 8754 } 8755 8756 /* 8757 * And now guarantee that any other cpu 8758 * that tries to process an ISM miss 8759 * will go to tl=0. 8760 */ 8761 hatlockp = sfmmu_hat_enter(sfmmup); 8762 sfmmu_invalidate_ctx(sfmmup); 8763 sfmmu_hat_exit(hatlockp); 8764 8765 /* 8766 * Remove ourselves from the ism mapping list. 8767 */ 8768 mutex_enter(&ism_mlist_lock); 8769 iment_sub(ism_map[i].imap_ment, ism_hatid); 8770 mutex_exit(&ism_mlist_lock); 8771 free_ment = ism_map[i].imap_ment; 8772 8773 /* 8774 * We delete the ism map by copying 8775 * the next map over the current one. 8776 * We will take the next one in the maps 8777 * array or from the next ism_blk. 8778 */ 8779 while (ism_blkp != NULL) { 8780 ism_map = ism_blkp->iblk_maps; 8781 while (i < (ISM_MAP_SLOTS - 1)) { 8782 ism_map[i] = ism_map[i + 1]; 8783 i++; 8784 } 8785 /* i == (ISM_MAP_SLOTS - 1) */ 8786 ism_blkp = ism_blkp->iblk_next; 8787 if (ism_blkp != NULL) { 8788 ism_map[i] = ism_blkp->iblk_maps[0]; 8789 i = 0; 8790 } else { 8791 ism_map[i].imap_seg = 0; 8792 ism_map[i].imap_vb_shift = 0; 8793 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8794 ism_map[i].imap_hatflags = 0; 8795 ism_map[i].imap_sz_mask = 0; 8796 ism_map[i].imap_ismhat = NULL; 8797 ism_map[i].imap_ment = NULL; 8798 } 8799 } 8800 8801 /* 8802 * Now flush entire TSB for the process, since 8803 * demapping page by page can be too expensive. 8804 * We don't have to flush the TLB here anymore 8805 * since we switch to a new TLB ctx instead. 8806 * Also, there is no need to flush if the process 8807 * is exiting since the TSB will be freed later. 8808 */ 8809 if (!sfmmup->sfmmu_free) { 8810 hatlockp = sfmmu_hat_enter(sfmmup); 8811 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8812 tsbinfo = tsbinfo->tsb_next) { 8813 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8814 continue; 8815 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 8816 tsbinfo->tsb_flags |= 8817 TSB_FLUSH_NEEDED; 8818 continue; 8819 } 8820 8821 sfmmu_inv_tsb(tsbinfo->tsb_va, 8822 TSB_BYTES(tsbinfo->tsb_szc)); 8823 } 8824 sfmmu_hat_exit(hatlockp); 8825 } 8826 } 8827 8828 /* 8829 * Update our counters for this sfmmup's ism mappings. 8830 */ 8831 for (i = 0; i <= ismszc; i++) { 8832 if (!(disable_ism_large_pages & (1 << i))) 8833 (void) ism_tsb_entries(sfmmup, i); 8834 } 8835 8836 sfmmu_ismhat_exit(sfmmup, 0); 8837 8838 /* 8839 * We must do our freeing here after dropping locks 8840 * to prevent a deadlock in the kmem allocator on the 8841 * mapping list lock. 
8842 */ 8843 if (free_ment != NULL) 8844 kmem_cache_free(ism_ment_cache, free_ment); 8845 8846 /* 8847 * Check TSB and TLB page sizes if the process isn't exiting. 8848 */ 8849 if (!sfmmup->sfmmu_free) { 8850 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 8851 sfmmu_check_page_sizes(sfmmup, 1); 8852 } else { 8853 sfmmu_check_page_sizes(sfmmup, 0); 8854 } 8855 } 8856 } 8857 8858 /* ARGSUSED */ 8859 static int 8860 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8861 { 8862 /* void *buf is sfmmu_t pointer */ 8863 bzero(buf, sizeof (sfmmu_t)); 8864 8865 return (0); 8866 } 8867 8868 /* ARGSUSED */ 8869 static void 8870 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8871 { 8872 /* void *buf is sfmmu_t pointer */ 8873 } 8874 8875 /* 8876 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8877 * field to be the pa of this hmeblk 8878 */ 8879 /* ARGSUSED */ 8880 static int 8881 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8882 { 8883 struct hme_blk *hmeblkp; 8884 8885 bzero(buf, (size_t)cdrarg); 8886 hmeblkp = (struct hme_blk *)buf; 8887 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8888 8889 #ifdef HBLK_TRACE 8890 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8891 #endif /* HBLK_TRACE */ 8892 8893 return (0); 8894 } 8895 8896 /* ARGSUSED */ 8897 static void 8898 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8899 { 8900 8901 #ifdef HBLK_TRACE 8902 8903 struct hme_blk *hmeblkp; 8904 8905 hmeblkp = (struct hme_blk *)buf; 8906 mutex_destroy(&hmeblkp->hblk_audit_lock); 8907 8908 #endif /* HBLK_TRACE */ 8909 } 8910 8911 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8912 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8913 /* 8914 * The kmem allocator will callback into our reclaim routine when the system 8915 * is running low in memory. We traverse the hash and free up all unused but 8916 * still cached hme_blks. We also traverse the free list and free them up 8917 * as well. 
8918 */ 8919 /*ARGSUSED*/ 8920 static void 8921 sfmmu_hblkcache_reclaim(void *cdrarg) 8922 { 8923 int i; 8924 struct hmehash_bucket *hmebp; 8925 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8926 static struct hmehash_bucket *uhmehash_reclaim_hand; 8927 static struct hmehash_bucket *khmehash_reclaim_hand; 8928 struct hme_blk *list = NULL, *last_hmeblkp; 8929 cpuset_t cpuset = cpu_ready_set; 8930 cpu_hme_pend_t *cpuhp; 8931 8932 /* Free up hmeblks on the cpu pending lists */ 8933 for (i = 0; i < NCPU; i++) { 8934 cpuhp = &cpu_hme_pend[i]; 8935 if (cpuhp->chp_listp != NULL) { 8936 mutex_enter(&cpuhp->chp_mutex); 8937 if (cpuhp->chp_listp == NULL) { 8938 mutex_exit(&cpuhp->chp_mutex); 8939 continue; 8940 } 8941 for (last_hmeblkp = cpuhp->chp_listp; 8942 last_hmeblkp->hblk_next != NULL; 8943 last_hmeblkp = last_hmeblkp->hblk_next) 8944 ; 8945 last_hmeblkp->hblk_next = list; 8946 list = cpuhp->chp_listp; 8947 cpuhp->chp_listp = NULL; 8948 cpuhp->chp_count = 0; 8949 mutex_exit(&cpuhp->chp_mutex); 8950 } 8951 8952 } 8953 8954 if (list != NULL) { 8955 kpreempt_disable(); 8956 CPUSET_DEL(cpuset, CPU->cpu_id); 8957 xt_sync(cpuset); 8958 xt_sync(cpuset); 8959 kpreempt_enable(); 8960 sfmmu_hblk_free(&list); 8961 list = NULL; 8962 } 8963 8964 hmebp = uhmehash_reclaim_hand; 8965 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8966 uhmehash_reclaim_hand = hmebp = uhme_hash; 8967 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8968 8969 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8970 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8971 hmeblkp = hmebp->hmeblkp; 8972 pr_hblk = NULL; 8973 while (hmeblkp) { 8974 nx_hblk = hmeblkp->hblk_next; 8975 if (!hmeblkp->hblk_vcnt && 8976 !hmeblkp->hblk_hmecnt) { 8977 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8978 pr_hblk, &list, 0); 8979 } else { 8980 pr_hblk = hmeblkp; 8981 } 8982 hmeblkp = nx_hblk; 8983 } 8984 SFMMU_HASH_UNLOCK(hmebp); 8985 } 8986 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 8987 hmebp = uhme_hash; 8988 } 8989 8990 hmebp = khmehash_reclaim_hand; 8991 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 8992 khmehash_reclaim_hand = hmebp = khme_hash; 8993 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8994 8995 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8996 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8997 hmeblkp = hmebp->hmeblkp; 8998 pr_hblk = NULL; 8999 while (hmeblkp) { 9000 nx_hblk = hmeblkp->hblk_next; 9001 if (!hmeblkp->hblk_vcnt && 9002 !hmeblkp->hblk_hmecnt) { 9003 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9004 pr_hblk, &list, 0); 9005 } else { 9006 pr_hblk = hmeblkp; 9007 } 9008 hmeblkp = nx_hblk; 9009 } 9010 SFMMU_HASH_UNLOCK(hmebp); 9011 } 9012 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9013 hmebp = khme_hash; 9014 } 9015 sfmmu_hblks_list_purge(&list, 0); 9016 } 9017 9018 /* 9019 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9020 * same goes for sfmmu_get_addrvcolor(). 9021 * 9022 * This function will return the virtual color for the specified page. The 9023 * virtual color corresponds to this page current mapping or its last mapping. 9024 * It is used by memory allocators to choose addresses with the correct 9025 * alignment so vac consistency is automatically maintained. If the page 9026 * has no color it returns -1. 
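 *
 * A hypothetical allocator sketch (everything here other than the two
 * vcolor interfaces is illustrative): when handing out a kernel va for
 * a page, keep probing candidate addresses until the colors agree:
 *
 *	color = sfmmu_get_ppvcolor(pp);
 *	while (color != -1 && sfmmu_get_addrvcolor(va) != color)
 *		va += MMU_PAGESIZE;	(try the next candidate address)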
9027 */ 9028 /*ARGSUSED*/ 9029 int 9030 sfmmu_get_ppvcolor(struct page *pp) 9031 { 9032 #ifdef VAC 9033 int color; 9034 9035 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9036 return (-1); 9037 } 9038 color = PP_GET_VCOLOR(pp); 9039 ASSERT(color < mmu_btop(shm_alignment)); 9040 return (color); 9041 #else 9042 return (-1); 9043 #endif /* VAC */ 9044 } 9045 9046 /* 9047 * This function will return the desired alignment for vac consistency 9048 * (vac color) given a virtual address. If no vac is present it returns -1. 9049 */ 9050 /*ARGSUSED*/ 9051 int 9052 sfmmu_get_addrvcolor(caddr_t vaddr) 9053 { 9054 #ifdef VAC 9055 if (cache & CACHE_VAC) { 9056 return (addr_to_vcolor(vaddr)); 9057 } else { 9058 return (-1); 9059 } 9060 #else 9061 return (-1); 9062 #endif /* VAC */ 9063 } 9064 9065 #ifdef VAC 9066 /* 9067 * Check for conflicts. 9068 * A conflict exists if the new and existent mappings do not match in 9069 * their "shm_alignment fields. If conflicts exist, the existant mappings 9070 * are flushed unless one of them is locked. If one of them is locked, then 9071 * the mappings are flushed and converted to non-cacheable mappings. 9072 */ 9073 static void 9074 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 9075 { 9076 struct hat *tmphat; 9077 struct sf_hment *sfhmep, *tmphme = NULL; 9078 struct hme_blk *hmeblkp; 9079 int vcolor; 9080 tte_t tte; 9081 9082 ASSERT(sfmmu_mlist_held(pp)); 9083 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 9084 9085 vcolor = addr_to_vcolor(addr); 9086 if (PP_NEWPAGE(pp)) { 9087 PP_SET_VCOLOR(pp, vcolor); 9088 return; 9089 } 9090 9091 if (PP_GET_VCOLOR(pp) == vcolor) { 9092 return; 9093 } 9094 9095 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 9096 /* 9097 * Previous user of page had a different color 9098 * but since there are no current users 9099 * we just flush the cache and change the color. 9100 */ 9101 SFMMU_STAT(sf_pgcolor_conflict); 9102 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9103 PP_SET_VCOLOR(pp, vcolor); 9104 return; 9105 } 9106 9107 /* 9108 * If we get here we have a vac conflict with a current 9109 * mapping. VAC conflict policy is as follows. 9110 * - The default is to unload the other mappings unless: 9111 * - If we have a large mapping we uncache the page. 9112 * We need to uncache the rest of the large page too. 9113 * - If any of the mappings are locked we uncache the page. 9114 * - If the requested mapping is inconsistent 9115 * with another mapping and that mapping 9116 * is in the same address space we have to 9117 * make it non-cached. The default thing 9118 * to do is unload the inconsistent mapping 9119 * but if they are in the same address space 9120 * we run the risk of unmapping the pc or the 9121 * stack which we will use as we return to the user, 9122 * in which case we can then fault on the thing 9123 * we just unloaded and get into an infinite loop. 9124 */ 9125 if (PP_ISMAPPED_LARGE(pp)) { 9126 int sz; 9127 9128 /* 9129 * Existing mapping is for big pages. We don't unload 9130 * existing big mappings to satisfy new mappings. 9131 * Always convert all mappings to TNC. 9132 */ 9133 sz = fnd_mapping_sz(pp); 9134 pp = PP_GROUPLEADER(pp, sz); 9135 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 9136 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 9137 TTEPAGES(sz)); 9138 9139 return; 9140 } 9141 9142 /* 9143 * check if any mapping is in same as or if it is locked 9144 * since in that case we need to uncache. 
9145 */ 9146 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9147 tmphme = sfhmep->hme_next; 9148 if (IS_PAHME(sfhmep)) 9149 continue; 9150 hmeblkp = sfmmu_hmetohblk(sfhmep); 9151 tmphat = hblktosfmmu(hmeblkp); 9152 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9153 ASSERT(TTE_IS_VALID(&tte)); 9154 if (hmeblkp->hblk_shared || tmphat == hat || 9155 hmeblkp->hblk_lckcnt) { 9156 /* 9157 * We have an uncache conflict 9158 */ 9159 SFMMU_STAT(sf_uncache_conflict); 9160 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9161 return; 9162 } 9163 } 9164 9165 /* 9166 * We have an unload conflict 9167 * We have already checked for LARGE mappings, therefore 9168 * the remaining mapping(s) must be TTE8K. 9169 */ 9170 SFMMU_STAT(sf_unload_conflict); 9171 9172 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9173 tmphme = sfhmep->hme_next; 9174 if (IS_PAHME(sfhmep)) 9175 continue; 9176 hmeblkp = sfmmu_hmetohblk(sfhmep); 9177 ASSERT(!hmeblkp->hblk_shared); 9178 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9179 } 9180 9181 if (PP_ISMAPPED_KPM(pp)) 9182 sfmmu_kpm_vac_unload(pp, addr); 9183 9184 /* 9185 * Unloads only do TLB flushes so we need to flush the 9186 * cache here. 9187 */ 9188 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9189 PP_SET_VCOLOR(pp, vcolor); 9190 } 9191 9192 /* 9193 * Whenever a mapping is unloaded and the page is in TNC state, 9194 * we see if the page can be made cacheable again. 'pp' is 9195 * the page that we just unloaded a mapping from, the size 9196 * of mapping that was unloaded is 'ottesz'. 9197 * Remark: 9198 * The recache policy for mpss pages can leave a performance problem 9199 * under the following circumstances: 9200 * . A large page in uncached mode has just been unmapped. 9201 * . All constituent pages are TNC due to a conflicting small mapping. 9202 * . There are many other, non conflicting, small mappings around for 9203 * a lot of the constituent pages. 9204 * . We're called w/ the "old" groupleader page and the old ottesz, 9205 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9206 * we end up w/ TTE8K or npages == 1. 9207 * . We call tst_tnc w/ the old groupleader only, and if there is no 9208 * conflict, we re-cache only this page. 9209 * . All other small mappings are not checked and will be left in TNC mode. 9210 * The problem is not very serious because: 9211 * . mpss is actually only defined for heap and stack, so the probability 9212 * is not very high that a large page mapping exists in parallel to a small 9213 * one (this is possible, but seems to be bad programming style in the 9214 * appl). 9215 * . The problem gets a little bit more serious, when those TNC pages 9216 * have to be mapped into kernel space, e.g. for networking. 9217 * . When VAC alias conflicts occur in applications, this is regarded 9218 * as an application bug. So if kstat's show them, the appl should 9219 * be changed anyway. 9220 */ 9221 void 9222 conv_tnc(page_t *pp, int ottesz) 9223 { 9224 int cursz, dosz; 9225 pgcnt_t curnpgs, dopgs; 9226 pgcnt_t pg64k; 9227 page_t *pp2; 9228 9229 /* 9230 * Determine how big a range we check for TNC and find 9231 * leader page. cursz is the size of the biggest 9232 * mapping that still exist on 'pp'. 
9233 */ 9234 if (PP_ISMAPPED_LARGE(pp)) { 9235 cursz = fnd_mapping_sz(pp); 9236 } else { 9237 cursz = TTE8K; 9238 } 9239 9240 if (ottesz >= cursz) { 9241 dosz = ottesz; 9242 pp2 = pp; 9243 } else { 9244 dosz = cursz; 9245 pp2 = PP_GROUPLEADER(pp, dosz); 9246 } 9247 9248 pg64k = TTEPAGES(TTE64K); 9249 dopgs = TTEPAGES(dosz); 9250 9251 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9252 9253 while (dopgs != 0) { 9254 curnpgs = TTEPAGES(cursz); 9255 if (tst_tnc(pp2, curnpgs)) { 9256 SFMMU_STAT_ADD(sf_recache, curnpgs); 9257 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9258 curnpgs); 9259 } 9260 9261 ASSERT(dopgs >= curnpgs); 9262 dopgs -= curnpgs; 9263 9264 if (dopgs == 0) { 9265 break; 9266 } 9267 9268 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9269 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9270 cursz = fnd_mapping_sz(pp2); 9271 } else { 9272 cursz = TTE8K; 9273 } 9274 } 9275 } 9276 9277 /* 9278 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9279 * returns 0 otherwise. Note that oaddr argument is valid for only 9280 * 8k pages. 9281 */ 9282 int 9283 tst_tnc(page_t *pp, pgcnt_t npages) 9284 { 9285 struct sf_hment *sfhme; 9286 struct hme_blk *hmeblkp; 9287 tte_t tte; 9288 caddr_t vaddr; 9289 int clr_valid = 0; 9290 int color, color1, bcolor; 9291 int i, ncolors; 9292 9293 ASSERT(pp != NULL); 9294 ASSERT(!(cache & CACHE_WRITEBACK)); 9295 9296 if (npages > 1) { 9297 ncolors = CACHE_NUM_COLOR; 9298 } 9299 9300 for (i = 0; i < npages; i++) { 9301 ASSERT(sfmmu_mlist_held(pp)); 9302 ASSERT(PP_ISTNC(pp)); 9303 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9304 9305 if (PP_ISPNC(pp)) { 9306 return (0); 9307 } 9308 9309 clr_valid = 0; 9310 if (PP_ISMAPPED_KPM(pp)) { 9311 caddr_t kpmvaddr; 9312 9313 ASSERT(kpm_enable); 9314 kpmvaddr = hat_kpm_page2va(pp, 1); 9315 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9316 color1 = addr_to_vcolor(kpmvaddr); 9317 clr_valid = 1; 9318 } 9319 9320 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9321 if (IS_PAHME(sfhme)) 9322 continue; 9323 hmeblkp = sfmmu_hmetohblk(sfhme); 9324 9325 sfmmu_copytte(&sfhme->hme_tte, &tte); 9326 ASSERT(TTE_IS_VALID(&tte)); 9327 9328 vaddr = tte_to_vaddr(hmeblkp, tte); 9329 color = addr_to_vcolor(vaddr); 9330 9331 if (npages > 1) { 9332 /* 9333 * If there is a big mapping, make sure 9334 * 8K mapping is consistent with the big 9335 * mapping. 
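 *
 * "Consistent" here means the 8K mapping's virtual color matches the
 * color its constituent page naturally has within the large page, i.e.
 * addr_to_vcolor(vaddr) == (i % CACHE_NUM_COLOR) for the i-th
 * constituent page, which is exactly what the check below verifies.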
9336 */ 9337 bcolor = i % ncolors; 9338 if (color != bcolor) { 9339 return (0); 9340 } 9341 } 9342 if (!clr_valid) { 9343 clr_valid = 1; 9344 color1 = color; 9345 } 9346 9347 if (color1 != color) { 9348 return (0); 9349 } 9350 } 9351 9352 pp = PP_PAGENEXT(pp); 9353 } 9354 9355 return (1); 9356 } 9357 9358 void 9359 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 9360 pgcnt_t npages) 9361 { 9362 kmutex_t *pmtx; 9363 int i, ncolors, bcolor; 9364 kpm_hlk_t *kpmp; 9365 cpuset_t cpuset; 9366 9367 ASSERT(pp != NULL); 9368 ASSERT(!(cache & CACHE_WRITEBACK)); 9369 9370 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 9371 pmtx = sfmmu_page_enter(pp); 9372 9373 /* 9374 * Fast path caching single unmapped page 9375 */ 9376 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 9377 flags == HAT_CACHE) { 9378 PP_CLRTNC(pp); 9379 PP_CLRPNC(pp); 9380 sfmmu_page_exit(pmtx); 9381 sfmmu_kpm_kpmp_exit(kpmp); 9382 return; 9383 } 9384 9385 /* 9386 * We need to capture all cpus in order to change cacheability 9387 * because we can't allow one cpu to access the same physical 9388 * page using a cacheable and a non-cacheable mapping at the same 9389 * time. Since we may end up walking the ism mapping list 9390 * we have to grab its lock now, since we can't after all the 9391 * cpus have been captured. 9392 */ 9393 sfmmu_hat_lock_all(); 9394 mutex_enter(&ism_mlist_lock); 9395 kpreempt_disable(); 9396 cpuset = cpu_ready_set; 9397 xc_attention(cpuset); 9398 9399 if (npages > 1) { 9400 /* 9401 * Make sure all colors are flushed since the 9402 * sfmmu_page_cache() only flushes one color - 9403 * it does not know about big pages. 9404 */ 9405 ncolors = CACHE_NUM_COLOR; 9406 if (flags & HAT_TMPNC) { 9407 for (i = 0; i < ncolors; i++) { 9408 sfmmu_cache_flushcolor(i, pp->p_pagenum); 9409 } 9410 cache_flush_flag = CACHE_NO_FLUSH; 9411 } 9412 } 9413 9414 for (i = 0; i < npages; i++) { 9415 9416 ASSERT(sfmmu_mlist_held(pp)); 9417 9418 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 9419 9420 if (npages > 1) { 9421 bcolor = i % ncolors; 9422 } else { 9423 bcolor = NO_VCOLOR; 9424 } 9425 9426 sfmmu_page_cache(pp, flags, cache_flush_flag, 9427 bcolor); 9428 } 9429 9430 pp = PP_PAGENEXT(pp); 9431 } 9432 9433 xt_sync(cpuset); 9434 xc_dismissed(cpuset); 9435 mutex_exit(&ism_mlist_lock); 9436 sfmmu_hat_unlock_all(); 9437 sfmmu_page_exit(pmtx); 9438 sfmmu_kpm_kpmp_exit(kpmp); 9439 kpreempt_enable(); 9440 } 9441 9442 /* 9443 * This function changes the virtual cacheability of all mappings to a 9444 * particular page. When changing from uncache to cacheable the mappings will 9445 * only be changed if all of them have the same virtual color. 9446 * We need to flush the cache on all cpus. It is possible that 9447 * a process referenced a page as cacheable but has since exited 9448 * and cleared the mapping list. We still need to flush it, but we have no 9449 * state, so flushing on all cpus is the only alternative.
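 *
 * The page state left behind by this routine is, in sketch form:
 *
 *	HAT_CACHE	PP_CLRTNC, PP_CLRPNC, PP_SET_VCOLOR(pp, color)
 *	HAT_TMPNC	PP_SETTNC, PP_SET_VCOLOR(pp, NO_VCOLOR)
 *	HAT_UNCACHE	PP_SETPNC, PP_CLRTNC, PP_SET_VCOLOR(pp, NO_VCOLOR)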
9450 */ 9451 static void 9452 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9453 { 9454 struct sf_hment *sfhme; 9455 struct hme_blk *hmeblkp; 9456 sfmmu_t *sfmmup; 9457 tte_t tte, ttemod; 9458 caddr_t vaddr; 9459 int ret, color; 9460 pfn_t pfn; 9461 9462 color = bcolor; 9463 pfn = pp->p_pagenum; 9464 9465 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9466 9467 if (IS_PAHME(sfhme)) 9468 continue; 9469 hmeblkp = sfmmu_hmetohblk(sfhme); 9470 9471 sfmmu_copytte(&sfhme->hme_tte, &tte); 9472 ASSERT(TTE_IS_VALID(&tte)); 9473 vaddr = tte_to_vaddr(hmeblkp, tte); 9474 color = addr_to_vcolor(vaddr); 9475 9476 #ifdef DEBUG 9477 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9478 ASSERT(color == bcolor); 9479 } 9480 #endif 9481 9482 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9483 9484 ttemod = tte; 9485 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9486 TTE_CLR_VCACHEABLE(&ttemod); 9487 } else { /* flags & HAT_CACHE */ 9488 TTE_SET_VCACHEABLE(&ttemod); 9489 } 9490 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9491 if (ret < 0) { 9492 /* 9493 * Since all cpus are captured modifytte should not 9494 * fail. 9495 */ 9496 panic("sfmmu_page_cache: write to tte failed"); 9497 } 9498 9499 sfmmup = hblktosfmmu(hmeblkp); 9500 if (cache_flush_flag == CACHE_FLUSH) { 9501 /* 9502 * Flush TSBs, TLBs and caches 9503 */ 9504 if (hmeblkp->hblk_shared) { 9505 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9506 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9507 sf_region_t *rgnp; 9508 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9509 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9510 ASSERT(srdp != NULL); 9511 rgnp = srdp->srd_hmergnp[rid]; 9512 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9513 srdp, rgnp, rid); 9514 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9515 hmeblkp, 0); 9516 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9517 } else if (sfmmup->sfmmu_ismhat) { 9518 if (flags & HAT_CACHE) { 9519 SFMMU_STAT(sf_ism_recache); 9520 } else { 9521 SFMMU_STAT(sf_ism_uncache); 9522 } 9523 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9524 pfn, CACHE_FLUSH); 9525 } else { 9526 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9527 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9528 } 9529 9530 /* 9531 * all cache entries belonging to this pfn are 9532 * now flushed. 9533 */ 9534 cache_flush_flag = CACHE_NO_FLUSH; 9535 } else { 9536 /* 9537 * Flush only TSBs and TLBs. 
9538 */ 9539 if (hmeblkp->hblk_shared) { 9540 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9541 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9542 sf_region_t *rgnp; 9543 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9544 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9545 ASSERT(srdp != NULL); 9546 rgnp = srdp->srd_hmergnp[rid]; 9547 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9548 srdp, rgnp, rid); 9549 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9550 hmeblkp, 0); 9551 } else if (sfmmup->sfmmu_ismhat) { 9552 if (flags & HAT_CACHE) { 9553 SFMMU_STAT(sf_ism_recache); 9554 } else { 9555 SFMMU_STAT(sf_ism_uncache); 9556 } 9557 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9558 pfn, CACHE_NO_FLUSH); 9559 } else { 9560 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9561 } 9562 } 9563 } 9564 9565 if (PP_ISMAPPED_KPM(pp)) 9566 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9567 9568 switch (flags) { 9569 9570 default: 9571 panic("sfmmu_pagecache: unknown flags"); 9572 break; 9573 9574 case HAT_CACHE: 9575 PP_CLRTNC(pp); 9576 PP_CLRPNC(pp); 9577 PP_SET_VCOLOR(pp, color); 9578 break; 9579 9580 case HAT_TMPNC: 9581 PP_SETTNC(pp); 9582 PP_SET_VCOLOR(pp, NO_VCOLOR); 9583 break; 9584 9585 case HAT_UNCACHE: 9586 PP_SETPNC(pp); 9587 PP_CLRTNC(pp); 9588 PP_SET_VCOLOR(pp, NO_VCOLOR); 9589 break; 9590 } 9591 } 9592 #endif /* VAC */ 9593 9594 9595 /* 9596 * Wrapper routine used to return a context. 9597 * 9598 * It's the responsibility of the caller to guarantee that the 9599 * process serializes on calls here by taking the HAT lock for 9600 * the hat. 9601 * 9602 */ 9603 static void 9604 sfmmu_get_ctx(sfmmu_t *sfmmup) 9605 { 9606 mmu_ctx_t *mmu_ctxp; 9607 uint_t pstate_save; 9608 int ret; 9609 9610 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9611 ASSERT(sfmmup != ksfmmup); 9612 9613 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9614 sfmmu_setup_tsbinfo(sfmmup); 9615 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9616 } 9617 9618 kpreempt_disable(); 9619 9620 mmu_ctxp = CPU_MMU_CTXP(CPU); 9621 ASSERT(mmu_ctxp); 9622 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9623 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9624 9625 /* 9626 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 9627 */ 9628 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9629 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE); 9630 9631 /* 9632 * Let the MMU set up the page sizes to use for 9633 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9634 */ 9635 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9636 mmu_set_ctx_page_sizes(sfmmup); 9637 } 9638 9639 /* 9640 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9641 * interrupts disabled to prevent race condition with wrap-around 9642 * ctx invalidatation. In sun4v, ctx invalidation also involves 9643 * a HV call to set the number of TSBs to 0. If interrupts are not 9644 * disabled until after sfmmu_load_mmustate is complete TSBs may 9645 * become assigned to INVALID_CONTEXT. This is not allowed. 
9646 */ 9647 pstate_save = sfmmu_disable_intrs(); 9648 9649 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9650 sfmmup->sfmmu_scdp != NULL) { 9651 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9652 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9653 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9654 /* debug purpose only */ 9655 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9656 != INVALID_CONTEXT); 9657 } 9658 sfmmu_load_mmustate(sfmmup); 9659 9660 sfmmu_enable_intrs(pstate_save); 9661 9662 kpreempt_enable(); 9663 } 9664 9665 /* 9666 * When all cnums are used up in a MMU, cnum will wrap around to the 9667 * next generation and start from 2. 9668 */ 9669 static void 9670 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum) 9671 { 9672 9673 /* caller must have disabled the preemption */ 9674 ASSERT(curthread->t_preempt >= 1); 9675 ASSERT(mmu_ctxp != NULL); 9676 9677 /* acquire Per-MMU (PM) spin lock */ 9678 mutex_enter(&mmu_ctxp->mmu_lock); 9679 9680 /* re-check to see if wrap-around is needed */ 9681 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9682 goto done; 9683 9684 SFMMU_MMU_STAT(mmu_wrap_around); 9685 9686 /* update gnum */ 9687 ASSERT(mmu_ctxp->mmu_gnum != 0); 9688 mmu_ctxp->mmu_gnum++; 9689 if (mmu_ctxp->mmu_gnum == 0 || 9690 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9691 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9692 (void *)mmu_ctxp); 9693 } 9694 9695 if (mmu_ctxp->mmu_ncpus > 1) { 9696 cpuset_t cpuset; 9697 9698 membar_enter(); /* make sure updated gnum visible */ 9699 9700 SFMMU_XCALL_STATS(NULL); 9701 9702 /* xcall to others on the same MMU to invalidate ctx */ 9703 cpuset = mmu_ctxp->mmu_cpuset; 9704 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum); 9705 CPUSET_DEL(cpuset, CPU->cpu_id); 9706 CPUSET_AND(cpuset, cpu_ready_set); 9707 9708 /* 9709 * Pass in INVALID_CONTEXT as the first parameter to 9710 * sfmmu_raise_tsb_exception, which invalidates the context 9711 * of any process running on the CPUs in the MMU. 9712 */ 9713 xt_some(cpuset, sfmmu_raise_tsb_exception, 9714 INVALID_CONTEXT, INVALID_CONTEXT); 9715 xt_sync(cpuset); 9716 9717 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9718 } 9719 9720 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9721 sfmmu_setctx_sec(INVALID_CONTEXT); 9722 sfmmu_clear_utsbinfo(); 9723 } 9724 9725 /* 9726 * No xcall is needed here. For sun4u systems all CPUs in context 9727 * domain share a single physical MMU therefore it's enough to flush 9728 * TLB on local CPU. On sun4v systems we use 1 global context 9729 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9730 * handler. Note that vtag_flushall_uctxs() is called 9731 * for Ultra II machine, where the equivalent flushall functionality 9732 * is implemented in SW, and only user ctx TLB entries are flushed. 9733 */ 9734 if (&vtag_flushall_uctxs != NULL) { 9735 vtag_flushall_uctxs(); 9736 } else { 9737 vtag_flushall(); 9738 } 9739 9740 /* reset mmu cnum, skips cnum 0 and 1 */ 9741 if (reset_cnum == B_TRUE) 9742 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9743 9744 done: 9745 mutex_exit(&mmu_ctxp->mmu_lock); 9746 } 9747 9748 9749 /* 9750 * For multi-threaded process, set the process context to INVALID_CONTEXT 9751 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9752 * process, we can just load the MMU state directly without having to 9753 * set context invalid. Caller must hold the hat lock since we don't 9754 * acquire it here. 
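 *
 * A minimal caller sketch (this is the pattern hat_share() uses when a
 * new ism block was installed and the running CPUs must reload their
 * tsbmiss area):
 *
 *	hatlockp = sfmmu_hat_enter(sfmmup);
 *	sfmmu_sync_mmustate(sfmmup);
 *	sfmmu_hat_exit(hatlockp);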
9755 */ 9756 static void 9757 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 9758 { 9759 uint_t cnum; 9760 uint_t pstate_save; 9761 9762 ASSERT(sfmmup != ksfmmup); 9763 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9764 9765 kpreempt_disable(); 9766 9767 /* 9768 * We check whether the passed-in sfmmup is the same as the 9769 * current running proc. This is to make sure the current proc 9770 * stays single-threaded if it already is. 9771 */ 9772 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 9773 (curthread->t_procp->p_lwpcnt == 1)) { 9774 /* single-thread */ 9775 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 9776 if (cnum != INVALID_CONTEXT) { 9777 uint_t curcnum; 9778 /* 9779 * Disable interrupts to prevent a race condition 9780 * with sfmmu_ctx_wrap_around ctx invalidation. 9781 * In sun4v, ctx invalidation involves setting 9782 * TSB to NULL, hence, interrupts should be disabled 9783 * until after sfmmu_load_mmustate is completed. 9784 */ 9785 pstate_save = sfmmu_disable_intrs(); 9786 curcnum = sfmmu_getctx_sec(); 9787 if (curcnum == cnum) 9788 sfmmu_load_mmustate(sfmmup); 9789 sfmmu_enable_intrs(pstate_save); 9790 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 9791 } 9792 } else { 9793 /* 9794 * multi-thread 9795 * or when sfmmup is not the same as the curproc. 9796 */ 9797 sfmmu_invalidate_ctx(sfmmup); 9798 } 9799 9800 kpreempt_enable(); 9801 } 9802 9803 9804 /* 9805 * Replace the specified TSB with a new TSB. This function gets called when 9806 * we grow, shrink or swap in a TSB. When swapping in a TSB (TSB_SWAPIN), the 9807 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 9808 * (8K). 9809 * 9810 * Caller must hold the HAT lock, but should assume any tsb_info 9811 * pointers it has are no longer valid after calling this function. 9812 * 9813 * Return values: 9814 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 9815 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 9816 * something to this tsbinfo/TSB 9817 * TSB_SUCCESS Operation succeeded 9818 */ 9819 static tsb_replace_rc_t 9820 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 9821 hatlock_t *hatlockp, uint_t flags) 9822 { 9823 struct tsb_info *new_tsbinfo = NULL; 9824 struct tsb_info *curtsb, *prevtsb; 9825 uint_t tte_sz_mask; 9826 int i; 9827 9828 ASSERT(sfmmup != ksfmmup); 9829 ASSERT(sfmmup->sfmmu_ismhat == 0); 9830 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9831 ASSERT(szc <= tsb_max_growsize); 9832 9833 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 9834 return (TSB_LOSTRACE); 9835 9836 /* 9837 * Find the tsb_info ahead of this one in the list, and 9838 * also make sure that the tsb_info passed in really 9839 * exists! 9840 */ 9841 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9842 curtsb != old_tsbinfo && curtsb != NULL; 9843 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9844 ; 9845 ASSERT(curtsb != NULL); 9846 9847 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9848 /* 9849 * The process is swapped out, so just set the new size 9850 * code. When it swaps back in, we'll allocate a new one 9851 * of the new chosen size. 9852 */ 9853 curtsb->tsb_szc = szc; 9854 return (TSB_SUCCESS); 9855 } 9856 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 9857 9858 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 9859 9860 /* 9861 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 9862 * If we fail to allocate a TSB, exit. 9863 * 9864 * If tsb grows with new tsb size > 4M and old tsb size < 4M, 9865 * then try 4M slab after the initial alloc fails.
9866 * 9867 * If tsb swapin with tsb size > 4M, then try 4M after the 9868 * initial alloc fails. 9869 */ 9870 sfmmu_hat_exit(hatlockp); 9871 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, 9872 tte_sz_mask, flags, sfmmup) && 9873 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) || 9874 (!(flags & TSB_SWAPIN) && 9875 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) || 9876 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE, 9877 tte_sz_mask, flags, sfmmup))) { 9878 (void) sfmmu_hat_enter(sfmmup); 9879 if (!(flags & TSB_SWAPIN)) 9880 SFMMU_STAT(sf_tsb_resize_failures); 9881 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9882 return (TSB_ALLOCFAIL); 9883 } 9884 (void) sfmmu_hat_enter(sfmmup); 9885 9886 /* 9887 * Re-check to make sure somebody else didn't muck with us while we 9888 * didn't hold the HAT lock. If the process swapped out, fine, just 9889 * exit; this can happen if we try to shrink the TSB from the context 9890 * of another process (such as on an ISM unmap), though it is rare. 9891 */ 9892 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9893 SFMMU_STAT(sf_tsb_resize_failures); 9894 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9895 sfmmu_hat_exit(hatlockp); 9896 sfmmu_tsbinfo_free(new_tsbinfo); 9897 (void) sfmmu_hat_enter(sfmmup); 9898 return (TSB_LOSTRACE); 9899 } 9900 9901 #ifdef DEBUG 9902 /* Reverify that the tsb_info still exists.. for debugging only */ 9903 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9904 curtsb != old_tsbinfo && curtsb != NULL; 9905 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9906 ; 9907 ASSERT(curtsb != NULL); 9908 #endif /* DEBUG */ 9909 9910 /* 9911 * Quiesce any CPUs running this process on their next TLB miss 9912 * so they atomically see the new tsb_info. We temporarily set the 9913 * context to invalid context so new threads that come on processor 9914 * after we do the xcall to cpusran will also serialize behind the 9915 * HAT lock on TLB miss and will see the new TSB. Since this short 9916 * race with a new thread coming on processor is relatively rare, 9917 * this synchronization mechanism should be cheaper than always 9918 * pausing all CPUs for the duration of the setup, which is what 9919 * the old implementation did. This is particuarly true if we are 9920 * copying a huge chunk of memory around during that window. 9921 * 9922 * The memory barriers are to make sure things stay consistent 9923 * with resume() since it does not hold the HAT lock while 9924 * walking the list of tsb_info structures. 9925 */ 9926 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 9927 /* The TSB is either growing or shrinking. */ 9928 sfmmu_invalidate_ctx(sfmmup); 9929 } else { 9930 /* 9931 * It is illegal to swap in TSBs from a process other 9932 * than a process being swapped in. This in turn 9933 * implies we do not have a valid MMU context here 9934 * since a process needs one to resolve translation 9935 * misses. 
9936 */ 9937 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9938 } 9939 9940 #ifdef DEBUG 9941 ASSERT(max_mmu_ctxdoms > 0); 9942 9943 /* 9944 * Process should have INVALID_CONTEXT on all MMUs 9945 */ 9946 for (i = 0; i < max_mmu_ctxdoms; i++) { 9947 9948 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9949 } 9950 #endif 9951 9952 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9953 membar_stst(); /* strict ordering required */ 9954 if (prevtsb) 9955 prevtsb->tsb_next = new_tsbinfo; 9956 else 9957 sfmmup->sfmmu_tsb = new_tsbinfo; 9958 membar_enter(); /* make sure new TSB globally visible */ 9959 9960 /* 9961 * We need to migrate TSB entries from the old TSB to the new TSB 9962 * if tsb_remap_ttes is set and the TSB is growing. 9963 */ 9964 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9965 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9966 9967 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9968 9969 /* 9970 * Drop the HAT lock to free our old tsb_info. 9971 */ 9972 sfmmu_hat_exit(hatlockp); 9973 9974 if ((flags & TSB_GROW) == TSB_GROW) { 9975 SFMMU_STAT(sf_tsb_grow); 9976 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9977 SFMMU_STAT(sf_tsb_shrink); 9978 } 9979 9980 sfmmu_tsbinfo_free(old_tsbinfo); 9981 9982 (void) sfmmu_hat_enter(sfmmup); 9983 return (TSB_SUCCESS); 9984 } 9985 9986 /* 9987 * This function will re-program hat pgsz array, and invalidate the 9988 * process' context, forcing the process to switch to another 9989 * context on the next TLB miss, and therefore start using the 9990 * TLB that is reprogrammed for the new page sizes. 9991 */ 9992 void 9993 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9994 { 9995 int i; 9996 hatlock_t *hatlockp = NULL; 9997 9998 hatlockp = sfmmu_hat_enter(sfmmup); 9999 /* USIII+-IV+ optimization, requires hat lock */ 10000 if (tmp_pgsz) { 10001 for (i = 0; i < mmu_page_sizes; i++) 10002 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10003 } 10004 SFMMU_STAT(sf_tlb_reprog_pgsz); 10005 10006 sfmmu_invalidate_ctx(sfmmup); 10007 10008 sfmmu_hat_exit(hatlockp); 10009 } 10010 10011 /* 10012 * The scd_rttecnt field in the SCD must be updated to take account of the 10013 * regions which it contains. 10014 */ 10015 static void 10016 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10017 { 10018 uint_t rid; 10019 uint_t i, j; 10020 ulong_t w; 10021 sf_region_t *rgnp; 10022 10023 ASSERT(srdp != NULL); 10024 10025 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10026 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10027 continue; 10028 } 10029 10030 j = 0; 10031 while (w) { 10032 if (!(w & 0x1)) { 10033 j++; 10034 w >>= 1; 10035 continue; 10036 } 10037 rid = (i << BT_ULSHIFT) | j; 10038 j++; 10039 w >>= 1; 10040 10041 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10042 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10043 rgnp = srdp->srd_hmergnp[rid]; 10044 ASSERT(rgnp->rgn_refcnt > 0); 10045 ASSERT(rgnp->rgn_id == rid); 10046 10047 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10048 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10049 10050 /* 10051 * Maintain the tsb0 inflation cnt for the regions 10052 * in the SCD. 
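 * As a worked illustration of the adjustment below (figures only,
 * not a requirement): TTE_PAGE_SHIFT(TTE8K) is 13, so a 4M region
 * with rgn_pgszc >= TTE4M adds 4M >> (13 + 2) = 128 entries to
 * sfmmu_tsb0_4minflcnt, i.e. one extra 8K-TSB entry is reserved for
 * every 32K of the region. sfmmu_check_page_sizes() later folds this
 * count into tte8k_cnt to allow for large page allocation failure.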
10053 */ 10054 if (rgnp->rgn_pgszc >= TTE4M) { 10055 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10056 rgnp->rgn_size >> 10057 (TTE_PAGE_SHIFT(TTE8K) + 2); 10058 } 10059 } 10060 } 10061 } 10062 10063 /* 10064 * This function assumes that there are either four or six supported page 10065 * sizes and at most two programmable TLBs, so we need to decide which 10066 * page sizes are most important and then tell the MMU layer so it 10067 * can adjust the TLB page sizes accordingly (if supported). 10068 * 10069 * If these assumptions change, this function will need to be 10070 * updated to support whatever the new limits are. 10071 * 10072 * The growing flag is nonzero if we are growing the address space, 10073 * and zero if it is shrinking. This allows us to decide whether 10074 * to grow or shrink our TSB, depending upon available memory 10075 * conditions. 10076 */ 10077 static void 10078 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10079 { 10080 uint64_t ttecnt[MMU_PAGE_SIZES]; 10081 uint64_t tte8k_cnt, tte4m_cnt; 10082 uint8_t i; 10083 int sectsb_thresh; 10084 10085 /* 10086 * Kernel threads, processes with small address spaces not using 10087 * large pages, and dummy ISM HATs need not apply. 10088 */ 10089 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 10090 return; 10091 10092 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10093 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10094 return; 10095 10096 for (i = 0; i < mmu_page_sizes; i++) { 10097 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10098 sfmmup->sfmmu_ismttecnt[i]; 10099 } 10100 10101 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10102 if (&mmu_check_page_sizes) 10103 mmu_check_page_sizes(sfmmup, ttecnt); 10104 10105 /* 10106 * Calculate the number of 8k ttes to represent the span of these 10107 * pages. 10108 */ 10109 tte8k_cnt = ttecnt[TTE8K] + 10110 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10111 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10112 if (mmu_page_sizes == max_mmu_page_sizes) { 10113 tte4m_cnt = ttecnt[TTE4M] + 10114 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10115 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10116 } else { 10117 tte4m_cnt = ttecnt[TTE4M]; 10118 } 10119 10120 /* 10121 * Inflate tte8k_cnt to allow for region large page allocation failure. 10122 */ 10123 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10124 10125 /* 10126 * Inflate TSB sizes by a factor of 2 if this process 10127 * uses 4M text pages to minimize extra conflict misses 10128 * in the first TSB since without counting text pages 10129 * 8K TSB may become too small. 10130 * 10131 * Also double the size of the second TSB to minimize 10132 * extra conflict misses due to competition between 4M text pages 10133 * and data pages. 10134 * 10135 * We need to adjust the second TSB allocation threshold by the 10136 * inflation factor, since there is no point in creating a second 10137 * TSB when we know all the mappings can fit in the I/D TLBs. 10138 */ 10139 sectsb_thresh = tsb_sectsb_threshold; 10140 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10141 tte8k_cnt <<= 1; 10142 tte4m_cnt <<= 1; 10143 sectsb_thresh <<= 1; 10144 } 10145 10146 /* 10147 * Check to see if our TSB is the right size; we may need to 10148 * grow or shrink it. If the process is small, our work is 10149 * finished at this point. 
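 * A worked example of the bookkeeping above (the counts are
 * illustrative only): a process with 10000 8K ttes, 1000 64K ttes
 * and 10 512K ttes has
 * tte8k_cnt = 10000 + (1000 << 3) + (10 << 6) = 18640
 * 8K-equivalent ttes (ignoring any tsb0_4minflcnt inflation); with
 * HAT_4MTEXT_FLAG set, that count, tte4m_cnt and sectsb_thresh are
 * all doubled, so 37280 would be compared against tsb_rss_factor
 * below.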
10150 */ 10151 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10152 return; 10153 } 10154 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10155 } 10156 10157 static void 10158 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10159 uint64_t tte4m_cnt, int sectsb_thresh) 10160 { 10161 int tsb_bits; 10162 uint_t tsb_szc; 10163 struct tsb_info *tsbinfop; 10164 hatlock_t *hatlockp = NULL; 10165 10166 hatlockp = sfmmu_hat_enter(sfmmup); 10167 ASSERT(hatlockp != NULL); 10168 tsbinfop = sfmmup->sfmmu_tsb; 10169 ASSERT(tsbinfop != NULL); 10170 10171 /* 10172 * If we're growing, select the size based on RSS. If we're 10173 * shrinking, leave some room so we don't have to turn around and 10174 * grow again immediately. 10175 */ 10176 if (growing) 10177 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10178 else 10179 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10180 10181 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10182 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10183 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10184 hatlockp, TSB_SHRINK); 10185 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10186 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10187 hatlockp, TSB_GROW); 10188 } 10189 tsbinfop = sfmmup->sfmmu_tsb; 10190 10191 /* 10192 * With the TLB and first TSB out of the way, we need to see if 10193 * we need a second TSB for 4M pages. If we managed to reprogram 10194 * the TLB page sizes above, the process will start using this new 10195 * TSB right away; otherwise, it will start using it on the next 10196 * context switch. Either way, it's no big deal so there's no 10197 * synchronization with the trap handlers here unless we grow the 10198 * TSB (in which case it's required to prevent using the old one 10199 * after it's freed). Note: second tsb is required for 32M/256M 10200 * page sizes. 10201 */ 10202 if (tte4m_cnt > sectsb_thresh) { 10203 /* 10204 * If we're growing, select the size based on RSS. If we're 10205 * shrinking, leave some room so we don't have to turn 10206 * around and grow again immediately. 10207 */ 10208 if (growing) 10209 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10210 else 10211 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10212 if (tsbinfop->tsb_next == NULL) { 10213 struct tsb_info *newtsb; 10214 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10215 0 : TSB_ALLOC; 10216 10217 sfmmu_hat_exit(hatlockp); 10218 10219 /* 10220 * Try to allocate a TSB for 4[32|256]M pages. If we 10221 * can't get the size we want, retry w/a minimum sized 10222 * TSB. If that still didn't work, give up; we can 10223 * still run without one. 10224 */ 10225 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10226 TSB4M|TSB32M|TSB256M:TSB4M; 10227 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10228 allocflags, sfmmup)) && 10229 (tsb_szc <= TSB_4M_SZCODE || 10230 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10231 tsb_bits, allocflags, sfmmup)) && 10232 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10233 tsb_bits, allocflags, sfmmup)) { 10234 return; 10235 } 10236 10237 hatlockp = sfmmu_hat_enter(sfmmup); 10238 10239 sfmmu_invalidate_ctx(sfmmup); 10240 10241 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10242 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10243 SFMMU_STAT(sf_tsb_sectsb_create); 10244 sfmmu_hat_exit(hatlockp); 10245 return; 10246 } else { 10247 /* 10248 * It's annoying, but possible for us 10249 * to get here.. 
we dropped the HAT lock 10250 * because of locking order in the kmem 10251 * allocator, and while we were off getting 10252 * our memory, some other thread decided to 10253 * do us a favor and won the race to get a 10254 * second TSB for this process. Sigh. 10255 */ 10256 sfmmu_hat_exit(hatlockp); 10257 sfmmu_tsbinfo_free(newtsb); 10258 return; 10259 } 10260 } 10261 10262 /* 10263 * We have a second TSB, see if it's big enough. 10264 */ 10265 tsbinfop = tsbinfop->tsb_next; 10266 10267 /* 10268 * Check to see if our second TSB is the right size; 10269 * we may need to grow or shrink it. 10270 * To prevent thrashing (e.g. growing the TSB on a 10271 * subsequent map operation), only try to shrink if 10272 * the TSB reach exceeds twice the virtual address 10273 * space size. 10274 */ 10275 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10276 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10277 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10278 tsb_szc, hatlockp, TSB_SHRINK); 10279 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10280 TSB_OK_GROW()) { 10281 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10282 tsb_szc, hatlockp, TSB_GROW); 10283 } 10284 } 10285 10286 sfmmu_hat_exit(hatlockp); 10287 } 10288 10289 /* 10290 * Free up a sfmmu 10291 * Since the sfmmu is currently embedded in the hat struct we simply zero 10292 * out our fields and free up the ism map blk list if any. 10293 */ 10294 static void 10295 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10296 { 10297 ism_blk_t *blkp, *nx_blkp; 10298 #ifdef DEBUG 10299 ism_map_t *map; 10300 int i; 10301 #endif 10302 10303 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10304 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10305 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10306 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10307 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10308 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10309 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10310 10311 sfmmup->sfmmu_free = 0; 10312 sfmmup->sfmmu_ismhat = 0; 10313 10314 blkp = sfmmup->sfmmu_iblk; 10315 sfmmup->sfmmu_iblk = NULL; 10316 10317 while (blkp) { 10318 #ifdef DEBUG 10319 map = blkp->iblk_maps; 10320 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10321 ASSERT(map[i].imap_seg == 0); 10322 ASSERT(map[i].imap_ismhat == NULL); 10323 ASSERT(map[i].imap_ment == NULL); 10324 } 10325 #endif 10326 nx_blkp = blkp->iblk_next; 10327 blkp->iblk_next = NULL; 10328 blkp->iblk_nextpa = (uint64_t)-1; 10329 kmem_cache_free(ism_blk_cache, blkp); 10330 blkp = nx_blkp; 10331 } 10332 } 10333 10334 /* 10335 * Locking primitves accessed by HATLOCK macros 10336 */ 10337 10338 #define SFMMU_SPL_MTX (0x0) 10339 #define SFMMU_ML_MTX (0x1) 10340 10341 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10342 SPL_HASH(pg) : MLIST_HASH(pg)) 10343 10344 kmutex_t * 10345 sfmmu_page_enter(struct page *pp) 10346 { 10347 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10348 } 10349 10350 void 10351 sfmmu_page_exit(kmutex_t *spl) 10352 { 10353 mutex_exit(spl); 10354 } 10355 10356 int 10357 sfmmu_page_spl_held(struct page *pp) 10358 { 10359 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10360 } 10361 10362 kmutex_t * 10363 sfmmu_mlist_enter(struct page *pp) 10364 { 10365 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10366 } 10367 10368 void 10369 sfmmu_mlist_exit(kmutex_t *mml) 10370 { 10371 mutex_exit(mml); 10372 } 10373 10374 int 10375 sfmmu_mlist_held(struct page *pp) 10376 { 10377 10378 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10379 } 10380 10381 /* 10382 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). 
For 10383 * sfmmu_mlist_enter() case mml_table lock array is used and for 10384 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10385 * 10386 * The lock is taken on a root page so that it protects an operation on all 10387 * constituent pages of a large page pp belongs to. 10388 * 10389 * The routine takes a lock from the appropriate array. The lock is determined 10390 * by hashing the root page. After taking the lock this routine checks if the 10391 * root page has the same size code that was used to determine the root (i.e 10392 * that root hasn't changed). If root page has the expected p_szc field we 10393 * have the right lock and it's returned to the caller. If root's p_szc 10394 * decreased we release the lock and retry from the beginning. This case can 10395 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10396 * value and taking the lock. The number of retries due to p_szc decrease is 10397 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10398 * determined by hashing pp itself. 10399 * 10400 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10401 * possible that p_szc can increase. To increase p_szc a thread has to lock 10402 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10403 * callers that don't hold a page locked recheck if hmeblk through which pp 10404 * was found still maps this pp. If it doesn't map it anymore returned lock 10405 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10406 * p_szc increase after taking the lock it returns this lock without further 10407 * retries because in this case the caller doesn't care about which lock was 10408 * taken. The caller will drop it right away. 10409 * 10410 * After the routine returns it's guaranteed that hat_page_demote() can't 10411 * change p_szc field of any of constituent pages of a large page pp belongs 10412 * to as long as pp was either locked at least SHARED prior to this call or 10413 * the caller finds that hment that pointed to this pp still references this 10414 * pp (this also assumes that the caller holds hme hash bucket lock so that 10415 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10416 * hat_pageunload()). 10417 */ 10418 static kmutex_t * 10419 sfmmu_mlspl_enter(struct page *pp, int type) 10420 { 10421 kmutex_t *mtx; 10422 uint_t prev_rszc = UINT_MAX; 10423 page_t *rootpp; 10424 uint_t szc; 10425 uint_t rszc; 10426 uint_t pszc = pp->p_szc; 10427 10428 ASSERT(pp != NULL); 10429 10430 again: 10431 if (pszc == 0) { 10432 mtx = SFMMU_MLSPL_MTX(type, pp); 10433 mutex_enter(mtx); 10434 return (mtx); 10435 } 10436 10437 /* The lock lives in the root page */ 10438 rootpp = PP_GROUPLEADER(pp, pszc); 10439 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10440 mutex_enter(mtx); 10441 10442 /* 10443 * Return mml in the following 3 cases: 10444 * 10445 * 1) If pp itself is root since if its p_szc decreased before we took 10446 * the lock pp is still the root of smaller szc page. And if its p_szc 10447 * increased it doesn't matter what lock we return (see comment in 10448 * front of this routine). 10449 * 10450 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10451 * large page we have the right lock since any previous potential 10452 * hat_page_demote() is done demoting from greater than current root's 10453 * p_szc because hat_page_demote() changes root's p_szc last. 
No 10454 * further hat_page_demote() can start or be in progress since it 10455 * would need the same lock we currently hold. 10456 * 10457 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10458 * matter what lock we return (see comment in front of this routine). 10459 */ 10460 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10461 rszc >= prev_rszc) { 10462 return (mtx); 10463 } 10464 10465 /* 10466 * hat_page_demote() could have decreased root's p_szc. 10467 * In this case pp's p_szc must also be smaller than pszc. 10468 * Retry. 10469 */ 10470 if (rszc < pszc) { 10471 szc = pp->p_szc; 10472 if (szc < pszc) { 10473 mutex_exit(mtx); 10474 pszc = szc; 10475 goto again; 10476 } 10477 /* 10478 * pp's p_szc increased after it was decreased. 10479 * page cannot be mapped. Return current lock. The caller 10480 * will drop it right away. 10481 */ 10482 return (mtx); 10483 } 10484 10485 /* 10486 * root's p_szc is greater than pp's p_szc. 10487 * hat_page_demote() is not done with all pages 10488 * yet. Wait for it to complete. 10489 */ 10490 mutex_exit(mtx); 10491 rootpp = PP_GROUPLEADER(rootpp, rszc); 10492 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10493 mutex_enter(mtx); 10494 mutex_exit(mtx); 10495 prev_rszc = rszc; 10496 goto again; 10497 } 10498 10499 static int 10500 sfmmu_mlspl_held(struct page *pp, int type) 10501 { 10502 kmutex_t *mtx; 10503 10504 ASSERT(pp != NULL); 10505 /* The lock lives in the root page */ 10506 pp = PP_PAGEROOT(pp); 10507 ASSERT(pp != NULL); 10508 10509 mtx = SFMMU_MLSPL_MTX(type, pp); 10510 return (MUTEX_HELD(mtx)); 10511 } 10512 10513 static uint_t 10514 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10515 { 10516 struct hme_blk *hblkp; 10517 10518 10519 if (freehblkp != NULL) { 10520 mutex_enter(&freehblkp_lock); 10521 if (freehblkp != NULL) { 10522 /* 10523 * If the current thread is owning hblk_reserve OR 10524 * critical request from sfmmu_hblk_steal() 10525 * let it succeed even if freehblkcnt is really low. 10526 */ 10527 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10528 SFMMU_STAT(sf_get_free_throttle); 10529 mutex_exit(&freehblkp_lock); 10530 return (0); 10531 } 10532 freehblkcnt--; 10533 *hmeblkpp = freehblkp; 10534 hblkp = *hmeblkpp; 10535 freehblkp = hblkp->hblk_next; 10536 mutex_exit(&freehblkp_lock); 10537 hblkp->hblk_next = NULL; 10538 SFMMU_STAT(sf_get_free_success); 10539 10540 ASSERT(hblkp->hblk_hmecnt == 0); 10541 ASSERT(hblkp->hblk_vcnt == 0); 10542 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp)); 10543 10544 return (1); 10545 } 10546 mutex_exit(&freehblkp_lock); 10547 } 10548 10549 /* Check cpu hblk pending queues */ 10550 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) { 10551 hblkp = *hmeblkpp; 10552 hblkp->hblk_next = NULL; 10553 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp); 10554 10555 ASSERT(hblkp->hblk_hmecnt == 0); 10556 ASSERT(hblkp->hblk_vcnt == 0); 10557 10558 return (1); 10559 } 10560 10561 SFMMU_STAT(sf_get_free_fail); 10562 return (0); 10563 } 10564 10565 static uint_t 10566 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10567 { 10568 struct hme_blk *hblkp; 10569 10570 ASSERT(hmeblkp->hblk_hmecnt == 0); 10571 ASSERT(hmeblkp->hblk_vcnt == 0); 10572 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10573 10574 /* 10575 * If the current thread is mapping into kernel space, 10576 * let it succede even if freehblkcnt is max 10577 * so that it will avoid freeing it to kmem. 
10578 * This will prevent stack overflow due to 10579 * possible recursion since kmem_cache_free() 10580 * might require creation of a slab which 10581 * in turn needs an hmeblk to map that slab; 10582 * let's break this vicious chain at the first 10583 * opportunity. 10584 */ 10585 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10586 mutex_enter(&freehblkp_lock); 10587 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10588 SFMMU_STAT(sf_put_free_success); 10589 freehblkcnt++; 10590 hmeblkp->hblk_next = freehblkp; 10591 freehblkp = hmeblkp; 10592 mutex_exit(&freehblkp_lock); 10593 return (1); 10594 } 10595 mutex_exit(&freehblkp_lock); 10596 } 10597 10598 /* 10599 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 10600 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10601 * we are not in the process of mapping into kernel space. 10602 */ 10603 ASSERT(!critical); 10604 while (freehblkcnt > HBLK_RESERVE_CNT) { 10605 mutex_enter(&freehblkp_lock); 10606 if (freehblkcnt > HBLK_RESERVE_CNT) { 10607 freehblkcnt--; 10608 hblkp = freehblkp; 10609 freehblkp = hblkp->hblk_next; 10610 mutex_exit(&freehblkp_lock); 10611 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10612 kmem_cache_free(sfmmu8_cache, hblkp); 10613 continue; 10614 } 10615 mutex_exit(&freehblkp_lock); 10616 } 10617 SFMMU_STAT(sf_put_free_fail); 10618 return (0); 10619 } 10620 10621 static void 10622 sfmmu_hblk_swap(struct hme_blk *new) 10623 { 10624 struct hme_blk *old, *hblkp, *prev; 10625 uint64_t newpa; 10626 caddr_t base, vaddr, endaddr; 10627 struct hmehash_bucket *hmebp; 10628 struct sf_hment *osfhme, *nsfhme; 10629 page_t *pp; 10630 kmutex_t *pml; 10631 tte_t tte; 10632 struct hme_blk *list = NULL; 10633 10634 #ifdef DEBUG 10635 hmeblk_tag hblktag; 10636 struct hme_blk *found; 10637 #endif 10638 old = HBLK_RESERVE; 10639 ASSERT(!old->hblk_shared); 10640 10641 /* 10642 * save pa before bcopy clobbers it 10643 */ 10644 newpa = new->hblk_nextpa; 10645 10646 base = (caddr_t)get_hblk_base(old); 10647 endaddr = base + get_hblk_span(old); 10648 10649 /* 10650 * acquire hash bucket lock. 10651 */ 10652 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10653 SFMMU_INVALID_SHMERID); 10654 10655 /* 10656 * copy contents from old to new 10657 */ 10658 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10659 10660 /* 10661 * add new to hash chain 10662 */ 10663 sfmmu_hblk_hash_add(hmebp, new, newpa); 10664 10665 /* 10666 * search hash chain for hblk_reserve; this needs to be performed 10667 * after adding new, otherwise prev won't correspond to the hblk which 10668 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to 10669 * remove old later. 10670 */ 10671 for (prev = NULL, 10672 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old; 10673 prev = hblkp, hblkp = hblkp->hblk_next) 10674 ; 10675 10676 if (hblkp != old) 10677 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10678 10679 /* 10680 * p_mapping list is still pointing to hments in hblk_reserve; 10681 * fix up p_mapping list so that they point to hments in new. 10682 * 10683 * Since all these mappings are created by hblk_reserve_thread 10684 * on the way and it's using at least one of the buffers from each of 10685 * the newly minted slabs, there is no danger of any of these 10686 * mappings getting unloaded by another thread. 10687 * 10688 * tsbmiss could only modify ref/mod bits of hments in old/new. 
10689 * Since all of these hments hold mappings established by segkmem 10690 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10691 * have no meaning for the mappings in hblk_reserve. hments in 10692 * old and new are identical except for ref/mod bits. 10693 */ 10694 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10695 10696 HBLKTOHME(osfhme, old, vaddr); 10697 sfmmu_copytte(&osfhme->hme_tte, &tte); 10698 10699 if (TTE_IS_VALID(&tte)) { 10700 if ((pp = osfhme->hme_page) == NULL) 10701 panic("sfmmu_hblk_swap: page not mapped"); 10702 10703 pml = sfmmu_mlist_enter(pp); 10704 10705 if (pp != osfhme->hme_page) 10706 panic("sfmmu_hblk_swap: mapping changed"); 10707 10708 HBLKTOHME(nsfhme, new, vaddr); 10709 10710 HME_ADD(nsfhme, pp); 10711 HME_SUB(osfhme, pp); 10712 10713 sfmmu_mlist_exit(pml); 10714 } 10715 } 10716 10717 /* 10718 * remove old from hash chain 10719 */ 10720 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1); 10721 10722 #ifdef DEBUG 10723 10724 hblktag.htag_id = ksfmmup; 10725 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10726 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10727 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10728 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10729 10730 if (found != new) 10731 panic("sfmmu_hblk_swap: new hblk not found"); 10732 #endif 10733 10734 SFMMU_HASH_UNLOCK(hmebp); 10735 10736 /* 10737 * Reset hblk_reserve 10738 */ 10739 bzero((void *)old, HME8BLK_SZ); 10740 old->hblk_nextpa = va_to_pa((caddr_t)old); 10741 } 10742 10743 /* 10744 * Grab the mlist mutex for both pages passed in. 10745 * 10746 * low and high will be returned as pointers to the mutexes for these pages. 10747 * low refers to the mutex residing in the lower bin of the mlist hash, while 10748 * high refers to the mutex residing in the higher bin of the mlist hash. This 10749 * is due to the locking order restrictions on the same thread grabbing 10750 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10751 * 10752 * If both pages hash to the same mutex, only grab that single mutex, and 10753 * high will be returned as NULL 10754 * If the pages hash to different bins in the hash, grab the lower addressed 10755 * lock first and then the higher addressed lock in order to follow the locking 10756 * rules involved with the same thread grabbing multiple mlist mutexes. 10757 * low and high will both have non-NULL values. 10758 */ 10759 static void 10760 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10761 kmutex_t **low, kmutex_t **high) 10762 { 10763 kmutex_t *mml_targ, *mml_repl; 10764 10765 /* 10766 * no need to do the dance around szc as in sfmmu_mlist_enter() 10767 * because this routine is only called by hat_page_relocate() and all 10768 * targ and repl pages are already locked EXCL so szc can't change. 
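 * A hypothetical caller sketch (hat_page_relocate() is the real
 * caller) simply brackets the relocation with the enter/exit pair
 * and lets this routine worry about the lock ordering:
 *
 *	kmutex_t *low, *high;
 *
 *	sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
 *	(move mappings from targ to repl)
 *	sfmmu_mlist_reloc_exit(low, high);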
10769 */ 10770 10771 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10772 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10773 10774 if (mml_targ == mml_repl) { 10775 *low = mml_targ; 10776 *high = NULL; 10777 } else { 10778 if (mml_targ < mml_repl) { 10779 *low = mml_targ; 10780 *high = mml_repl; 10781 } else { 10782 *low = mml_repl; 10783 *high = mml_targ; 10784 } 10785 } 10786 10787 mutex_enter(*low); 10788 if (*high) 10789 mutex_enter(*high); 10790 } 10791 10792 static void 10793 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 10794 { 10795 if (high) 10796 mutex_exit(high); 10797 mutex_exit(low); 10798 } 10799 10800 static hatlock_t * 10801 sfmmu_hat_enter(sfmmu_t *sfmmup) 10802 { 10803 hatlock_t *hatlockp; 10804 10805 if (sfmmup != ksfmmup) { 10806 hatlockp = TSB_HASH(sfmmup); 10807 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 10808 return (hatlockp); 10809 } 10810 return (NULL); 10811 } 10812 10813 static hatlock_t * 10814 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 10815 { 10816 hatlock_t *hatlockp; 10817 10818 if (sfmmup != ksfmmup) { 10819 hatlockp = TSB_HASH(sfmmup); 10820 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 10821 return (NULL); 10822 return (hatlockp); 10823 } 10824 return (NULL); 10825 } 10826 10827 static void 10828 sfmmu_hat_exit(hatlock_t *hatlockp) 10829 { 10830 if (hatlockp != NULL) 10831 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 10832 } 10833 10834 static void 10835 sfmmu_hat_lock_all(void) 10836 { 10837 int i; 10838 for (i = 0; i < SFMMU_NUM_LOCK; i++) 10839 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 10840 } 10841 10842 static void 10843 sfmmu_hat_unlock_all(void) 10844 { 10845 int i; 10846 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 10847 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 10848 } 10849 10850 int 10851 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 10852 { 10853 ASSERT(sfmmup != ksfmmup); 10854 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 10855 } 10856 10857 /* 10858 * Locking primitives to provide consistency between ISM unmap 10859 * and other operations. Since ISM unmap can take a long time, we 10860 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 10861 * contention on the hatlock buckets while ISM segments are being 10862 * unmapped. The tradeoff is that the flags don't prevent priority 10863 * inversion from occurring, so we must request kernel priority in 10864 * case we have to sleep to keep from getting buried while holding 10865 * the HAT_ISMBUSY flag set, which in turn could block other kernel 10866 * threads from running (for example, in sfmmu_uvatopfn()). 10867 */ 10868 static void 10869 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 10870 { 10871 hatlock_t *hatlockp; 10872 10873 if (!hatlock_held) 10874 hatlockp = sfmmu_hat_enter(sfmmup); 10875 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 10876 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10877 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 10878 if (!hatlock_held) 10879 sfmmu_hat_exit(hatlockp); 10880 } 10881 10882 static void 10883 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 10884 { 10885 hatlock_t *hatlockp; 10886 10887 if (!hatlock_held) 10888 hatlockp = sfmmu_hat_enter(sfmmup); 10889 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 10890 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 10891 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10892 if (!hatlock_held) 10893 sfmmu_hat_exit(hatlockp); 10894 } 10895 10896 /* 10897 * 10898 * Algorithm: 10899 * 10900 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 10901 * hblks. 
10902 * 10903 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 10904 * 10905 * (a) try to return an hblk from reserve pool of free hblks; 10906 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 10907 * and return hblk_reserve. 10908 * 10909 * (3) call kmem_cache_alloc() to allocate hblk; 10910 * 10911 * (a) if hblk_reserve_lock is held by the current thread, 10912 * atomically replace hblk_reserve by the hblk that is 10913 * returned by kmem_cache_alloc; release hblk_reserve_lock 10914 * and call kmem_cache_alloc() again. 10915 * (b) if reserve pool is not full, add the hblk that is 10916 * returned by kmem_cache_alloc to reserve pool and 10917 * call kmem_cache_alloc again. 10918 * 10919 */ 10920 static struct hme_blk * 10921 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 10922 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 10923 uint_t flags, uint_t rid) 10924 { 10925 struct hme_blk *hmeblkp = NULL; 10926 struct hme_blk *newhblkp; 10927 struct hme_blk *shw_hblkp = NULL; 10928 struct kmem_cache *sfmmu_cache = NULL; 10929 uint64_t hblkpa; 10930 ulong_t index; 10931 uint_t owner; /* set to 1 if using hblk_reserve */ 10932 uint_t forcefree; 10933 int sleep; 10934 sf_srd_t *srdp; 10935 sf_region_t *rgnp; 10936 10937 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10938 ASSERT(hblktag.htag_rid == rid); 10939 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 10940 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 10941 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 10942 10943 /* 10944 * If segkmem is not created yet, allocate from static hmeblks 10945 * created at the end of startup_modules(). See the block comment 10946 * in startup_modules() describing how we estimate the number of 10947 * static hmeblks that will be needed during re-map. 10948 */ 10949 if (!hblk_alloc_dynamic) { 10950 10951 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 10952 10953 if (size == TTE8K) { 10954 index = nucleus_hblk8.index; 10955 if (index >= nucleus_hblk8.len) { 10956 /* 10957 * If we panic here, see startup_modules() to 10958 * make sure that we are calculating the 10959 * number of hblk8's that we need correctly. 10960 */ 10961 prom_panic("no nucleus hblk8 to allocate"); 10962 } 10963 hmeblkp = 10964 (struct hme_blk *)&nucleus_hblk8.list[index]; 10965 nucleus_hblk8.index++; 10966 SFMMU_STAT(sf_hblk8_nalloc); 10967 } else { 10968 index = nucleus_hblk1.index; 10969 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 10970 /* 10971 * If we panic here, see startup_modules(). 10972 * Most likely you need to update the 10973 * calculation of the number of hblk1 elements 10974 * that the kernel needs to boot. 10975 */ 10976 prom_panic("no nucleus hblk1 to allocate"); 10977 } 10978 hmeblkp = 10979 (struct hme_blk *)&nucleus_hblk1.list[index]; 10980 nucleus_hblk1.index++; 10981 SFMMU_STAT(sf_hblk1_nalloc); 10982 } 10983 10984 goto hblk_init; 10985 } 10986 10987 SFMMU_HASH_UNLOCK(hmebp); 10988 10989 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 10990 if (mmu_page_sizes == max_mmu_page_sizes) { 10991 if (size < TTE256M) 10992 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10993 size, flags); 10994 } else { 10995 if (size < TTE4M) 10996 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10997 size, flags); 10998 } 10999 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11000 /* 11001 * Shared hmes use per region bitmaps in rgn_hmeflag 11002 * rather than shadow hmeblks to keep track of the 11003 * mapping sizes which have been allocated for the region. 
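 * (For illustration: a region that has been mapped with both 8K and
 * 4M hmeblks has (1 << HBLK_MIN_TTESZ) | (1 << TTE4M) set in
 * rgn_hmeflags; sizes below HBLK_MIN_TTESZ share the HBLK_MIN_TTESZ
 * bit, as the tteflag computation at hblk_init below shows.)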
11004 * Here we cleanup old invalid hmeblks with this rid, 11005 * which may be left around by pageunload(). 11006 */ 11007 int ttesz; 11008 caddr_t va; 11009 caddr_t eva = vaddr + TTEBYTES(size); 11010 11011 ASSERT(sfmmup != KHATID); 11012 11013 srdp = sfmmup->sfmmu_srdp; 11014 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11015 rgnp = srdp->srd_hmergnp[rid]; 11016 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11017 ASSERT(rgnp->rgn_refcnt != 0); 11018 ASSERT(size <= rgnp->rgn_pgszc); 11019 11020 ttesz = HBLK_MIN_TTESZ; 11021 do { 11022 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11023 continue; 11024 } 11025 11026 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11027 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11028 } else if (ttesz < size) { 11029 for (va = vaddr; va < eva; 11030 va += TTEBYTES(ttesz)) { 11031 sfmmu_cleanup_rhblk(srdp, va, rid, 11032 ttesz); 11033 } 11034 } 11035 } while (++ttesz <= rgnp->rgn_pgszc); 11036 } 11037 11038 fill_hblk: 11039 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11040 11041 if (owner && size == TTE8K) { 11042 11043 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11044 /* 11045 * We are really in a tight spot. We already own 11046 * hblk_reserve and we need another hblk. In anticipation 11047 * of this kind of scenario, we specifically set aside 11048 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11049 * by owner of hblk_reserve. 11050 */ 11051 SFMMU_STAT(sf_hblk_recurse_cnt); 11052 11053 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11054 panic("sfmmu_hblk_alloc: reserve list is empty"); 11055 11056 goto hblk_verify; 11057 } 11058 11059 ASSERT(!owner); 11060 11061 if ((flags & HAT_NO_KALLOC) == 0) { 11062 11063 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11064 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11065 11066 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11067 hmeblkp = sfmmu_hblk_steal(size); 11068 } else { 11069 /* 11070 * if we are the owner of hblk_reserve, 11071 * swap hblk_reserve with hmeblkp and 11072 * start a fresh life. Hope things go 11073 * better this time. 11074 */ 11075 if (hblk_reserve_thread == curthread) { 11076 ASSERT(sfmmu_cache == sfmmu8_cache); 11077 sfmmu_hblk_swap(hmeblkp); 11078 hblk_reserve_thread = NULL; 11079 mutex_exit(&hblk_reserve_lock); 11080 goto fill_hblk; 11081 } 11082 /* 11083 * let's donate this hblk to our reserve list if 11084 * we are not mapping kernel range 11085 */ 11086 if (size == TTE8K && sfmmup != KHATID) { 11087 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11088 goto fill_hblk; 11089 } 11090 } 11091 } else { 11092 /* 11093 * We are here to map the slab in sfmmu8_cache; let's 11094 * check if we could tap our reserve list; if successful, 11095 * this will avoid the pain of going thru sfmmu_hblk_swap 11096 */ 11097 SFMMU_STAT(sf_hblk_slab_cnt); 11098 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11099 /* 11100 * let's start hblk_reserve dance 11101 */ 11102 SFMMU_STAT(sf_hblk_reserve_cnt); 11103 owner = 1; 11104 mutex_enter(&hblk_reserve_lock); 11105 hmeblkp = HBLK_RESERVE; 11106 hblk_reserve_thread = curthread; 11107 } 11108 } 11109 11110 hblk_verify: 11111 ASSERT(hmeblkp != NULL); 11112 set_hblk_sz(hmeblkp, size); 11113 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11114 SFMMU_HASH_LOCK(hmebp); 11115 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11116 if (newhblkp != NULL) { 11117 SFMMU_HASH_UNLOCK(hmebp); 11118 if (hmeblkp != HBLK_RESERVE) { 11119 /* 11120 * This is really tricky! 
11121 * 11122 * vmem_alloc(vmem_seg_arena) 11123 * vmem_alloc(vmem_internal_arena) 11124 * segkmem_alloc(heap_arena) 11125 * vmem_alloc(heap_arena) 11126 * page_create() 11127 * hat_memload() 11128 * kmem_cache_free() 11129 * kmem_cache_alloc() 11130 * kmem_slab_create() 11131 * vmem_alloc(kmem_internal_arena) 11132 * segkmem_alloc(heap_arena) 11133 * vmem_alloc(heap_arena) 11134 * page_create() 11135 * hat_memload() 11136 * kmem_cache_free() 11137 * ... 11138 * 11139 * Thus, hat_memload() could call kmem_cache_free 11140 * for enough number of times that we could easily 11141 * hit the bottom of the stack or run out of reserve 11142 * list of vmem_seg structs. So, we must donate 11143 * this hblk to reserve list if it's allocated 11144 * from sfmmu8_cache *and* mapping kernel range. 11145 * We don't need to worry about freeing hmeblk1's 11146 * to kmem since they don't map any kmem slabs. 11147 * 11148 * Note: When segkmem supports largepages, we must 11149 * free hmeblk1's to reserve list as well. 11150 */ 11151 forcefree = (sfmmup == KHATID) ? 1 : 0; 11152 if (size == TTE8K && 11153 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11154 goto re_verify; 11155 } 11156 ASSERT(sfmmup != KHATID); 11157 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11158 } else { 11159 /* 11160 * Hey! we don't need hblk_reserve any more. 11161 */ 11162 ASSERT(owner); 11163 hblk_reserve_thread = NULL; 11164 mutex_exit(&hblk_reserve_lock); 11165 owner = 0; 11166 } 11167 re_verify: 11168 /* 11169 * let's check if the goodies are still present 11170 */ 11171 SFMMU_HASH_LOCK(hmebp); 11172 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11173 if (newhblkp != NULL) { 11174 /* 11175 * return newhblkp if it's not hblk_reserve; 11176 * if newhblkp is hblk_reserve, return it 11177 * _only if_ we are the owner of hblk_reserve. 11178 */ 11179 if (newhblkp != HBLK_RESERVE || owner) { 11180 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11181 newhblkp->hblk_shared); 11182 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11183 !newhblkp->hblk_shared); 11184 return (newhblkp); 11185 } else { 11186 /* 11187 * we just hit hblk_reserve in the hash and 11188 * we are not the owner of that; 11189 * 11190 * block until hblk_reserve_thread completes 11191 * swapping hblk_reserve and try the dance 11192 * once again. 11193 */ 11194 SFMMU_HASH_UNLOCK(hmebp); 11195 mutex_enter(&hblk_reserve_lock); 11196 mutex_exit(&hblk_reserve_lock); 11197 SFMMU_STAT(sf_hblk_reserve_hit); 11198 goto fill_hblk; 11199 } 11200 } else { 11201 /* 11202 * it's no more! try the dance once again. 11203 */ 11204 SFMMU_HASH_UNLOCK(hmebp); 11205 goto fill_hblk; 11206 } 11207 } 11208 11209 hblk_init: 11210 if (SFMMU_IS_SHMERID_VALID(rid)) { 11211 uint16_t tteflag = 0x1 << 11212 ((size < HBLK_MIN_TTESZ) ? 
HBLK_MIN_TTESZ : size); 11213 11214 if (!(rgnp->rgn_hmeflags & tteflag)) { 11215 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11216 } 11217 hmeblkp->hblk_shared = 1; 11218 } else { 11219 hmeblkp->hblk_shared = 0; 11220 } 11221 set_hblk_sz(hmeblkp, size); 11222 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11223 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11224 hmeblkp->hblk_tag = hblktag; 11225 hmeblkp->hblk_shadow = shw_hblkp; 11226 hblkpa = hmeblkp->hblk_nextpa; 11227 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 11228 11229 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11230 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11231 ASSERT(hmeblkp->hblk_hmecnt == 0); 11232 ASSERT(hmeblkp->hblk_vcnt == 0); 11233 ASSERT(hmeblkp->hblk_lckcnt == 0); 11234 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11235 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11236 return (hmeblkp); 11237 } 11238 11239 /* 11240 * This function cleans up the hme_blk and returns it to the free list. 11241 */ 11242 /* ARGSUSED */ 11243 static void 11244 sfmmu_hblk_free(struct hme_blk **listp) 11245 { 11246 struct hme_blk *hmeblkp, *next_hmeblkp; 11247 int size; 11248 uint_t critical; 11249 uint64_t hblkpa; 11250 11251 ASSERT(*listp != NULL); 11252 11253 hmeblkp = *listp; 11254 while (hmeblkp != NULL) { 11255 next_hmeblkp = hmeblkp->hblk_next; 11256 ASSERT(!hmeblkp->hblk_hmecnt); 11257 ASSERT(!hmeblkp->hblk_vcnt); 11258 ASSERT(!hmeblkp->hblk_lckcnt); 11259 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11260 ASSERT(hmeblkp->hblk_shared == 0); 11261 ASSERT(hmeblkp->hblk_shw_bit == 0); 11262 ASSERT(hmeblkp->hblk_shadow == NULL); 11263 11264 hblkpa = va_to_pa((caddr_t)hmeblkp); 11265 ASSERT(hblkpa != (uint64_t)-1); 11266 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0; 11267 11268 size = get_hblk_ttesz(hmeblkp); 11269 hmeblkp->hblk_next = NULL; 11270 hmeblkp->hblk_nextpa = hblkpa; 11271 11272 if (hmeblkp->hblk_nuc_bit == 0) { 11273 11274 if (size != TTE8K || 11275 !sfmmu_put_free_hblk(hmeblkp, critical)) 11276 kmem_cache_free(get_hblk_cache(hmeblkp), 11277 hmeblkp); 11278 } 11279 hmeblkp = next_hmeblkp; 11280 } 11281 } 11282 11283 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11284 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11285 11286 static uint_t sfmmu_hblk_steal_twice; 11287 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11288 11289 /* 11290 * Steal a hmeblk from user or kernel hme hash lists. 11291 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11292 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11293 * tap into critical reserve of freehblkp. 11294 * Note: We remain looping in this routine until we find one. 11295 */ 11296 static struct hme_blk * 11297 sfmmu_hblk_steal(int size) 11298 { 11299 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11300 struct hmehash_bucket *hmebp; 11301 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11302 uint64_t hblkpa; 11303 int i; 11304 uint_t loop_cnt = 0, critical; 11305 11306 for (;;) { 11307 /* Check cpu hblk pending queues */ 11308 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) { 11309 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 11310 ASSERT(hmeblkp->hblk_hmecnt == 0); 11311 ASSERT(hmeblkp->hblk_vcnt == 0); 11312 return (hmeblkp); 11313 } 11314 11315 if (size == TTE8K) { 11316 critical = 11317 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11318 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11319 return (hmeblkp); 11320 } 11321 11322 hmebp = (uhmehash_steal_hand == NULL) ? 
uhme_hash : 11323 uhmehash_steal_hand; 11324 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11325 11326 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11327 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11328 SFMMU_HASH_LOCK(hmebp); 11329 hmeblkp = hmebp->hmeblkp; 11330 hblkpa = hmebp->hmeh_nextpa; 11331 pr_hblk = NULL; 11332 while (hmeblkp) { 11333 /* 11334 * check if it is a hmeblk that is not locked 11335 * and not shared. skip shadow hmeblks with 11336 * shadow_mask set i.e valid count non zero. 11337 */ 11338 if ((get_hblk_ttesz(hmeblkp) == size) && 11339 (hmeblkp->hblk_shw_bit == 0 || 11340 hmeblkp->hblk_vcnt == 0) && 11341 (hmeblkp->hblk_lckcnt == 0)) { 11342 /* 11343 * there is a high probability that we 11344 * will find a free one. search some 11345 * buckets for a free hmeblk initially 11346 * before unloading a valid hmeblk. 11347 */ 11348 if ((hmeblkp->hblk_vcnt == 0 && 11349 hmeblkp->hblk_hmecnt == 0) || (i >= 11350 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11351 if (sfmmu_steal_this_hblk(hmebp, 11352 hmeblkp, hblkpa, pr_hblk)) { 11353 /* 11354 * Hblk is unloaded 11355 * successfully 11356 */ 11357 break; 11358 } 11359 } 11360 } 11361 pr_hblk = hmeblkp; 11362 hblkpa = hmeblkp->hblk_nextpa; 11363 hmeblkp = hmeblkp->hblk_next; 11364 } 11365 11366 SFMMU_HASH_UNLOCK(hmebp); 11367 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11368 hmebp = uhme_hash; 11369 } 11370 uhmehash_steal_hand = hmebp; 11371 11372 if (hmeblkp != NULL) 11373 break; 11374 11375 /* 11376 * in the worst case, look for a free one in the kernel 11377 * hash table. 11378 */ 11379 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11380 SFMMU_HASH_LOCK(hmebp); 11381 hmeblkp = hmebp->hmeblkp; 11382 hblkpa = hmebp->hmeh_nextpa; 11383 pr_hblk = NULL; 11384 while (hmeblkp) { 11385 /* 11386 * check if it is free hmeblk 11387 */ 11388 if ((get_hblk_ttesz(hmeblkp) == size) && 11389 (hmeblkp->hblk_lckcnt == 0) && 11390 (hmeblkp->hblk_vcnt == 0) && 11391 (hmeblkp->hblk_hmecnt == 0)) { 11392 if (sfmmu_steal_this_hblk(hmebp, 11393 hmeblkp, hblkpa, pr_hblk)) { 11394 break; 11395 } else { 11396 /* 11397 * Cannot fail since we have 11398 * hash lock. 11399 */ 11400 panic("fail to steal?"); 11401 } 11402 } 11403 11404 pr_hblk = hmeblkp; 11405 hblkpa = hmeblkp->hblk_nextpa; 11406 hmeblkp = hmeblkp->hblk_next; 11407 } 11408 11409 SFMMU_HASH_UNLOCK(hmebp); 11410 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11411 hmebp = khme_hash; 11412 } 11413 11414 if (hmeblkp != NULL) 11415 break; 11416 sfmmu_hblk_steal_twice++; 11417 } 11418 return (hmeblkp); 11419 } 11420 11421 /* 11422 * This routine does real work to prepare a hblk to be "stolen" by 11423 * unloading the mappings, updating shadow counts .... 11424 * It returns 1 if the block is ready to be reused (stolen), or 0 11425 * means the block cannot be stolen yet- pageunload is still working 11426 * on this hblk. 
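 * (Descriptive note on the steps below: besides unloading any
 * remaining mappings, a successful steal unlinks the hmeblk from its
 * hash chain, atomically clears its bit in the parent shadow
 * hmeblk's hblk_shw_mask via an atomic_cas_32() loop, and clears
 * hblk_shw_bit and hblk_shared so the block comes back as an
 * ordinary private hmeblk.)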
11427 */ 11428 static int 11429 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11430 uint64_t hblkpa, struct hme_blk *pr_hblk) 11431 { 11432 int shw_size, vshift; 11433 struct hme_blk *shw_hblkp; 11434 caddr_t vaddr; 11435 uint_t shw_mask, newshw_mask; 11436 struct hme_blk *list = NULL; 11437 11438 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11439 11440 /* 11441 * check if the hmeblk is free, unload if necessary 11442 */ 11443 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11444 sfmmu_t *sfmmup; 11445 demap_range_t dmr; 11446 11447 sfmmup = hblktosfmmu(hmeblkp); 11448 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11449 return (0); 11450 } 11451 DEMAP_RANGE_INIT(sfmmup, &dmr); 11452 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11453 (caddr_t)get_hblk_base(hmeblkp), 11454 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11455 DEMAP_RANGE_FLUSH(&dmr); 11456 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11457 /* 11458 * Pageunload is working on the same hblk. 11459 */ 11460 return (0); 11461 } 11462 11463 sfmmu_hblk_steal_unload_count++; 11464 } 11465 11466 ASSERT(hmeblkp->hblk_lckcnt == 0); 11467 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11468 11469 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1); 11470 hmeblkp->hblk_nextpa = hblkpa; 11471 11472 shw_hblkp = hmeblkp->hblk_shadow; 11473 if (shw_hblkp) { 11474 ASSERT(!hmeblkp->hblk_shared); 11475 shw_size = get_hblk_ttesz(shw_hblkp); 11476 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11477 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11478 ASSERT(vshift < 8); 11479 /* 11480 * Atomically clear shadow mask bit 11481 */ 11482 do { 11483 shw_mask = shw_hblkp->hblk_shw_mask; 11484 ASSERT(shw_mask & (1 << vshift)); 11485 newshw_mask = shw_mask & ~(1 << vshift); 11486 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask, 11487 shw_mask, newshw_mask); 11488 } while (newshw_mask != shw_mask); 11489 hmeblkp->hblk_shadow = NULL; 11490 } 11491 11492 /* 11493 * remove shadow bit if we are stealing an unused shadow hmeblk. 11494 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11495 * we are indeed allocating a shadow hmeblk. 11496 */ 11497 hmeblkp->hblk_shw_bit = 0; 11498 11499 if (hmeblkp->hblk_shared) { 11500 sf_srd_t *srdp; 11501 sf_region_t *rgnp; 11502 uint_t rid; 11503 11504 srdp = hblktosrd(hmeblkp); 11505 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11506 rid = hmeblkp->hblk_tag.htag_rid; 11507 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11508 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11509 rgnp = srdp->srd_hmergnp[rid]; 11510 ASSERT(rgnp != NULL); 11511 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11512 hmeblkp->hblk_shared = 0; 11513 } 11514 11515 sfmmu_hblk_steal_count++; 11516 SFMMU_STAT(sf_steal_count); 11517 11518 return (1); 11519 } 11520 11521 struct hme_blk * 11522 sfmmu_hmetohblk(struct sf_hment *sfhme) 11523 { 11524 struct hme_blk *hmeblkp; 11525 struct sf_hment *sfhme0; 11526 struct hme_blk *hblk_dummy = 0; 11527 11528 /* 11529 * No dummy sf_hments, please. 11530 */ 11531 ASSERT(sfhme->hme_tte.ll != 0); 11532 11533 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11534 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11535 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11536 11537 return (hmeblkp); 11538 } 11539 11540 /* 11541 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11542 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11543 * KM_SLEEP allocation. 11544 * 11545 * Return 0 on success, -1 otherwise. 
11546 */ 11547 static void 11548 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11549 { 11550 struct tsb_info *tsbinfop, *next; 11551 tsb_replace_rc_t rc; 11552 boolean_t gotfirst = B_FALSE; 11553 11554 ASSERT(sfmmup != ksfmmup); 11555 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11556 11557 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11558 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11559 } 11560 11561 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11562 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11563 } else { 11564 return; 11565 } 11566 11567 ASSERT(sfmmup->sfmmu_tsb != NULL); 11568 11569 /* 11570 * Loop over all tsbinfo's replacing them with ones that actually have 11571 * a TSB. If any of the replacements ever fail, bail out of the loop. 11572 */ 11573 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11574 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11575 next = tsbinfop->tsb_next; 11576 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11577 hatlockp, TSB_SWAPIN); 11578 if (rc != TSB_SUCCESS) { 11579 break; 11580 } 11581 gotfirst = B_TRUE; 11582 } 11583 11584 switch (rc) { 11585 case TSB_SUCCESS: 11586 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11587 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11588 return; 11589 case TSB_LOSTRACE: 11590 break; 11591 case TSB_ALLOCFAIL: 11592 break; 11593 default: 11594 panic("sfmmu_replace_tsb returned unrecognized failure code " 11595 "%d", rc); 11596 } 11597 11598 /* 11599 * In this case, we failed to get one of our TSBs. If we failed to 11600 * get the first TSB, get one of minimum size (8KB). Walk the list 11601 * and throw away the tsbinfos, starting where the allocation failed; 11602 * we can get by with just one TSB as long as we don't leave the 11603 * SWAPPED tsbinfo structures lying around. 11604 */ 11605 tsbinfop = sfmmup->sfmmu_tsb; 11606 next = tsbinfop->tsb_next; 11607 tsbinfop->tsb_next = NULL; 11608 11609 sfmmu_hat_exit(hatlockp); 11610 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 11611 next = tsbinfop->tsb_next; 11612 sfmmu_tsbinfo_free(tsbinfop); 11613 } 11614 hatlockp = sfmmu_hat_enter(sfmmup); 11615 11616 /* 11617 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 11618 * pages. 11619 */ 11620 if (!gotfirst) { 11621 tsbinfop = sfmmup->sfmmu_tsb; 11622 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 11623 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 11624 ASSERT(rc == TSB_SUCCESS); 11625 } 11626 11627 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11628 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11629 } 11630 11631 static int 11632 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw) 11633 { 11634 ulong_t bix = 0; 11635 uint_t rid; 11636 sf_region_t *rgnp; 11637 11638 ASSERT(srdp != NULL); 11639 ASSERT(srdp->srd_refcnt != 0); 11640 11641 w <<= BT_ULSHIFT; 11642 while (bmw) { 11643 if (!(bmw & 0x1)) { 11644 bix++; 11645 bmw >>= 1; 11646 continue; 11647 } 11648 rid = w | bix; 11649 rgnp = srdp->srd_hmergnp[rid]; 11650 ASSERT(rgnp->rgn_refcnt > 0); 11651 ASSERT(rgnp->rgn_id == rid); 11652 if (addr < rgnp->rgn_saddr || 11653 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) { 11654 bix++; 11655 bmw >>= 1; 11656 } else { 11657 return (1); 11658 } 11659 } 11660 return (0); 11661 } 11662 11663 /* 11664 * Handle exceptions for low level tsb_handler. 11665 * 11666 * There are many scenarios that could land us here: 11667 * 11668 * If the context is invalid we land here. 
The context can be invalid 11669 * for 3 reasons: 1) we couldn't allocate a new context and now need to 11670 * perform a wrap around operation in order to allocate a new context. 11671 * 2) Context was invalidated to change pagesize programming. 3) ISMs or 11672 * TSBs configuration is changing for this process and we are forced into 11673 * here to do a synchronization operation. If the context is valid we can 11674 * be here from window trap handler. In this case just call trap to handle 11675 * the fault. 11676 * 11677 * Note that the process will run in INVALID_CONTEXT before 11678 * faulting into here and subsequently loading the MMU registers 11679 * (including the TSB base register) associated with this process. 11680 * For this reason, the trap handlers must all test for 11681 * INVALID_CONTEXT before attempting to access any registers other 11682 * than the context registers. 11683 */ 11684 void 11685 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 11686 { 11687 sfmmu_t *sfmmup, *shsfmmup; 11688 uint_t ctxtype; 11689 klwp_id_t lwp; 11690 char lwp_save_state; 11691 hatlock_t *hatlockp, *shatlockp; 11692 struct tsb_info *tsbinfop; 11693 struct tsbmiss *tsbmp; 11694 sf_scd_t *scdp; 11695 11696 SFMMU_STAT(sf_tsb_exceptions); 11697 SFMMU_MMU_STAT(mmu_tsb_exceptions); 11698 sfmmup = astosfmmu(curthread->t_procp->p_as); 11699 /* 11700 * note that in sun4u, the tagaccess register contains ctxnum 11701 * while sun4v passes ctxtype in the tagaccess register. 11702 */ 11703 ctxtype = tagaccess & TAGACC_CTX_MASK; 11704 11705 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT); 11706 ASSERT(sfmmup->sfmmu_ismhat == 0); 11707 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 11708 ctxtype == INVALID_CONTEXT); 11709 11710 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) { 11711 /* 11712 * We may land here because shme bitmap and pagesize 11713 * flags are updated lazily in tsbmiss area on other cpus. 11714 * If we detect here that tsbmiss area is out of sync with 11715 * sfmmu, update it and retry the trapped instruction. 11716 * Otherwise call trap().
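 * (In outline, the recovery below is: refresh the cached
 * uhat_tteflags/uhat_rtteflags and shmermap words in this CPU's
 * tsbmiss area from the sfmmu, set ret if stale state was found that
 * could account for the miss, and if so simply return so the
 * faulting instruction is retried against the updated tsbmiss area;
 * otherwise fall through to trap().)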
11717 */ 11718 int ret = 0; 11719 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11720 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11721 11722 /* 11723 * Must set lwp state to LWP_SYS before 11724 * trying to acquire any adaptive lock 11725 */ 11726 lwp = ttolwp(curthread); 11727 ASSERT(lwp); 11728 lwp_save_state = lwp->lwp_state; 11729 lwp->lwp_state = LWP_SYS; 11730 11731 hatlockp = sfmmu_hat_enter(sfmmup); 11732 kpreempt_disable(); 11733 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11734 ASSERT(sfmmup == tsbmp->usfmmup); 11735 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11736 ~tteflag_mask) || 11737 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11738 ~tteflag_mask)) { 11739 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11740 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11741 ret = 1; 11742 } 11743 if (sfmmup->sfmmu_srdp != NULL) { 11744 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11745 ulong_t *tm = tsbmp->shmermap; 11746 ulong_t i; 11747 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11748 ulong_t d = tm[i] ^ sm[i]; 11749 if (d) { 11750 if (d & sm[i]) { 11751 if (!ret && sfmmu_is_rgnva( 11752 sfmmup->sfmmu_srdp, 11753 addr, i, d & sm[i])) { 11754 ret = 1; 11755 } 11756 } 11757 tm[i] = sm[i]; 11758 } 11759 } 11760 } 11761 kpreempt_enable(); 11762 sfmmu_hat_exit(hatlockp); 11763 lwp->lwp_state = lwp_save_state; 11764 if (ret) { 11765 return; 11766 } 11767 } else if (ctxtype == INVALID_CONTEXT) { 11768 /* 11769 * First, make sure we come out of here with a valid ctx, 11770 * since if we don't get one we'll simply loop on the 11771 * faulting instruction. 11772 * 11773 * If the ISM mappings are changing, the TSB is relocated, 11774 * the process is swapped, the process is joining SCD or 11775 * leaving SCD or shared regions we serialize behind the 11776 * controlling thread with hat lock, sfmmu_flags and 11777 * sfmmu_tsb_cv condition variable. 11778 */ 11779 11780 /* 11781 * Must set lwp state to LWP_SYS before 11782 * trying to acquire any adaptive lock 11783 */ 11784 lwp = ttolwp(curthread); 11785 ASSERT(lwp); 11786 lwp_save_state = lwp->lwp_state; 11787 lwp->lwp_state = LWP_SYS; 11788 11789 hatlockp = sfmmu_hat_enter(sfmmup); 11790 retry: 11791 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 11792 shsfmmup = scdp->scd_sfmmup; 11793 ASSERT(shsfmmup != NULL); 11794 11795 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 11796 tsbinfop = tsbinfop->tsb_next) { 11797 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11798 /* drop the private hat lock */ 11799 sfmmu_hat_exit(hatlockp); 11800 /* acquire the shared hat lock */ 11801 shatlockp = sfmmu_hat_enter(shsfmmup); 11802 /* 11803 * recheck to see if anything changed 11804 * after we drop the private hat lock. 11805 */ 11806 if (sfmmup->sfmmu_scdp == scdp && 11807 shsfmmup == scdp->scd_sfmmup) { 11808 sfmmu_tsb_chk_reloc(shsfmmup, 11809 shatlockp); 11810 } 11811 sfmmu_hat_exit(shatlockp); 11812 hatlockp = sfmmu_hat_enter(sfmmup); 11813 goto retry; 11814 } 11815 } 11816 } 11817 11818 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 11819 tsbinfop = tsbinfop->tsb_next) { 11820 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11821 cv_wait(&sfmmup->sfmmu_tsb_cv, 11822 HATLOCK_MUTEXP(hatlockp)); 11823 goto retry; 11824 } 11825 } 11826 11827 /* 11828 * Wait for ISM maps to be updated. 11829 */ 11830 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 11831 cv_wait(&sfmmup->sfmmu_tsb_cv, 11832 HATLOCK_MUTEXP(hatlockp)); 11833 goto retry; 11834 } 11835 11836 /* Is this process joining an SCD? 
*/ 11837 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 11838 /* 11839 * Flush private TSB and setup shared TSB. 11840 * sfmmu_finish_join_scd() does not drop the 11841 * hat lock. 11842 */ 11843 sfmmu_finish_join_scd(sfmmup); 11844 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 11845 } 11846 11847 /* 11848 * If we're swapping in, get TSB(s). Note that we must do 11849 * this before we get a ctx or load the MMU state. Once 11850 * we swap in we have to recheck to make sure the TSB(s) and 11851 * ISM mappings didn't change while we slept. 11852 */ 11853 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11854 sfmmu_tsb_swapin(sfmmup, hatlockp); 11855 goto retry; 11856 } 11857 11858 sfmmu_get_ctx(sfmmup); 11859 11860 sfmmu_hat_exit(hatlockp); 11861 /* 11862 * Must restore lwp_state if not calling 11863 * trap() for further processing. Restore 11864 * it anyway. 11865 */ 11866 lwp->lwp_state = lwp_save_state; 11867 return; 11868 } 11869 trap(rp, (caddr_t)tagaccess, traptype, 0); 11870 } 11871 11872 static void 11873 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11874 { 11875 struct tsb_info *tp; 11876 11877 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11878 11879 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) { 11880 if (tp->tsb_flags & TSB_RELOC_FLAG) { 11881 cv_wait(&sfmmup->sfmmu_tsb_cv, 11882 HATLOCK_MUTEXP(hatlockp)); 11883 break; 11884 } 11885 } 11886 } 11887 11888 /* 11889 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 11890 * the TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock 11891 * rather than spinning, to avoid send mondo timeouts with 11892 * interrupts enabled. When the lock is acquired it is immediately 11893 * released and we return to sfmmu_vatopfn just after 11894 * the GET_TTE call. 11895 */ 11896 void 11897 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 11898 { 11899 struct page **pp; 11900 11901 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 11902 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 11903 } 11904 11905 /* 11906 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 11907 * the TTE_SUSPENDED bit is set in the tte. We do this so that we can handle 11908 * cross traps which cannot be handled while spinning in the 11909 * trap handlers. Simply enter and exit the kpr_suspendlock spin 11910 * mutex, which is held by the holder of the suspend bit, and then 11911 * retry the trapped instruction after unwinding. 11912 */ 11913 /*ARGSUSED*/ 11914 void 11915 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 11916 { 11917 ASSERT(curthread != kreloc_thread); 11918 mutex_enter(&kpr_suspendlock); 11919 mutex_exit(&kpr_suspendlock); 11920 } 11921 11922 /* 11923 * This routine could be optimized to reduce the number of xcalls by flushing 11924 * the entire TLB if the region reference count is above some threshold, but the 11925 * tradeoff will depend on the size of the TLB. So for now flush the specific 11926 * page a context at a time. 11927 * 11928 * If uselocks is 0 then it's called after all cpus were captured and all the 11929 * hat locks were taken. In this case don't take the region lock by relying on 11930 * the order of list region update operations in hat_join_region(), 11931 * hat_leave_region() and hat_dup_region(). The ordering in those routines 11932 * guarantees that the list is always forward walkable and reaches active sfmmus 11933 * regardless of where xc_attention() captures a cpu.
11934 */ 11935 cpuset_t 11936 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp, 11937 struct hme_blk *hmeblkp, int uselocks) 11938 { 11939 sfmmu_t *sfmmup; 11940 cpuset_t cpuset; 11941 cpuset_t rcpuset; 11942 hatlock_t *hatlockp; 11943 uint_t rid = rgnp->rgn_id; 11944 sf_rgn_link_t *rlink; 11945 sf_scd_t *scdp; 11946 11947 ASSERT(hmeblkp->hblk_shared); 11948 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11949 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11950 11951 CPUSET_ZERO(rcpuset); 11952 if (uselocks) { 11953 mutex_enter(&rgnp->rgn_mutex); 11954 } 11955 sfmmup = rgnp->rgn_sfmmu_head; 11956 while (sfmmup != NULL) { 11957 if (uselocks) { 11958 hatlockp = sfmmu_hat_enter(sfmmup); 11959 } 11960 11961 /* 11962 * When an SCD is created the SCD hat is linked on the sfmmu 11963 * region lists for each hme region which is part of the 11964 * SCD. If we find an SCD hat when walking these lists, 11965 * then we flush the shared TSBs; if we find a private hat, 11966 * which is part of an SCD, but where the region 11967 * is not part of the SCD, then we flush the private TSBs. 11968 */ 11969 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 11970 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 11971 scdp = sfmmup->sfmmu_scdp; 11972 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 11973 if (uselocks) { 11974 sfmmu_hat_exit(hatlockp); 11975 } 11976 goto next; 11977 } 11978 } 11979 11980 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 11981 11982 kpreempt_disable(); 11983 cpuset = sfmmup->sfmmu_cpusran; 11984 CPUSET_AND(cpuset, cpu_ready_set); 11985 CPUSET_DEL(cpuset, CPU->cpu_id); 11986 SFMMU_XCALL_STATS(sfmmup); 11987 xt_some(cpuset, vtag_flushpage_tl1, 11988 (uint64_t)addr, (uint64_t)sfmmup); 11989 vtag_flushpage(addr, (uint64_t)sfmmup); 11990 if (uselocks) { 11991 sfmmu_hat_exit(hatlockp); 11992 } 11993 kpreempt_enable(); 11994 CPUSET_OR(rcpuset, cpuset); 11995 11996 next: 11997 /* LINTED: constant in conditional context */ 11998 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 11999 ASSERT(rlink != NULL); 12000 sfmmup = rlink->next; 12001 } 12002 if (uselocks) { 12003 mutex_exit(&rgnp->rgn_mutex); 12004 } 12005 return (rcpuset); 12006 } 12007 12008 /* 12009 * This routine takes an sfmmu pointer and the va for an address in an 12010 * ISM region as input and returns the corresponding region id in ism_rid. 12011 * The return value of 1 indicates that a region has been found and ism_rid 12012 * is valid, otherwise 0 is returned. 12013 */ 12014 static int 12015 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid) 12016 { 12017 ism_blk_t *ism_blkp; 12018 int i; 12019 ism_map_t *ism_map; 12020 #ifdef DEBUG 12021 struct hat *ism_hatid; 12022 #endif 12023 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12024 12025 ism_blkp = sfmmup->sfmmu_iblk; 12026 while (ism_blkp != NULL) { 12027 ism_map = ism_blkp->iblk_maps; 12028 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 12029 if ((va >= ism_start(ism_map[i])) && 12030 (va < ism_end(ism_map[i]))) { 12031 12032 *ism_rid = ism_map[i].imap_rid; 12033 #ifdef DEBUG 12034 ism_hatid = ism_map[i].imap_ismhat; 12035 ASSERT(ism_hatid == ism_sfmmup); 12036 ASSERT(ism_hatid->sfmmu_ismhat); 12037 #endif 12038 return (1); 12039 } 12040 } 12041 ism_blkp = ism_blkp->iblk_next; 12042 } 12043 return (0); 12044 } 12045 12046 /* 12047 * Special routine to flush out ism mappings - TSBs, TLBs and D-caches. 12048 * This routine may be called with all cpu's captured.
Therefore, the 12049 * caller is responsible for holding all locks and disabling kernel 12050 * preemption. 12051 */ 12052 /* ARGSUSED */ 12053 static void 12054 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 12055 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 12056 { 12057 cpuset_t cpuset; 12058 caddr_t va; 12059 ism_ment_t *ment; 12060 sfmmu_t *sfmmup; 12061 #ifdef VAC 12062 int vcolor; 12063 #endif 12064 12065 sf_scd_t *scdp; 12066 uint_t ism_rid; 12067 12068 ASSERT(!hmeblkp->hblk_shared); 12069 /* 12070 * Walk the ism_hat's mapping list and flush the page 12071 * from every hat sharing this ism_hat. This routine 12072 * may be called while all cpu's have been captured. 12073 * Therefore we can't attempt to grab any locks. For now 12074 * this means we will protect the ism mapping list under 12075 * a single lock which will be grabbed by the caller. 12076 * If hat_share/unshare scalability becomes a performance 12077 * problem then we may need to re-think ism mapping list locking. 12078 */ 12079 ASSERT(ism_sfmmup->sfmmu_ismhat); 12080 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 12081 addr = addr - ISMID_STARTADDR; 12082 12083 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 12084 12085 sfmmup = ment->iment_hat; 12086 12087 va = ment->iment_base_va; 12088 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 12089 12090 /* 12091 * When an SCD is created the SCD hat is linked on the ism 12092 * mapping lists for each ISM segment which is part of the 12093 * SCD. If we find an SCD hat when walking these lists, 12094 * then we flush the shared TSBs; if we find a private hat, 12095 * which is part of an SCD, but where the region 12096 * corresponding to this va is not part of the SCD, then we 12097 * flush the private TSBs. 12098 */ 12099 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12100 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12101 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12102 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12103 &ism_rid)) { 12104 cmn_err(CE_PANIC, 12105 "can't find matching ISM rid!"); 12106 } 12107 12108 scdp = sfmmup->sfmmu_scdp; 12109 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12110 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12111 ism_rid)) { 12112 continue; 12113 } 12114 } 12115 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12116 12117 cpuset = sfmmup->sfmmu_cpusran; 12118 CPUSET_AND(cpuset, cpu_ready_set); 12119 CPUSET_DEL(cpuset, CPU->cpu_id); 12120 SFMMU_XCALL_STATS(sfmmup); 12121 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12122 (uint64_t)sfmmup); 12123 vtag_flushpage(va, (uint64_t)sfmmup); 12124 12125 #ifdef VAC 12126 /* 12127 * Flush D$ 12128 * When flushing D$ we must flush all 12129 * cpu's. See sfmmu_cache_flush(). 12130 */ 12131 if (cache_flush_flag == CACHE_FLUSH) { 12132 cpuset = cpu_ready_set; 12133 CPUSET_DEL(cpuset, CPU->cpu_id); 12134 12135 SFMMU_XCALL_STATS(sfmmup); 12136 vcolor = addr_to_vcolor(va); 12137 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12138 vac_flushpage(pfnum, vcolor); 12139 } 12140 #endif /* VAC */ 12141 } 12142 } 12143 12144 /* 12145 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12146 * a particular virtual address and ctx. If noflush is set we do not 12147 * flush the TLB/TSB. This function may or may not be called with the 12148 * HAT lock held.
12149 */ 12150 static void 12151 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12152 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12153 int hat_lock_held) 12154 { 12155 #ifdef VAC 12156 int vcolor; 12157 #endif 12158 cpuset_t cpuset; 12159 hatlock_t *hatlockp; 12160 12161 ASSERT(!hmeblkp->hblk_shared); 12162 12163 #if defined(lint) && !defined(VAC) 12164 pfnum = pfnum; 12165 cpu_flag = cpu_flag; 12166 cache_flush_flag = cache_flush_flag; 12167 #endif 12168 12169 /* 12170 * There is no longer a need to protect against ctx being 12171 * stolen here since we don't store the ctx in the TSB anymore. 12172 */ 12173 #ifdef VAC 12174 vcolor = addr_to_vcolor(addr); 12175 #endif 12176 12177 /* 12178 * We must hold the hat lock during the flush of TLB, 12179 * to avoid a race with sfmmu_invalidate_ctx(), where 12180 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12181 * causing TLB demap routine to skip flush on that MMU. 12182 * If the context on a MMU has already been set to 12183 * INVALID_CONTEXT, we just get an extra flush on 12184 * that MMU. 12185 */ 12186 if (!hat_lock_held && !tlb_noflush) 12187 hatlockp = sfmmu_hat_enter(sfmmup); 12188 12189 kpreempt_disable(); 12190 if (!tlb_noflush) { 12191 /* 12192 * Flush the TSB and TLB. 12193 */ 12194 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12195 12196 cpuset = sfmmup->sfmmu_cpusran; 12197 CPUSET_AND(cpuset, cpu_ready_set); 12198 CPUSET_DEL(cpuset, CPU->cpu_id); 12199 12200 SFMMU_XCALL_STATS(sfmmup); 12201 12202 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12203 (uint64_t)sfmmup); 12204 12205 vtag_flushpage(addr, (uint64_t)sfmmup); 12206 } 12207 12208 if (!hat_lock_held && !tlb_noflush) 12209 sfmmu_hat_exit(hatlockp); 12210 12211 #ifdef VAC 12212 /* 12213 * Flush the D$ 12214 * 12215 * Even if the ctx is stolen, we need to flush the 12216 * cache. Our ctx stealer only flushes the TLBs. 12217 */ 12218 if (cache_flush_flag == CACHE_FLUSH) { 12219 if (cpu_flag & FLUSH_ALL_CPUS) { 12220 cpuset = cpu_ready_set; 12221 } else { 12222 cpuset = sfmmup->sfmmu_cpusran; 12223 CPUSET_AND(cpuset, cpu_ready_set); 12224 } 12225 CPUSET_DEL(cpuset, CPU->cpu_id); 12226 SFMMU_XCALL_STATS(sfmmup); 12227 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12228 vac_flushpage(pfnum, vcolor); 12229 } 12230 #endif /* VAC */ 12231 kpreempt_enable(); 12232 } 12233 12234 /* 12235 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12236 * address and ctx. If noflush is set we do not currently do anything. 12237 * This function may or may not be called with the HAT lock held. 12238 */ 12239 static void 12240 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12241 int tlb_noflush, int hat_lock_held) 12242 { 12243 cpuset_t cpuset; 12244 hatlock_t *hatlockp; 12245 12246 ASSERT(!hmeblkp->hblk_shared); 12247 12248 /* 12249 * If the process is exiting we have nothing to do. 12250 */ 12251 if (tlb_noflush) 12252 return; 12253 12254 /* 12255 * Flush TSB. 
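 * (The code below unloads the TSB entry and cross-calls vtag_flushpage
 *  to the other ready CPUs this process has run on, then flushes the
 *  local TLB entry as well.)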
12256 */ 12257 if (!hat_lock_held) 12258 hatlockp = sfmmu_hat_enter(sfmmup); 12259 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12260 12261 kpreempt_disable(); 12262 12263 cpuset = sfmmup->sfmmu_cpusran; 12264 CPUSET_AND(cpuset, cpu_ready_set); 12265 CPUSET_DEL(cpuset, CPU->cpu_id); 12266 12267 SFMMU_XCALL_STATS(sfmmup); 12268 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12269 12270 vtag_flushpage(addr, (uint64_t)sfmmup); 12271 12272 if (!hat_lock_held) 12273 sfmmu_hat_exit(hatlockp); 12274 12275 kpreempt_enable(); 12276 12277 } 12278 12279 /* 12280 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12281 * call handler that can flush a range of pages to save on xcalls. 12282 */ 12283 static int sfmmu_xcall_save; 12284 12285 /* 12286 * This routine is never used for demapping addresses backed by SRD hmeblks. 12287 */ 12288 static void 12289 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12290 { 12291 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12292 hatlock_t *hatlockp; 12293 cpuset_t cpuset; 12294 uint64_t sfmmu_pgcnt; 12295 pgcnt_t pgcnt = 0; 12296 int pgunload = 0; 12297 int dirtypg = 0; 12298 caddr_t addr = dmrp->dmr_addr; 12299 caddr_t eaddr; 12300 uint64_t bitvec = dmrp->dmr_bitvec; 12301 12302 ASSERT(bitvec & 1); 12303 12304 /* 12305 * Flush TSB and calculate number of pages to flush. 12306 */ 12307 while (bitvec != 0) { 12308 dirtypg = 0; 12309 /* 12310 * Find the first page to flush and then count how many 12311 * pages there are after it that also need to be flushed. 12312 * This way the number of TSB flushes is minimized. 12313 */ 12314 while ((bitvec & 1) == 0) { 12315 pgcnt++; 12316 addr += MMU_PAGESIZE; 12317 bitvec >>= 1; 12318 } 12319 while (bitvec & 1) { 12320 dirtypg++; 12321 bitvec >>= 1; 12322 } 12323 eaddr = addr + ptob(dirtypg); 12324 hatlockp = sfmmu_hat_enter(sfmmup); 12325 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12326 sfmmu_hat_exit(hatlockp); 12327 pgunload += dirtypg; 12328 addr = eaddr; 12329 pgcnt += dirtypg; 12330 } 12331 12332 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12333 if (sfmmup->sfmmu_free == 0) { 12334 addr = dmrp->dmr_addr; 12335 bitvec = dmrp->dmr_bitvec; 12336 12337 /* 12338 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12339 * as it will be used to pack the argument for xt_some 12340 */ 12341 ASSERT((pgcnt > 0) && 12342 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12343 12344 /* 12345 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in 12346 * the low 6 bits of sfmmup. This is doable since pgcnt 12347 * is always >= 1. 12348 */ 12349 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12350 sfmmu_pgcnt = (uint64_t)sfmmup | 12351 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12352 12353 /* 12354 * We must hold the hat lock during the flush of TLB, 12355 * to avoid a race with sfmmu_invalidate_ctx(), where 12356 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12357 * causing TLB demap routine to skip flush on that MMU. 12358 * If the context on a MMU has already been set to 12359 * INVALID_CONTEXT, we just get an extra flush on 12360 * that MMU.
12361 */ 12362 hatlockp = sfmmu_hat_enter(sfmmup); 12363 kpreempt_disable(); 12364 12365 cpuset = sfmmup->sfmmu_cpusran; 12366 CPUSET_AND(cpuset, cpu_ready_set); 12367 CPUSET_DEL(cpuset, CPU->cpu_id); 12368 12369 SFMMU_XCALL_STATS(sfmmup); 12370 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12371 sfmmu_pgcnt); 12372 12373 for (; bitvec != 0; bitvec >>= 1) { 12374 if (bitvec & 1) 12375 vtag_flushpage(addr, (uint64_t)sfmmup); 12376 addr += MMU_PAGESIZE; 12377 } 12378 kpreempt_enable(); 12379 sfmmu_hat_exit(hatlockp); 12380 12381 sfmmu_xcall_save += (pgunload-1); 12382 } 12383 dmrp->dmr_bitvec = 0; 12384 } 12385 12386 /* 12387 * In cases where we need to synchronize with TLB/TSB miss trap 12388 * handlers, _and_ need to flush the TLB, it's a lot easier to 12389 * throw away the context from the process than to do a 12390 * special song and dance to keep things consistent for the 12391 * handlers. 12392 * 12393 * Since the process suddenly ends up without a context and our caller 12394 * holds the hat lock, threads that fault after this function is called 12395 * will pile up on the lock. We can then do whatever we need to 12396 * atomically from the context of the caller. The first blocked thread 12397 * to resume executing will get the process a new context, and the 12398 * process will resume executing. 12399 * 12400 * One added advantage of this approach is that on MMUs that 12401 * support a "flush all" operation, we will delay the flush until 12402 * cnum wrap-around, and then flush the TLB one time. This 12403 * is rather rare, so it's a lot less expensive than making 8000 12404 * x-calls to flush the TLB 8000 times. 12405 * 12406 * A per-process (PP) lock is used to synchronize ctx allocations in 12407 * resume() and ctx invalidations here. 12408 */ 12409 static void 12410 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 12411 { 12412 cpuset_t cpuset; 12413 int cnum, currcnum; 12414 mmu_ctx_t *mmu_ctxp; 12415 int i; 12416 uint_t pstate_save; 12417 12418 SFMMU_STAT(sf_ctx_inv); 12419 12420 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12421 ASSERT(sfmmup != ksfmmup); 12422 12423 kpreempt_disable(); 12424 12425 mmu_ctxp = CPU_MMU_CTXP(CPU); 12426 ASSERT(mmu_ctxp); 12427 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 12428 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 12429 12430 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 12431 12432 pstate_save = sfmmu_disable_intrs(); 12433 12434 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 12435 /* set HAT cnum invalid across all context domains. */ 12436 for (i = 0; i < max_mmu_ctxdoms; i++) { 12437 12438 cnum = sfmmup->sfmmu_ctxs[i].cnum; 12439 if (cnum == INVALID_CONTEXT) { 12440 continue; 12441 } 12442 12443 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 12444 } 12445 membar_enter(); /* make sure globally visible to all CPUs */ 12446 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 12447 12448 sfmmu_enable_intrs(pstate_save); 12449 12450 cpuset = sfmmup->sfmmu_cpusran; 12451 CPUSET_DEL(cpuset, CPU->cpu_id); 12452 CPUSET_AND(cpuset, cpu_ready_set); 12453 if (!CPUSET_ISNULL(cpuset)) { 12454 SFMMU_XCALL_STATS(sfmmup); 12455 xt_some(cpuset, sfmmu_raise_tsb_exception, 12456 (uint64_t)sfmmup, INVALID_CONTEXT); 12457 xt_sync(cpuset); 12458 SFMMU_STAT(sf_tsb_raise_exception); 12459 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 12460 } 12461 12462 /* 12463 * If the hat to-be-invalidated is the same as the current 12464 * process on local CPU we need to invalidate 12465 * this CPU context as well. 
12466 */ 12467 if ((sfmmu_getctx_sec() == currcnum) && 12468 (currcnum != INVALID_CONTEXT)) { 12469 /* sets shared context to INVALID too */ 12470 sfmmu_setctx_sec(INVALID_CONTEXT); 12471 sfmmu_clear_utsbinfo(); 12472 } 12473 12474 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID); 12475 12476 kpreempt_enable(); 12477 12478 /* 12479 * We hold the hat lock, so nobody should allocate a context 12480 * for us yet. 12481 */ 12482 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 12483 } 12484 12485 #ifdef VAC 12486 /* 12487 * We need to flush the cache on all cpus. It is possible that 12488 * a process referenced a page as cacheable but has since exited 12489 * and cleared the mapping list. We still need to flush it, but we have no 12490 * state, so flushing on all cpus is the only alternative. 12491 */ 12492 void 12493 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 12494 { 12495 cpuset_t cpuset; 12496 12497 kpreempt_disable(); 12498 cpuset = cpu_ready_set; 12499 CPUSET_DEL(cpuset, CPU->cpu_id); 12500 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12501 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12502 xt_sync(cpuset); 12503 vac_flushpage(pfnum, vcolor); 12504 kpreempt_enable(); 12505 } 12506 12507 void 12508 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 12509 { 12510 cpuset_t cpuset; 12511 12512 ASSERT(vcolor >= 0); 12513 12514 kpreempt_disable(); 12515 cpuset = cpu_ready_set; 12516 CPUSET_DEL(cpuset, CPU->cpu_id); 12517 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12518 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 12519 xt_sync(cpuset); 12520 vac_flushcolor(vcolor, pfnum); 12521 kpreempt_enable(); 12522 } 12523 #endif /* VAC */ 12524 12525 /* 12526 * We need to prevent processes from accessing the TSB using a cached physical 12527 * address. It's alright if they try to access the TSB via virtual address 12528 * since they will just fault on that virtual address once the mapping has 12529 * been suspended. 12530 */ 12531 #pragma weak sendmondo_in_recover 12532 12533 /* ARGSUSED */ 12534 static int 12535 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 12536 { 12537 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12538 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12539 hatlock_t *hatlockp; 12540 sf_scd_t *scdp; 12541 12542 if (flags != HAT_PRESUSPEND) 12543 return (0); 12544 12545 /* 12546 * If the tsb is a shared TSB, with TSB_SHAREDCTX set, sfmmup must 12547 * be a shared hat; in that case set the SCD's tsbinfo flag. 12548 * If the tsb is not shared, sfmmup is a private hat; set 12549 * its private tsbinfo flag. 12550 */ 12551 hatlockp = sfmmu_hat_enter(sfmmup); 12552 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 12553 12554 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) { 12555 sfmmu_tsb_inv_ctx(sfmmup); 12556 sfmmu_hat_exit(hatlockp); 12557 } else { 12558 /* release lock on the shared hat */ 12559 sfmmu_hat_exit(hatlockp); 12560 /* sfmmup is a shared hat */ 12561 ASSERT(sfmmup->sfmmu_scdhat); 12562 scdp = sfmmup->sfmmu_scdp; 12563 ASSERT(scdp != NULL); 12564 /* get private hat from the scd list */ 12565 mutex_enter(&scdp->scd_mutex); 12566 sfmmup = scdp->scd_sf_list; 12567 while (sfmmup != NULL) { 12568 hatlockp = sfmmu_hat_enter(sfmmup); 12569 /* 12570 * We do not call sfmmu_tsb_inv_ctx here because 12571 * the sendmondo_in_recover check is only needed for 12572 * sun4u.
12573 */ 12574 sfmmu_invalidate_ctx(sfmmup); 12575 sfmmu_hat_exit(hatlockp); 12576 sfmmup = sfmmup->sfmmu_scd_link.next; 12577 12578 } 12579 mutex_exit(&scdp->scd_mutex); 12580 } 12581 return (0); 12582 } 12583 12584 static void 12585 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12586 { 12587 extern uint32_t sendmondo_in_recover; 12588 12589 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12590 12591 /* 12592 * For Cheetah+ Erratum 25: 12593 * Wait for any active recovery to finish. We can't risk 12594 * relocating the TSB of the thread running mondo_recover_proc() 12595 * since, if we did that, we would deadlock. The scenario we are 12596 * trying to avoid is as follows: 12597 * 12598 * THIS CPU RECOVER CPU 12599 * -------- ----------- 12600 * Begins recovery, walking through TSB 12601 * hat_pagesuspend() TSB TTE 12602 * TLB miss on TSB TTE, spins at TL1 12603 * xt_sync() 12604 * send_mondo_timeout() 12605 * mondo_recover_proc() 12606 * ((deadlocked)) 12607 * 12608 * The second half of the workaround is that mondo_recover_proc() 12609 * checks to see if the tsb_info has the RELOC flag set, and if it 12610 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12611 * and hence avoiding the TLB miss that could result in a deadlock. 12612 */ 12613 if (&sendmondo_in_recover) { 12614 membar_enter(); /* make sure RELOC flag visible */ 12615 while (sendmondo_in_recover) { 12616 drv_usecwait(1); 12617 membar_consumer(); 12618 } 12619 } 12620 12621 sfmmu_invalidate_ctx(sfmmup); 12622 } 12623 12624 /* ARGSUSED */ 12625 static int 12626 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12627 void *tsbinfo, pfn_t newpfn) 12628 { 12629 hatlock_t *hatlockp; 12630 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12631 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12632 12633 if (flags != HAT_POSTUNSUSPEND) 12634 return (0); 12635 12636 hatlockp = sfmmu_hat_enter(sfmmup); 12637 12638 SFMMU_STAT(sf_tsb_reloc); 12639 12640 /* 12641 * The process may have swapped out while we were relocating one 12642 * of its TSBs. If so, don't bother doing the setup since the 12643 * process can't be using the memory anymore. 12644 */ 12645 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12646 ASSERT(va == tsbinfop->tsb_va); 12647 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12648 12649 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12650 sfmmu_inv_tsb(tsbinfop->tsb_va, 12651 TSB_BYTES(tsbinfop->tsb_szc)); 12652 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12653 } 12654 } 12655 12656 membar_exit(); 12657 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12658 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12659 12660 sfmmu_hat_exit(hatlockp); 12661 12662 return (0); 12663 } 12664 12665 /* 12666 * Allocate and initialize a tsb_info structure. Note that we may or may not 12667 * allocate a TSB here, depending on the flags passed in. 12668 */ 12669 static int 12670 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12671 uint_t flags, sfmmu_t *sfmmup) 12672 { 12673 int err; 12674 12675 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12676 sfmmu_tsbinfo_cache, KM_SLEEP); 12677 12678 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12679 tsb_szc, flags, sfmmup)) != 0) { 12680 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12681 SFMMU_STAT(sf_tsb_allocfail); 12682 *tsbinfopp = NULL; 12683 return (err); 12684 } 12685 SFMMU_STAT(sf_tsb_alloc); 12686 12687 /* 12688 * Bump the TSB size counters for this TSB size. 
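 * (sfmmu_tsbsize_stat is simply treated as an array of int counters
 *  indexed by TSB size code in the statement below.)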
12689 */ 12690 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12691 return (0); 12692 } 12693 12694 static void 12695 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12696 { 12697 caddr_t tsbva = tsbinfo->tsb_va; 12698 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12699 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12700 vmem_t *vmp = tsbinfo->tsb_vmp; 12701 12702 /* 12703 * If we allocated this TSB from relocatable kernel memory, then we 12704 * need to uninstall the callback handler. 12705 */ 12706 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12707 uintptr_t slab_mask; 12708 caddr_t slab_vaddr; 12709 page_t **ppl; 12710 int ret; 12711 12712 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12713 if (tsb_size > MMU_PAGESIZE4M) 12714 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12715 else 12716 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12717 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12718 12719 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12720 ASSERT(ret == 0); 12721 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12722 0, NULL); 12723 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12724 } 12725 12726 if (kmem_cachep != NULL) { 12727 kmem_cache_free(kmem_cachep, tsbva); 12728 } else { 12729 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12730 } 12731 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12732 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12733 } 12734 12735 static void 12736 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12737 { 12738 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12739 sfmmu_tsb_free(tsbinfo); 12740 } 12741 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12742 12743 } 12744 12745 /* 12746 * Setup all the references to physical memory for this tsbinfo. 12747 * The underlying page(s) must be locked. 12748 */ 12749 static void 12750 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12751 { 12752 ASSERT(pfn != PFN_INVALID); 12753 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12754 12755 #ifndef sun4v 12756 if (tsbinfo->tsb_szc == 0) { 12757 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12758 PROT_WRITE|PROT_READ, TTE8K); 12759 } else { 12760 /* 12761 * Round down PA and use a large mapping; the handlers will 12762 * compute the TSB pointer at the correct offset into the 12763 * big virtual page. NOTE: this assumes all TSBs larger 12764 * than 8K must come from physically contiguous slabs of 12765 * size tsb_slab_size. 12766 */ 12767 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12768 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12769 } 12770 tsbinfo->tsb_pa = ptob(pfn); 12771 12772 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12773 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12774 12775 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 12776 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 12777 #else /* sun4v */ 12778 tsbinfo->tsb_pa = ptob(pfn); 12779 #endif /* sun4v */ 12780 } 12781 12782 12783 /* 12784 * Returns zero on success, ENOMEM if over the high water mark, 12785 * or EAGAIN if the caller needs to retry with a smaller TSB 12786 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 12787 * 12788 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 12789 * is specified and the TSB requested is PAGESIZE, though it 12790 * may sleep waiting for memory if sufficient memory is not 12791 * available. 
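 *
 * (A hypothetical caller sketch, not taken from this file, showing the
 *  intended EAGAIN retry behavior; szc is an assumed local size code:
 *
 *	while ((err = sfmmu_init_tsbinfo(tsbinfo, TSB8K, szc,
 *	    TSB_ALLOC, sfmmup)) == EAGAIN && szc > TSB_MIN_SZCODE)
 *		szc--;
 * )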
12792 */ 12793 static int 12794 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 12795 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 12796 { 12797 caddr_t vaddr = NULL; 12798 caddr_t slab_vaddr; 12799 uintptr_t slab_mask; 12800 int tsbbytes = TSB_BYTES(tsbcode); 12801 int lowmem = 0; 12802 struct kmem_cache *kmem_cachep = NULL; 12803 vmem_t *vmp = NULL; 12804 lgrp_id_t lgrpid = LGRP_NONE; 12805 pfn_t pfn; 12806 uint_t cbflags = HAC_SLEEP; 12807 page_t **pplist; 12808 int ret; 12809 12810 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 12811 if (tsbbytes > MMU_PAGESIZE4M) 12812 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12813 else 12814 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12815 12816 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 12817 flags |= TSB_ALLOC; 12818 12819 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 12820 12821 tsbinfo->tsb_sfmmu = sfmmup; 12822 12823 /* 12824 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 12825 * return. 12826 */ 12827 if ((flags & TSB_ALLOC) == 0) { 12828 tsbinfo->tsb_szc = tsbcode; 12829 tsbinfo->tsb_ttesz_mask = tteszmask; 12830 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 12831 tsbinfo->tsb_pa = -1; 12832 tsbinfo->tsb_tte.ll = 0; 12833 tsbinfo->tsb_next = NULL; 12834 tsbinfo->tsb_flags = TSB_SWAPPED; 12835 tsbinfo->tsb_cache = NULL; 12836 tsbinfo->tsb_vmp = NULL; 12837 return (0); 12838 } 12839 12840 #ifdef DEBUG 12841 /* 12842 * For debugging: 12843 * Randomly force allocation failures every tsb_alloc_mtbf 12844 * tries if TSB_FORCEALLOC is not specified. This will 12845 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 12846 * it is even, to allow testing of both failure paths... 12847 */ 12848 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 12849 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 12850 tsb_alloc_count = 0; 12851 tsb_alloc_fail_mtbf++; 12852 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 12853 } 12854 #endif /* DEBUG */ 12855 12856 /* 12857 * Enforce high water mark if we are not doing a forced allocation 12858 * and are not shrinking a process' TSB. 12859 */ 12860 if ((flags & TSB_SHRINK) == 0 && 12861 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 12862 if ((flags & TSB_FORCEALLOC) == 0) 12863 return (ENOMEM); 12864 lowmem = 1; 12865 } 12866 12867 /* 12868 * Allocate from the correct location based upon the size of the TSB 12869 * compared to the base page size, and what memory conditions dictate. 12870 * Note we always do nonblocking allocations from the TSB arena since 12871 * we don't want memory fragmentation to cause processes to block 12872 * indefinitely waiting for memory; until the kernel algorithms that 12873 * coalesce large pages are improved this is our best option. 
12874 * 12875 * Algorithm: 12876 * If allocating a "large" TSB (>8K), allocate from the 12877 * appropriate kmem_tsb_default_arena vmem arena 12878 * else if low on memory or the TSB_FORCEALLOC flag is set or 12879 * tsb_forceheap is set 12880 * Allocate from kernel heap via sfmmu_tsb8k_cache with 12881 * KM_SLEEP (never fails) 12882 * else 12883 * Allocate from appropriate sfmmu_tsb_cache with 12884 * KM_NOSLEEP 12885 * endif 12886 */ 12887 if (tsb_lgrp_affinity) 12888 lgrpid = lgrp_home_id(curthread); 12889 if (lgrpid == LGRP_NONE) 12890 lgrpid = 0; /* use lgrp of boot CPU */ 12891 12892 if (tsbbytes > MMU_PAGESIZE) { 12893 if (tsbbytes > MMU_PAGESIZE4M) { 12894 vmp = kmem_bigtsb_default_arena[lgrpid]; 12895 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12896 0, 0, NULL, NULL, VM_NOSLEEP); 12897 } else { 12898 vmp = kmem_tsb_default_arena[lgrpid]; 12899 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12900 0, 0, NULL, NULL, VM_NOSLEEP); 12901 } 12902 #ifdef DEBUG 12903 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 12904 #else /* !DEBUG */ 12905 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 12906 #endif /* DEBUG */ 12907 kmem_cachep = sfmmu_tsb8k_cache; 12908 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 12909 ASSERT(vaddr != NULL); 12910 } else { 12911 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 12912 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 12913 } 12914 12915 tsbinfo->tsb_cache = kmem_cachep; 12916 tsbinfo->tsb_vmp = vmp; 12917 12918 if (vaddr == NULL) { 12919 return (EAGAIN); 12920 } 12921 12922 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 12923 kmem_cachep = tsbinfo->tsb_cache; 12924 12925 /* 12926 * If we are allocating from outside the cage, then we need to 12927 * register a relocation callback handler. Note that for now 12928 * since pseudo mappings always hang off of the slab's root page, 12929 * we need only lock the first 8K of the TSB slab. This is a bit 12930 * hacky but it is good for performance. 12931 */ 12932 if (kmem_cachep != sfmmu_tsb8k_cache) { 12933 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 12934 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 12935 ASSERT(ret == 0); 12936 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 12937 cbflags, (void *)tsbinfo, &pfn, NULL); 12938 12939 /* 12940 * Need to free up resources if we could not successfully 12941 * add the callback function and return an error condition. 12942 */ 12943 if (ret != 0) { 12944 if (kmem_cachep) { 12945 kmem_cache_free(kmem_cachep, vaddr); 12946 } else { 12947 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 12948 } 12949 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 12950 S_WRITE); 12951 return (EAGAIN); 12952 } 12953 } else { 12954 /* 12955 * Since allocation of 8K TSBs from heap is rare and occurs 12956 * during memory pressure we allocate them from permanent 12957 * memory rather than using callbacks to get the PFN. 
12958 */ 12959 pfn = hat_getpfnum(kas.a_hat, vaddr); 12960 } 12961 12962 tsbinfo->tsb_va = vaddr; 12963 tsbinfo->tsb_szc = tsbcode; 12964 tsbinfo->tsb_ttesz_mask = tteszmask; 12965 tsbinfo->tsb_next = NULL; 12966 tsbinfo->tsb_flags = 0; 12967 12968 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 12969 12970 sfmmu_inv_tsb(vaddr, tsbbytes); 12971 12972 if (kmem_cachep != sfmmu_tsb8k_cache) { 12973 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 12974 } 12975 12976 return (0); 12977 } 12978 12979 /* 12980 * Initialize per cpu tsb and per cpu tsbmiss_area 12981 */ 12982 void 12983 sfmmu_init_tsbs(void) 12984 { 12985 int i; 12986 struct tsbmiss *tsbmissp; 12987 struct kpmtsbm *kpmtsbmp; 12988 #ifndef sun4v 12989 extern int dcache_line_mask; 12990 #endif /* sun4v */ 12991 extern uint_t vac_colors; 12992 12993 /* 12994 * Init. tsb miss area. 12995 */ 12996 tsbmissp = tsbmiss_area; 12997 12998 for (i = 0; i < NCPU; tsbmissp++, i++) { 12999 /* 13000 * initialize the tsbmiss area. 13001 * Do this for all possible CPUs as some may be added 13002 * while the system is running. There is no cost to this. 13003 */ 13004 tsbmissp->ksfmmup = ksfmmup; 13005 #ifndef sun4v 13006 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13007 #endif /* sun4v */ 13008 tsbmissp->khashstart = 13009 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13010 tsbmissp->uhashstart = 13011 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13012 tsbmissp->khashsz = khmehash_num; 13013 tsbmissp->uhashsz = uhmehash_num; 13014 } 13015 13016 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13017 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13018 13019 if (kpm_enable == 0) 13020 return; 13021 13022 /* -- Begin KPM specific init -- */ 13023 13024 if (kpm_smallpages) { 13025 /* 13026 * If we're using base pagesize pages for seg_kpm 13027 * mappings, we use the kernel TSB since we can't afford 13028 * to allocate a second huge TSB for these mappings. 13029 */ 13030 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13031 kpm_tsbsz = ktsb_szcode; 13032 kpmsm_tsbbase = kpm_tsbbase; 13033 kpmsm_tsbsz = kpm_tsbsz; 13034 } else { 13035 /* 13036 * In VAC conflict case, just put the entries in the 13037 * kernel 8K indexed TSB for now so we can find them. 13038 * This could really be changed in the future if we feel 13039 * the need... 13040 */ 13041 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13042 kpmsm_tsbsz = ktsb_szcode; 13043 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13044 kpm_tsbsz = ktsb4m_szcode; 13045 } 13046 13047 kpmtsbmp = kpmtsbm_area; 13048 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13049 /* 13050 * Initialize the kpmtsbm area. 13051 * Do this for all possible CPUs as some may be added 13052 * while the system is running. There is no cost to this. 13053 */ 13054 kpmtsbmp->vbase = kpm_vbase; 13055 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13056 kpmtsbmp->sz_shift = kpm_size_shift; 13057 kpmtsbmp->kpmp_shift = kpmp_shift; 13058 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13059 if (kpm_smallpages == 0) { 13060 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13061 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13062 } else { 13063 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13064 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13065 } 13066 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13067 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13068 #ifdef DEBUG 13069 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 13070 #endif /* DEBUG */ 13071 if (ktsb_phys) 13072 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13073 } 13074 13075 /* -- End KPM specific init -- */ 13076 } 13077 13078 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13079 struct tsb_info ktsb_info[2]; 13080 13081 /* 13082 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13083 */ 13084 void 13085 sfmmu_init_ktsbinfo() 13086 { 13087 ASSERT(ksfmmup != NULL); 13088 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13089 /* 13090 * Allocate tsbinfos for kernel and copy in data 13091 * to make debug easier and sun4v setup easier. 13092 */ 13093 ktsb_info[0].tsb_sfmmu = ksfmmup; 13094 ktsb_info[0].tsb_szc = ktsb_szcode; 13095 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13096 ktsb_info[0].tsb_va = ktsb_base; 13097 ktsb_info[0].tsb_pa = ktsb_pbase; 13098 ktsb_info[0].tsb_flags = 0; 13099 ktsb_info[0].tsb_tte.ll = 0; 13100 ktsb_info[0].tsb_cache = NULL; 13101 13102 ktsb_info[1].tsb_sfmmu = ksfmmup; 13103 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13104 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13105 ktsb_info[1].tsb_va = ktsb4m_base; 13106 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13107 ktsb_info[1].tsb_flags = 0; 13108 ktsb_info[1].tsb_tte.ll = 0; 13109 ktsb_info[1].tsb_cache = NULL; 13110 13111 /* Link them into ksfmmup. */ 13112 ktsb_info[0].tsb_next = &ktsb_info[1]; 13113 ktsb_info[1].tsb_next = NULL; 13114 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13115 13116 sfmmu_setup_tsbinfo(ksfmmup); 13117 } 13118 13119 /* 13120 * Cache the last value returned from va_to_pa(). If the VA specified 13121 * in the current call to cached_va_to_pa() maps to the same Page (as the 13122 * previous call to cached_va_to_pa()), then compute the PA using 13123 * cached info, else call va_to_pa(). 13124 * 13125 * Note: this function is neither MT-safe nor consistent in the presence 13126 * of multiple, interleaved threads. This function was created to enable 13127 * an optimization used during boot (at a point when there's only one thread 13128 * executing on the "boot CPU", and before startup_vm() has been called). 13129 */ 13130 static uint64_t 13131 cached_va_to_pa(void *vaddr) 13132 { 13133 static uint64_t prev_vaddr_base = 0; 13134 static uint64_t prev_pfn = 0; 13135 13136 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13137 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13138 } else { 13139 uint64_t pa = va_to_pa(vaddr); 13140 13141 if (pa != ((uint64_t)-1)) { 13142 /* 13143 * Computed physical address is valid. Cache its 13144 * related info for the next cached_va_to_pa() call. 13145 */ 13146 prev_pfn = pa & MMU_PAGEMASK; 13147 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13148 } 13149 13150 return (pa); 13151 } 13152 } 13153 13154 /* 13155 * Carve up our nucleus hblk region. We may allocate more hblks than 13156 * asked due to rounding errors but we are guaranteed to have at least 13157 * enough space to allocate the requested number of hblk8's and hblk1's. 
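 *
 * (Sketch of the split done below: hblk8_bound is set to
 *  size - nhblk1 * hme1blk_sz - hme8blk_sz, so the first loop carves
 *  hblk8's up to that bound and the second loop carves hblk1's out of
 *  the remainder; this is why the counts are asserted as j >= nhblk8
 *  and k >= nhblk1 rather than as exact equalities.)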
13158 */ 13159 void 13160 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 13161 { 13162 struct hme_blk *hmeblkp; 13163 size_t hme8blk_sz, hme1blk_sz; 13164 size_t i; 13165 size_t hblk8_bound; 13166 ulong_t j = 0, k = 0; 13167 13168 ASSERT(addr != NULL && size != 0); 13169 13170 /* Need to use proper structure alignment */ 13171 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 13172 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 13173 13174 nucleus_hblk8.list = (void *)addr; 13175 nucleus_hblk8.index = 0; 13176 13177 /* 13178 * Use as much memory as possible for hblk8's since we 13179 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 13180 * We need to hold back enough space for the hblk1's which 13181 * we'll allocate next. 13182 */ 13183 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 13184 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 13185 hmeblkp = (struct hme_blk *)addr; 13186 addr += hme8blk_sz; 13187 hmeblkp->hblk_nuc_bit = 1; 13188 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13189 } 13190 nucleus_hblk8.len = j; 13191 ASSERT(j >= nhblk8); 13192 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 13193 13194 nucleus_hblk1.list = (void *)addr; 13195 nucleus_hblk1.index = 0; 13196 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 13197 hmeblkp = (struct hme_blk *)addr; 13198 addr += hme1blk_sz; 13199 hmeblkp->hblk_nuc_bit = 1; 13200 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13201 } 13202 ASSERT(k >= nhblk1); 13203 nucleus_hblk1.len = k; 13204 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 13205 } 13206 13207 /* 13208 * This function is currently not supported on this platform. For what 13209 * it's supposed to do, see hat.c and hat_srmmu.c 13210 */ 13211 /* ARGSUSED */ 13212 faultcode_t 13213 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 13214 uint_t flags) 13215 { 13216 return (FC_NOSUPPORT); 13217 } 13218 13219 /* 13220 * Searches the mapping list of the page for a mapping of the same size. If none 13221 * is found, the corresponding bit is cleared in the p_index field. When large 13222 * pages are more prevalent in the system, we can maintain the mapping list 13223 * in order and we don't have to traverse the list each time. Just check the 13224 * next and prev entries, and if both are of different size, we clear the bit. 13225 */ 13226 static void 13227 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 13228 { 13229 struct sf_hment *sfhmep; 13230 struct hme_blk *hmeblkp; 13231 int index; 13232 pgcnt_t npgs; 13233 13234 ASSERT(ttesz > TTE8K); 13235 13236 ASSERT(sfmmu_mlist_held(pp)); 13237 13238 ASSERT(PP_ISMAPPED_LARGE(pp)); 13239 13240 /* 13241 * Traverse the mapping list looking for another mapping of the same size, 13242 * since we only want to clear the index field if all mappings of 13243 * that size are gone. 13244 */ 13245 13246 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 13247 if (IS_PAHME(sfhmep)) 13248 continue; 13249 hmeblkp = sfmmu_hmetohblk(sfhmep); 13250 if (hme_size(sfhmep) == ttesz) { 13251 /* 13252 * another mapping of the same size; don't clear the index. 13253 */ 13254 return; 13255 } 13256 } 13257 13258 /* 13259 * Clear the p_index bit for large page.
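 * For example, with an 8K base page size a 4M mapping spans
 * TTEPAGES(TTE4M) = 512 constituent pages, and the bit returned by
 * PAGESZ_TO_INDEX(TTE4M) is cleared in each of their p_index fields.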
13260 */ 13261 index = PAGESZ_TO_INDEX(ttesz); 13262 npgs = TTEPAGES(ttesz); 13263 while (npgs-- > 0) { 13264 ASSERT(pp->p_index & index); 13265 pp->p_index &= ~index; 13266 pp = PP_PAGENEXT(pp); 13267 } 13268 } 13269 13270 /* 13271 * return supported features 13272 */ 13273 /* ARGSUSED */ 13274 int 13275 hat_supported(enum hat_features feature, void *arg) 13276 { 13277 switch (feature) { 13278 case HAT_SHARED_PT: 13279 case HAT_DYNAMIC_ISM_UNMAP: 13280 case HAT_VMODSORT: 13281 return (1); 13282 case HAT_SHARED_REGIONS: 13283 if (shctx_on) 13284 return (1); 13285 else 13286 return (0); 13287 default: 13288 return (0); 13289 } 13290 } 13291 13292 void 13293 hat_enter(struct hat *hat) 13294 { 13295 hatlock_t *hatlockp; 13296 13297 if (hat != ksfmmup) { 13298 hatlockp = TSB_HASH(hat); 13299 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13300 } 13301 } 13302 13303 void 13304 hat_exit(struct hat *hat) 13305 { 13306 hatlock_t *hatlockp; 13307 13308 if (hat != ksfmmup) { 13309 hatlockp = TSB_HASH(hat); 13310 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13311 } 13312 } 13313 13314 /*ARGSUSED*/ 13315 void 13316 hat_reserve(struct as *as, caddr_t addr, size_t len) 13317 { 13318 } 13319 13320 static void 13321 hat_kstat_init(void) 13322 { 13323 kstat_t *ksp; 13324 13325 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13326 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13327 KSTAT_FLAG_VIRTUAL); 13328 if (ksp) { 13329 ksp->ks_data = (void *) &sfmmu_global_stat; 13330 kstat_install(ksp); 13331 } 13332 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13333 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13334 KSTAT_FLAG_VIRTUAL); 13335 if (ksp) { 13336 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13337 kstat_install(ksp); 13338 } 13339 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13340 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13341 KSTAT_FLAG_WRITABLE); 13342 if (ksp) { 13343 ksp->ks_update = sfmmu_kstat_percpu_update; 13344 kstat_install(ksp); 13345 } 13346 } 13347 13348 /* ARGSUSED */ 13349 static int 13350 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13351 { 13352 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13353 struct tsbmiss *tsbm = tsbmiss_area; 13354 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13355 int i; 13356 13357 ASSERT(cpu_kstat); 13358 if (rw == KSTAT_READ) { 13359 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13360 cpu_kstat->sf_itlb_misses = 0; 13361 cpu_kstat->sf_dtlb_misses = 0; 13362 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13363 tsbm->uprot_traps; 13364 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13365 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13366 cpu_kstat->sf_tsb_hits = 0; 13367 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13368 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13369 } 13370 } else { 13371 /* KSTAT_WRITE is used to clear stats */ 13372 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13373 tsbm->utsb_misses = 0; 13374 tsbm->ktsb_misses = 0; 13375 tsbm->uprot_traps = 0; 13376 tsbm->kprot_traps = 0; 13377 kpmtsbm->kpm_dtlb_misses = 0; 13378 kpmtsbm->kpm_tsb_misses = 0; 13379 } 13380 } 13381 return (0); 13382 } 13383 13384 #ifdef DEBUG 13385 13386 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13387 13388 /* 13389 * A tte checker. *orig_old is the value we read before cas. 13390 * *cur is the value returned by cas. 13391 * *new is the desired value when we do the cas. 13392 * 13393 * *hmeblkp is currently unused. 
13394 */ 13395 13396 /* ARGSUSED */ 13397 void 13398 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13399 { 13400 pfn_t i, j, k; 13401 int cpuid = CPU->cpu_id; 13402 13403 gorig[cpuid] = orig_old; 13404 gcur[cpuid] = cur; 13405 gnew[cpuid] = new; 13406 13407 #ifdef lint 13408 hmeblkp = hmeblkp; 13409 #endif 13410 13411 if (TTE_IS_VALID(orig_old)) { 13412 if (TTE_IS_VALID(cur)) { 13413 i = TTE_TO_TTEPFN(orig_old); 13414 j = TTE_TO_TTEPFN(cur); 13415 k = TTE_TO_TTEPFN(new); 13416 if (i != j) { 13417 /* remap error? */ 13418 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13419 } 13420 13421 if (i != k) { 13422 /* remap error? */ 13423 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13424 } 13425 } else { 13426 if (TTE_IS_VALID(new)) { 13427 panic("chk_tte: invalid cur? "); 13428 } 13429 13430 i = TTE_TO_TTEPFN(orig_old); 13431 k = TTE_TO_TTEPFN(new); 13432 if (i != k) { 13433 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13434 } 13435 } 13436 } else { 13437 if (TTE_IS_VALID(cur)) { 13438 j = TTE_TO_TTEPFN(cur); 13439 if (TTE_IS_VALID(new)) { 13440 k = TTE_TO_TTEPFN(new); 13441 if (j != k) { 13442 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13443 j, k); 13444 } 13445 } else { 13446 panic("chk_tte: why here?"); 13447 } 13448 } else { 13449 if (!TTE_IS_VALID(new)) { 13450 panic("chk_tte: why here2 ?"); 13451 } 13452 } 13453 } 13454 } 13455 13456 #endif /* DEBUG */ 13457 13458 extern void prefetch_tsbe_read(struct tsbe *); 13459 extern void prefetch_tsbe_write(struct tsbe *); 13460 13461 13462 /* 13463 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13464 * us optimal performance on Cheetah+. You can only have 8 outstanding 13465 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13466 * prefetch to make the most utilization of the prefetch capability. 13467 */ 13468 #define TSBE_PREFETCH_STRIDE (7) 13469 13470 void 13471 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13472 { 13473 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13474 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13475 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13476 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13477 struct tsbe *old; 13478 struct tsbe *new; 13479 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13480 uint64_t va; 13481 int new_offset; 13482 int i; 13483 int vpshift; 13484 int last_prefetch; 13485 13486 if (old_bytes == new_bytes) { 13487 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13488 } else { 13489 13490 /* 13491 * A TSBE is 16 bytes which means there are four TSBE's per 13492 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13493 */ 13494 old = (struct tsbe *)old_tsbinfo->tsb_va; 13495 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13496 for (i = 0; i < old_entries; i++, old++) { 13497 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13498 prefetch_tsbe_read(old); 13499 if (!old->tte_tag.tag_invalid) { 13500 /* 13501 * We have a valid TTE to remap. Check the 13502 * size. We won't remap 64K or 512K TTEs 13503 * because they span more than one TSB entry 13504 * and are indexed using an 8K virt. page. 13505 * Ditto for 32M and 256M TTEs. 
13506 */ 13507 if (TTE_CSZ(&old->tte_data) == TTE64K || 13508 TTE_CSZ(&old->tte_data) == TTE512K) 13509 continue; 13510 if (mmu_page_sizes == max_mmu_page_sizes) { 13511 if (TTE_CSZ(&old->tte_data) == TTE32M || 13512 TTE_CSZ(&old->tte_data) == TTE256M) 13513 continue; 13514 } 13515 13516 /* clear the lower 22 bits of the va */ 13517 va = *(uint64_t *)old << 22; 13518 /* turn va into a virtual pfn */ 13519 va >>= 22 - TSB_START_SIZE; 13520 /* 13521 * or in bits from the offset in the tsb 13522 * to get the real virtual pfn. These 13523 * correspond to bits [21:13] in the va 13524 */ 13525 vpshift = 13526 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13527 0x1ff; 13528 va |= (i << vpshift); 13529 va >>= vpshift; 13530 new_offset = va & (new_entries - 1); 13531 new = new_base + new_offset; 13532 prefetch_tsbe_write(new); 13533 *new = *old; 13534 } 13535 } 13536 } 13537 } 13538 13539 /* 13540 * unused in sfmmu 13541 */ 13542 void 13543 hat_dump(void) 13544 { 13545 } 13546 13547 /* 13548 * Called when a thread is exiting and we have switched to the kernel address 13549 * space. Perform the same VM initialization resume() uses when switching 13550 * processes. 13551 * 13552 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13553 * we call it anyway in case the semantics change in the future. 13554 */ 13555 /*ARGSUSED*/ 13556 void 13557 hat_thread_exit(kthread_t *thd) 13558 { 13559 uint_t pgsz_cnum; 13560 uint_t pstate_save; 13561 13562 ASSERT(thd->t_procp->p_as == &kas); 13563 13564 pgsz_cnum = KCONTEXT; 13565 #ifdef sun4u 13566 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13567 #endif 13568 13569 /* 13570 * Note that sfmmu_load_mmustate() is currently a no-op for 13571 * kernel threads. We need to disable interrupts here, 13572 * simply because otherwise sfmmu_load_mmustate() would panic 13573 * if the caller does not disable interrupts. 13574 */ 13575 pstate_save = sfmmu_disable_intrs(); 13576 13577 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13578 sfmmu_setctx_sec(pgsz_cnum); 13579 sfmmu_load_mmustate(ksfmmup); 13580 sfmmu_enable_intrs(pstate_save); 13581 } 13582 13583 13584 /* 13585 * SRD support 13586 */ 13587 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13588 (((uintptr_t)(vp)) >> 11)) & \ 13589 srd_hashmask) 13590 13591 /* 13592 * Attach the process to the srd struct associated with the exec vnode 13593 * from which the process is started. 
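 * (Note on the locking pattern below: the bucket is searched under
 *  srdb_lock when it is non-empty, the lock is dropped to allocate a
 *  new srd, and the bucket is then re-checked under the lock before
 *  inserting, so a racing thread that inserted the same vnode's srd
 *  first wins and the newly allocated srd is freed.)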
13594 */ 13595 void 13596 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13597 { 13598 uint_t hash = SRD_HASH_FUNCTION(evp); 13599 sf_srd_t *srdp; 13600 sf_srd_t *newsrdp; 13601 13602 ASSERT(sfmmup != ksfmmup); 13603 ASSERT(sfmmup->sfmmu_srdp == NULL); 13604 13605 if (!shctx_on) { 13606 return; 13607 } 13608 13609 VN_HOLD(evp); 13610 13611 if (srd_buckets[hash].srdb_srdp != NULL) { 13612 mutex_enter(&srd_buckets[hash].srdb_lock); 13613 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13614 srdp = srdp->srd_hash) { 13615 if (srdp->srd_evp == evp) { 13616 ASSERT(srdp->srd_refcnt >= 0); 13617 sfmmup->sfmmu_srdp = srdp; 13618 atomic_inc_32( 13619 (volatile uint_t *)&srdp->srd_refcnt); 13620 mutex_exit(&srd_buckets[hash].srdb_lock); 13621 return; 13622 } 13623 } 13624 mutex_exit(&srd_buckets[hash].srdb_lock); 13625 } 13626 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13627 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13628 13629 newsrdp->srd_evp = evp; 13630 newsrdp->srd_refcnt = 1; 13631 newsrdp->srd_hmergnfree = NULL; 13632 newsrdp->srd_ismrgnfree = NULL; 13633 13634 mutex_enter(&srd_buckets[hash].srdb_lock); 13635 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13636 srdp = srdp->srd_hash) { 13637 if (srdp->srd_evp == evp) { 13638 ASSERT(srdp->srd_refcnt >= 0); 13639 sfmmup->sfmmu_srdp = srdp; 13640 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt); 13641 mutex_exit(&srd_buckets[hash].srdb_lock); 13642 kmem_cache_free(srd_cache, newsrdp); 13643 return; 13644 } 13645 } 13646 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13647 srd_buckets[hash].srdb_srdp = newsrdp; 13648 sfmmup->sfmmu_srdp = newsrdp; 13649 13650 mutex_exit(&srd_buckets[hash].srdb_lock); 13651 13652 } 13653 13654 static void 13655 sfmmu_leave_srd(sfmmu_t *sfmmup) 13656 { 13657 vnode_t *evp; 13658 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13659 uint_t hash; 13660 sf_srd_t **prev_srdpp; 13661 sf_region_t *rgnp; 13662 sf_region_t *nrgnp; 13663 #ifdef DEBUG 13664 int rgns = 0; 13665 #endif 13666 int i; 13667 13668 ASSERT(sfmmup != ksfmmup); 13669 ASSERT(srdp != NULL); 13670 ASSERT(srdp->srd_refcnt > 0); 13671 ASSERT(sfmmup->sfmmu_scdp == NULL); 13672 ASSERT(sfmmup->sfmmu_free == 1); 13673 13674 sfmmup->sfmmu_srdp = NULL; 13675 evp = srdp->srd_evp; 13676 ASSERT(evp != NULL); 13677 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) { 13678 VN_RELE(evp); 13679 return; 13680 } 13681 13682 hash = SRD_HASH_FUNCTION(evp); 13683 mutex_enter(&srd_buckets[hash].srdb_lock); 13684 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13685 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13686 if (srdp->srd_evp == evp) { 13687 break; 13688 } 13689 } 13690 if (srdp == NULL || srdp->srd_refcnt) { 13691 mutex_exit(&srd_buckets[hash].srdb_lock); 13692 VN_RELE(evp); 13693 return; 13694 } 13695 *prev_srdpp = srdp->srd_hash; 13696 mutex_exit(&srd_buckets[hash].srdb_lock); 13697 13698 ASSERT(srdp->srd_refcnt == 0); 13699 VN_RELE(evp); 13700 13701 #ifdef DEBUG 13702 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13703 ASSERT(srdp->srd_rgnhash[i] == NULL); 13704 } 13705 #endif /* DEBUG */ 13706 13707 /* free each hme regions in the srd */ 13708 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13709 nrgnp = rgnp->rgn_next; 13710 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13711 ASSERT(rgnp->rgn_refcnt == 0); 13712 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13713 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13714 ASSERT(rgnp->rgn_hmeflags == 0); 13715 
ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13716 #ifdef DEBUG 13717 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13718 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13719 } 13720 rgns++; 13721 #endif /* DEBUG */ 13722 kmem_cache_free(region_cache, rgnp); 13723 } 13724 ASSERT(rgns == srdp->srd_next_hmerid); 13725 13726 #ifdef DEBUG 13727 rgns = 0; 13728 #endif 13729 /* free each ism rgns in the srd */ 13730 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13731 nrgnp = rgnp->rgn_next; 13732 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13733 ASSERT(rgnp->rgn_refcnt == 0); 13734 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13735 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13736 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13737 #ifdef DEBUG 13738 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13739 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13740 } 13741 rgns++; 13742 #endif /* DEBUG */ 13743 kmem_cache_free(region_cache, rgnp); 13744 } 13745 ASSERT(rgns == srdp->srd_next_ismrid); 13746 ASSERT(srdp->srd_ismbusyrgns == 0); 13747 ASSERT(srdp->srd_hmebusyrgns == 0); 13748 13749 srdp->srd_next_ismrid = 0; 13750 srdp->srd_next_hmerid = 0; 13751 13752 bzero((void *)srdp->srd_ismrgnp, 13753 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13754 bzero((void *)srdp->srd_hmergnp, 13755 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13756 13757 ASSERT(srdp->srd_scdp == NULL); 13758 kmem_cache_free(srd_cache, srdp); 13759 } 13760 13761 /* ARGSUSED */ 13762 static int 13763 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13764 { 13765 sf_srd_t *srdp = (sf_srd_t *)buf; 13766 bzero(buf, sizeof (*srdp)); 13767 13768 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13769 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13770 return (0); 13771 } 13772 13773 /* ARGSUSED */ 13774 static void 13775 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 13776 { 13777 sf_srd_t *srdp = (sf_srd_t *)buf; 13778 13779 mutex_destroy(&srdp->srd_mutex); 13780 mutex_destroy(&srdp->srd_scd_mutex); 13781 } 13782 13783 /* 13784 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 13785 * at the same time for the same process and address range. This is ensured by 13786 * the fact that address space is locked as writer when a process joins the 13787 * regions. Therefore there's no need to hold an srd lock during the entire 13788 * execution of hat_join_region()/hat_leave_region(). 13789 */ 13790 13791 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 13792 (((uintptr_t)(obj)) >> 11)) & \ 13793 srd_rgn_hashmask) 13794 /* 13795 * This routine implements the shared context functionality required when 13796 * attaching a segment to an address space. It must be called from 13797 * hat_share() for D(ISM) segments and from segvn_create() for segments 13798 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 13799 * which is saved in the private segment data for hme segments and 13800 * the ism_map structure for ism segments. 
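 *
 * The cookie is opaque to callers; HAT_INVALID_REGION_COOKIE means no shared
 * region was set up and the caller falls back to private mappings.  A hedged
 * sketch of typical caller usage (the caller-side names below are
 * illustrative only, not taken from segvn):
 *
 *	hat_region_cookie_t cookie;
 *
 *	cookie = hat_join_region(hat, seg->s_base, seg->s_size, vnode,
 *	    offset, prot, pgszc, NULL, HAT_REGION_TEXT);
 *	if (cookie != HAT_INVALID_REGION_COOKIE)
 *		priv->rcookie = cookie;
 *
 * The saved cookie is later handed back to hat_leave_region().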
13801 */ 13802 hat_region_cookie_t 13803 hat_join_region(struct hat *sfmmup, caddr_t r_saddr, size_t r_size, 13804 void *r_obj, u_offset_t r_objoff, uchar_t r_perm, uchar_t r_pgszc, 13805 hat_rgn_cb_func_t r_cb_function, uint_t flags) 13806 { 13807 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13808 uint_t rhash; 13809 uint_t rid; 13810 hatlock_t *hatlockp; 13811 sf_region_t *rgnp; 13812 sf_region_t *new_rgnp = NULL; 13813 int i; 13814 uint16_t *nextidp; 13815 sf_region_t **freelistp; 13816 int maxids; 13817 sf_region_t **rarrp; 13818 uint16_t *busyrgnsp; 13819 ulong_t rttecnt; 13820 uchar_t tteflag; 13821 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 13822 int text = (r_type == HAT_REGION_TEXT); 13823 13824 if (srdp == NULL || r_size == 0) { 13825 return (HAT_INVALID_REGION_COOKIE); 13826 } 13827 13828 ASSERT(sfmmup != ksfmmup); 13829 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 13830 ASSERT(srdp->srd_refcnt > 0); 13831 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 13832 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 13833 ASSERT(r_pgszc < mmu_page_sizes); 13834 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 13835 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 13836 panic("hat_join_region: region addr or size is not aligned\n"); 13837 } 13838 13839 13840 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 13841 SFMMU_REGION_HME; 13842 /* 13843 * Currently only support shared hmes for the read only main text 13844 * region. 13845 */ 13846 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 13847 (r_perm & PROT_WRITE))) { 13848 return (HAT_INVALID_REGION_COOKIE); 13849 } 13850 13851 rhash = RGN_HASH_FUNCTION(r_obj); 13852 13853 if (r_type == SFMMU_REGION_ISM) { 13854 nextidp = &srdp->srd_next_ismrid; 13855 freelistp = &srdp->srd_ismrgnfree; 13856 maxids = SFMMU_MAX_ISM_REGIONS; 13857 rarrp = srdp->srd_ismrgnp; 13858 busyrgnsp = &srdp->srd_ismbusyrgns; 13859 } else { 13860 nextidp = &srdp->srd_next_hmerid; 13861 freelistp = &srdp->srd_hmergnfree; 13862 maxids = SFMMU_MAX_HME_REGIONS; 13863 rarrp = srdp->srd_hmergnp; 13864 busyrgnsp = &srdp->srd_hmebusyrgns; 13865 } 13866 13867 mutex_enter(&srdp->srd_mutex); 13868 13869 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 13870 rgnp = rgnp->rgn_hash) { 13871 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 13872 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 13873 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 13874 break; 13875 } 13876 } 13877 13878 rfound: 13879 if (rgnp != NULL) { 13880 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 13881 ASSERT(rgnp->rgn_cb_function == r_cb_function); 13882 ASSERT(rgnp->rgn_refcnt >= 0); 13883 rid = rgnp->rgn_id; 13884 ASSERT(rid < maxids); 13885 ASSERT(rarrp[rid] == rgnp); 13886 ASSERT(rid < *nextidp); 13887 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt); 13888 mutex_exit(&srdp->srd_mutex); 13889 if (new_rgnp != NULL) { 13890 kmem_cache_free(region_cache, new_rgnp); 13891 } 13892 if (r_type == SFMMU_REGION_HME) { 13893 int myjoin = 13894 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 13895 13896 sfmmu_link_to_hmeregion(sfmmup, rgnp); 13897 /* 13898 * bitmap should be updated after linking sfmmu on 13899 * region list so that pageunload() doesn't skip 13900 * TSB/TLB flush. As soon as bitmap is updated another 13901 * thread in this process can already start accessing 13902 * this region. 13903 */ 13904 /* 13905 * Normally ttecnt accounting is done as part of 13906 * pagefault handling. 
But a process may not take any 13907 * pagefaults on shared hmeblks created by some other 13908 * process. To compensate for this assume that the 13909 * entire region will end up faulted in using 13910 * the region's pagesize. 13911 * 13912 */ 13913 if (r_pgszc > TTE8K) { 13914 tteflag = 1 << r_pgszc; 13915 if (disable_large_pages & tteflag) { 13916 tteflag = 0; 13917 } 13918 } else { 13919 tteflag = 0; 13920 } 13921 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 13922 hatlockp = sfmmu_hat_enter(sfmmup); 13923 sfmmup->sfmmu_rtteflags |= tteflag; 13924 sfmmu_hat_exit(hatlockp); 13925 } 13926 hatlockp = sfmmu_hat_enter(sfmmup); 13927 13928 /* 13929 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 13930 * region to allow for large page allocation failure. 13931 */ 13932 if (r_pgszc >= TTE4M) { 13933 sfmmup->sfmmu_tsb0_4minflcnt += 13934 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 13935 } 13936 13937 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 13938 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 13939 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 13940 rttecnt); 13941 13942 if (text && r_pgszc >= TTE4M && 13943 (tteflag || ((disable_large_pages >> TTE4M) & 13944 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 13945 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 13946 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 13947 } 13948 13949 sfmmu_hat_exit(hatlockp); 13950 /* 13951 * On Panther we need to make sure TLB is programmed 13952 * to accept 32M/256M pages. Call 13953 * sfmmu_check_page_sizes() now to make sure TLB is 13954 * setup before making hmeregions visible to other 13955 * threads. 13956 */ 13957 sfmmu_check_page_sizes(sfmmup, 1); 13958 hatlockp = sfmmu_hat_enter(sfmmup); 13959 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 13960 13961 /* 13962 * if context is invalid tsb miss exception code will 13963 * call sfmmu_check_page_sizes() and update tsbmiss 13964 * area later. 13965 */ 13966 kpreempt_disable(); 13967 if (myjoin && 13968 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 13969 != INVALID_CONTEXT)) { 13970 struct tsbmiss *tsbmp; 13971 13972 tsbmp = &tsbmiss_area[CPU->cpu_id]; 13973 ASSERT(sfmmup == tsbmp->usfmmup); 13974 BT_SET(tsbmp->shmermap, rid); 13975 if (r_pgszc > TTE64K) { 13976 tsbmp->uhat_rtteflags |= tteflag; 13977 } 13978 13979 } 13980 kpreempt_enable(); 13981 13982 sfmmu_hat_exit(hatlockp); 13983 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 13984 HAT_INVALID_REGION_COOKIE); 13985 } else { 13986 hatlockp = sfmmu_hat_enter(sfmmup); 13987 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 13988 sfmmu_hat_exit(hatlockp); 13989 } 13990 ASSERT(rid < maxids); 13991 13992 if (r_type == SFMMU_REGION_ISM) { 13993 sfmmu_find_scd(sfmmup); 13994 } 13995 return ((hat_region_cookie_t)((uint64_t)rid)); 13996 } 13997 13998 ASSERT(new_rgnp == NULL); 13999 14000 if (*busyrgnsp >= maxids) { 14001 mutex_exit(&srdp->srd_mutex); 14002 return (HAT_INVALID_REGION_COOKIE); 14003 } 14004 14005 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14006 if (*freelistp != NULL) { 14007 rgnp = *freelistp; 14008 *freelistp = rgnp->rgn_next; 14009 ASSERT(rgnp->rgn_id < *nextidp); 14010 ASSERT(rgnp->rgn_id < maxids); 14011 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14012 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14013 == r_type); 14014 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14015 ASSERT(rgnp->rgn_hmeflags == 0); 14016 } else { 14017 /* 14018 * release local locks before memory allocation. 
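 * Dropping srd_mutex around the KM_SLEEP allocation means another thread may
 * insert an identical region first, so after reacquiring the mutex the hash
 * chain is searched again and any duplicate allocation is freed.  A minimal
 * sketch of this lookup / unlocked-allocate / re-lookup pattern, with
 * generic names used purely for illustration:
 *
 *	struct obj *obj, *newobj = NULL;
 *
 *	mutex_enter(&lock);
 *	obj = lookup(key);
 *	if (obj == NULL) {
 *		mutex_exit(&lock);
 *		newobj = kmem_cache_alloc(cache, KM_SLEEP);
 *		mutex_enter(&lock);
 *		obj = lookup(key);
 *		if (obj == NULL) {
 *			insert(newobj, key);
 *			obj = newobj;
 *			newobj = NULL;
 *		}
 *	}
 *	mutex_exit(&lock);
 *	if (newobj != NULL)
 *		kmem_cache_free(cache, newobj);
 *
 * The second lookup catches the case where another thread raced us while the
 * lock was dropped.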
14019 */ 14020 mutex_exit(&srdp->srd_mutex); 14021 14022 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14023 14024 mutex_enter(&srdp->srd_mutex); 14025 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14026 rgnp = rgnp->rgn_hash) { 14027 if (rgnp->rgn_saddr == r_saddr && 14028 rgnp->rgn_size == r_size && 14029 rgnp->rgn_obj == r_obj && 14030 rgnp->rgn_objoff == r_objoff && 14031 rgnp->rgn_perm == r_perm && 14032 rgnp->rgn_pgszc == r_pgszc) { 14033 break; 14034 } 14035 } 14036 if (rgnp != NULL) { 14037 goto rfound; 14038 } 14039 14040 if (*nextidp >= maxids) { 14041 mutex_exit(&srdp->srd_mutex); 14042 goto fail; 14043 } 14044 rgnp = new_rgnp; 14045 new_rgnp = NULL; 14046 rgnp->rgn_id = (*nextidp)++; 14047 ASSERT(rgnp->rgn_id < maxids); 14048 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14049 rarrp[rgnp->rgn_id] = rgnp; 14050 } 14051 14052 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14053 ASSERT(rgnp->rgn_hmeflags == 0); 14054 #ifdef DEBUG 14055 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14056 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14057 } 14058 #endif 14059 rgnp->rgn_saddr = r_saddr; 14060 rgnp->rgn_size = r_size; 14061 rgnp->rgn_obj = r_obj; 14062 rgnp->rgn_objoff = r_objoff; 14063 rgnp->rgn_perm = r_perm; 14064 rgnp->rgn_pgszc = r_pgszc; 14065 rgnp->rgn_flags = r_type; 14066 rgnp->rgn_refcnt = 0; 14067 rgnp->rgn_cb_function = r_cb_function; 14068 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14069 srdp->srd_rgnhash[rhash] = rgnp; 14070 (*busyrgnsp)++; 14071 ASSERT(*busyrgnsp <= maxids); 14072 goto rfound; 14073 14074 fail: 14075 ASSERT(new_rgnp != NULL); 14076 kmem_cache_free(region_cache, new_rgnp); 14077 return (HAT_INVALID_REGION_COOKIE); 14078 } 14079 14080 /* 14081 * This function implements the shared context functionality required 14082 * when detaching a segment from an address space. It must be called 14083 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14084 * for segments with a valid region_cookie. 14085 * It will also be called from all seg_vn routines which change a 14086 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14087 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14088 * from segvn_fault(). 14089 */ 14090 void 14091 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14092 { 14093 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14094 sf_scd_t *scdp; 14095 uint_t rhash; 14096 uint_t rid = (uint_t)((uint64_t)rcookie); 14097 hatlock_t *hatlockp = NULL; 14098 sf_region_t *rgnp; 14099 sf_region_t **prev_rgnpp; 14100 sf_region_t *cur_rgnp; 14101 void *r_obj; 14102 int i; 14103 caddr_t r_saddr; 14104 caddr_t r_eaddr; 14105 size_t r_size; 14106 uchar_t r_pgszc; 14107 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14108 14109 ASSERT(sfmmup != ksfmmup); 14110 ASSERT(srdp != NULL); 14111 ASSERT(srdp->srd_refcnt > 0); 14112 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14113 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14114 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14115 14116 r_type = (r_type == HAT_REGION_ISM) ? 
SFMMU_REGION_ISM : 14117 SFMMU_REGION_HME; 14118 14119 if (r_type == SFMMU_REGION_ISM) { 14120 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14121 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14122 rgnp = srdp->srd_ismrgnp[rid]; 14123 } else { 14124 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14125 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14126 rgnp = srdp->srd_hmergnp[rid]; 14127 } 14128 ASSERT(rgnp != NULL); 14129 ASSERT(rgnp->rgn_id == rid); 14130 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14131 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14132 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as)); 14133 14134 if (sfmmup->sfmmu_free) { 14135 ulong_t rttecnt; 14136 r_pgszc = rgnp->rgn_pgszc; 14137 r_size = rgnp->rgn_size; 14138 14139 ASSERT(sfmmup->sfmmu_scdp == NULL); 14140 if (r_type == SFMMU_REGION_ISM) { 14141 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14142 } else { 14143 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14144 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14145 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14146 14147 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14148 -rttecnt); 14149 14150 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14151 } 14152 } else if (r_type == SFMMU_REGION_ISM) { 14153 hatlockp = sfmmu_hat_enter(sfmmup); 14154 ASSERT(rid < srdp->srd_next_ismrid); 14155 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14156 scdp = sfmmup->sfmmu_scdp; 14157 if (scdp != NULL && 14158 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14159 sfmmu_leave_scd(sfmmup, r_type); 14160 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14161 } 14162 sfmmu_hat_exit(hatlockp); 14163 } else { 14164 ulong_t rttecnt; 14165 r_pgszc = rgnp->rgn_pgszc; 14166 r_saddr = rgnp->rgn_saddr; 14167 r_size = rgnp->rgn_size; 14168 r_eaddr = r_saddr + r_size; 14169 14170 ASSERT(r_type == SFMMU_REGION_HME); 14171 hatlockp = sfmmu_hat_enter(sfmmup); 14172 ASSERT(rid < srdp->srd_next_hmerid); 14173 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14174 14175 /* 14176 * If region is part of an SCD call sfmmu_leave_scd(). 14177 * Otherwise if process is not exiting and has valid context 14178 * just drop the context on the floor to lose stale TLB 14179 * entries and force the update of tsb miss area to reflect 14180 * the new region map. After that clean our TSB entries. 14181 */ 14182 scdp = sfmmup->sfmmu_scdp; 14183 if (scdp != NULL && 14184 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14185 sfmmu_leave_scd(sfmmup, r_type); 14186 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14187 } 14188 sfmmu_invalidate_ctx(sfmmup); 14189 14190 i = TTE8K; 14191 while (i < mmu_page_sizes) { 14192 if (rgnp->rgn_ttecnt[i] != 0) { 14193 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14194 r_eaddr, i); 14195 if (i < TTE4M) { 14196 i = TTE4M; 14197 continue; 14198 } else { 14199 break; 14200 } 14201 } 14202 i++; 14203 } 14204 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. 
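 * For example, with a 4M region mapped at r_pgszc == TTE4M,
 * hat_join_region() charged sfmmu_tsb0_4minflcnt with
 * r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2), i.e. (4M / 8K) / 4 = 128 entries,
 * to leave room in the 8K-indexed TSB in case large-page allocation fails;
 * that same amount is subtracted again here.  The mapping count removed from
 * sfmmu_ttecnt just below is r_size >> TTE_PAGE_SHIFT(r_pgszc), i.e. a
 * single 4M translation in this example.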
*/ 14205 if (r_pgszc >= TTE4M) { 14206 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14207 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14208 rttecnt); 14209 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14210 } 14211 14212 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14213 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14214 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14215 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14216 14217 sfmmu_hat_exit(hatlockp); 14218 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14219 /* sfmmup left the scd, grow private tsb */ 14220 sfmmu_check_page_sizes(sfmmup, 1); 14221 } else { 14222 sfmmu_check_page_sizes(sfmmup, 0); 14223 } 14224 } 14225 14226 if (r_type == SFMMU_REGION_HME) { 14227 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14228 } 14229 14230 r_obj = rgnp->rgn_obj; 14231 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) { 14232 return; 14233 } 14234 14235 /* 14236 * looks like nobody uses this region anymore. Free it. 14237 */ 14238 rhash = RGN_HASH_FUNCTION(r_obj); 14239 mutex_enter(&srdp->srd_mutex); 14240 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14241 (cur_rgnp = *prev_rgnpp) != NULL; 14242 prev_rgnpp = &cur_rgnp->rgn_hash) { 14243 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14244 break; 14245 } 14246 } 14247 14248 if (cur_rgnp == NULL) { 14249 mutex_exit(&srdp->srd_mutex); 14250 return; 14251 } 14252 14253 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14254 *prev_rgnpp = rgnp->rgn_hash; 14255 if (r_type == SFMMU_REGION_ISM) { 14256 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14257 ASSERT(rid < srdp->srd_next_ismrid); 14258 rgnp->rgn_next = srdp->srd_ismrgnfree; 14259 srdp->srd_ismrgnfree = rgnp; 14260 ASSERT(srdp->srd_ismbusyrgns > 0); 14261 srdp->srd_ismbusyrgns--; 14262 mutex_exit(&srdp->srd_mutex); 14263 return; 14264 } 14265 mutex_exit(&srdp->srd_mutex); 14266 14267 /* 14268 * Destroy region's hmeblks. 14269 */ 14270 sfmmu_unload_hmeregion(srdp, rgnp); 14271 14272 rgnp->rgn_hmeflags = 0; 14273 14274 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14275 ASSERT(rgnp->rgn_id == rid); 14276 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14277 rgnp->rgn_ttecnt[i] = 0; 14278 } 14279 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14280 mutex_enter(&srdp->srd_mutex); 14281 ASSERT(rid < srdp->srd_next_hmerid); 14282 rgnp->rgn_next = srdp->srd_hmergnfree; 14283 srdp->srd_hmergnfree = rgnp; 14284 ASSERT(srdp->srd_hmebusyrgns > 0); 14285 srdp->srd_hmebusyrgns--; 14286 mutex_exit(&srdp->srd_mutex); 14287 } 14288 14289 /* 14290 * For now only called for hmeblk regions and not for ISM regions. 
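 *
 * This is used on the hat_dup() (fork) path so the child hat inherits the
 * parent's shared text regions.  A hedged sketch of how a caller would drive
 * it; the loop below is illustrative only, not the actual hat_dup() code:
 *
 *	for (rid = 0; rid < SFMMU_MAX_HME_REGIONS; rid++) {
 *		if (SF_RGNMAP_TEST(parent->sfmmu_hmeregion_map, rid))
 *			hat_dup_region(child,
 *			    (hat_region_cookie_t)((uint64_t)rid));
 *	}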
14291 */ 14292 void 14293 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14294 { 14295 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14296 uint_t rid = (uint_t)((uint64_t)rcookie); 14297 sf_region_t *rgnp; 14298 sf_rgn_link_t *rlink; 14299 sf_rgn_link_t *hrlink; 14300 ulong_t rttecnt; 14301 14302 ASSERT(sfmmup != ksfmmup); 14303 ASSERT(srdp != NULL); 14304 ASSERT(srdp->srd_refcnt > 0); 14305 14306 ASSERT(rid < srdp->srd_next_hmerid); 14307 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14308 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14309 14310 rgnp = srdp->srd_hmergnp[rid]; 14311 ASSERT(rgnp->rgn_refcnt > 0); 14312 ASSERT(rgnp->rgn_id == rid); 14313 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14314 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14315 14316 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt); 14317 14318 /* LINTED: constant in conditional context */ 14319 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14320 ASSERT(rlink != NULL); 14321 mutex_enter(&rgnp->rgn_mutex); 14322 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14323 /* LINTED: constant in conditional context */ 14324 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14325 ASSERT(hrlink != NULL); 14326 ASSERT(hrlink->prev == NULL); 14327 rlink->next = rgnp->rgn_sfmmu_head; 14328 rlink->prev = NULL; 14329 hrlink->prev = sfmmup; 14330 /* 14331 * make sure rlink's next field is correct 14332 * before making this link visible. 14333 */ 14334 membar_stst(); 14335 rgnp->rgn_sfmmu_head = sfmmup; 14336 mutex_exit(&rgnp->rgn_mutex); 14337 14338 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14339 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14340 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14341 /* update tsb0 inflation count */ 14342 if (rgnp->rgn_pgszc >= TTE4M) { 14343 sfmmup->sfmmu_tsb0_4minflcnt += 14344 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14345 } 14346 /* 14347 * Update regionid bitmask without hat lock since no other thread 14348 * can update this region bitmask right now. 
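 * SF_RGNMAP_ADD() simply sets bit 'rid' in the per-hat region bitmap;
 * assuming 64-bit bitmap words (BT_ULSHIFT == 6) it expands to roughly:
 *
 *	sfmmup->sfmmu_hmeregion_map.bitmap[rid >> 6] |= (1UL << (rid & 0x3f));
 *
 * No lock is needed at this point because the duplicated hat is not yet
 * visible to any other thread that could touch the same bitmap word.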
14349 */ 14350 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14351 } 14352 14353 /* ARGSUSED */ 14354 static int 14355 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14356 { 14357 sf_region_t *rgnp = (sf_region_t *)buf; 14358 bzero(buf, sizeof (*rgnp)); 14359 14360 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14361 14362 return (0); 14363 } 14364 14365 /* ARGSUSED */ 14366 static void 14367 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14368 { 14369 sf_region_t *rgnp = (sf_region_t *)buf; 14370 mutex_destroy(&rgnp->rgn_mutex); 14371 } 14372 14373 static int 14374 sfrgnmap_isnull(sf_region_map_t *map) 14375 { 14376 int i; 14377 14378 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14379 if (map->bitmap[i] != 0) { 14380 return (0); 14381 } 14382 } 14383 return (1); 14384 } 14385 14386 static int 14387 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14388 { 14389 int i; 14390 14391 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14392 if (map->bitmap[i] != 0) { 14393 return (0); 14394 } 14395 } 14396 return (1); 14397 } 14398 14399 #ifdef DEBUG 14400 static void 14401 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14402 { 14403 sfmmu_t *sp; 14404 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14405 14406 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14407 ASSERT(srdp == sp->sfmmu_srdp); 14408 if (sp == sfmmup) { 14409 if (onlist) { 14410 return; 14411 } else { 14412 panic("shctx: sfmmu 0x%p found on scd" 14413 "list 0x%p", (void *)sfmmup, 14414 (void *)*headp); 14415 } 14416 } 14417 } 14418 if (onlist) { 14419 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14420 (void *)sfmmup, (void *)*headp); 14421 } else { 14422 return; 14423 } 14424 } 14425 #else /* DEBUG */ 14426 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14427 #endif /* DEBUG */ 14428 14429 /* 14430 * Removes an sfmmu from the SCD sfmmu list. 14431 */ 14432 static void 14433 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14434 { 14435 ASSERT(sfmmup->sfmmu_srdp != NULL); 14436 check_scd_sfmmu_list(headp, sfmmup, 1); 14437 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14438 ASSERT(*headp != sfmmup); 14439 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14440 sfmmup->sfmmu_scd_link.next; 14441 } else { 14442 ASSERT(*headp == sfmmup); 14443 *headp = sfmmup->sfmmu_scd_link.next; 14444 } 14445 if (sfmmup->sfmmu_scd_link.next != NULL) { 14446 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14447 sfmmup->sfmmu_scd_link.prev; 14448 } 14449 } 14450 14451 14452 /* 14453 * Adds an sfmmu to the start of the queue. 14454 */ 14455 static void 14456 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14457 { 14458 check_scd_sfmmu_list(headp, sfmmup, 0); 14459 sfmmup->sfmmu_scd_link.prev = NULL; 14460 sfmmup->sfmmu_scd_link.next = *headp; 14461 if (*headp != NULL) 14462 (*headp)->sfmmu_scd_link.prev = sfmmup; 14463 *headp = sfmmup; 14464 } 14465 14466 /* 14467 * Remove an scd from the start of the queue. 14468 */ 14469 static void 14470 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14471 { 14472 if (scdp->scd_prev != NULL) { 14473 ASSERT(*headp != scdp); 14474 scdp->scd_prev->scd_next = scdp->scd_next; 14475 } else { 14476 ASSERT(*headp == scdp); 14477 *headp = scdp->scd_next; 14478 } 14479 14480 if (scdp->scd_next != NULL) { 14481 scdp->scd_next->scd_prev = scdp->scd_prev; 14482 } 14483 } 14484 14485 /* 14486 * Add an scd to the start of the queue. 
14487 */ 14488 static void 14489 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14490 { 14491 scdp->scd_prev = NULL; 14492 scdp->scd_next = *headp; 14493 if (*headp != NULL) { 14494 (*headp)->scd_prev = scdp; 14495 } 14496 *headp = scdp; 14497 } 14498 14499 static int 14500 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14501 { 14502 uint_t rid; 14503 uint_t i; 14504 uint_t j; 14505 ulong_t w; 14506 sf_region_t *rgnp; 14507 ulong_t tte8k_cnt = 0; 14508 ulong_t tte4m_cnt = 0; 14509 uint_t tsb_szc; 14510 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14511 sfmmu_t *ism_hatid; 14512 struct tsb_info *newtsb; 14513 int szc; 14514 14515 ASSERT(srdp != NULL); 14516 14517 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14518 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14519 continue; 14520 } 14521 j = 0; 14522 while (w) { 14523 if (!(w & 0x1)) { 14524 j++; 14525 w >>= 1; 14526 continue; 14527 } 14528 rid = (i << BT_ULSHIFT) | j; 14529 j++; 14530 w >>= 1; 14531 14532 if (rid < SFMMU_MAX_HME_REGIONS) { 14533 rgnp = srdp->srd_hmergnp[rid]; 14534 ASSERT(rgnp->rgn_id == rid); 14535 ASSERT(rgnp->rgn_refcnt > 0); 14536 14537 if (rgnp->rgn_pgszc < TTE4M) { 14538 tte8k_cnt += rgnp->rgn_size >> 14539 TTE_PAGE_SHIFT(TTE8K); 14540 } else { 14541 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14542 tte4m_cnt += rgnp->rgn_size >> 14543 TTE_PAGE_SHIFT(TTE4M); 14544 /* 14545 * Inflate SCD tsb0 by preallocating 14546 * 1/4 8k ttecnt for 4M regions to 14547 * allow for lgpg alloc failure. 14548 */ 14549 tte8k_cnt += rgnp->rgn_size >> 14550 (TTE_PAGE_SHIFT(TTE8K) + 2); 14551 } 14552 } else { 14553 rid -= SFMMU_MAX_HME_REGIONS; 14554 rgnp = srdp->srd_ismrgnp[rid]; 14555 ASSERT(rgnp->rgn_id == rid); 14556 ASSERT(rgnp->rgn_refcnt > 0); 14557 14558 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14559 ASSERT(ism_hatid->sfmmu_ismhat); 14560 14561 for (szc = 0; szc < TTE4M; szc++) { 14562 tte8k_cnt += 14563 ism_hatid->sfmmu_ttecnt[szc] << 14564 TTE_BSZS_SHIFT(szc); 14565 } 14566 14567 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14568 if (rgnp->rgn_pgszc >= TTE4M) { 14569 tte4m_cnt += rgnp->rgn_size >> 14570 TTE_PAGE_SHIFT(TTE4M); 14571 } 14572 } 14573 } 14574 } 14575 14576 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14577 14578 /* Allocate both the SCD TSBs here. */ 14579 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14580 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14581 (tsb_szc <= TSB_4M_SZCODE || 14582 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14583 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14584 TSB_ALLOC, scsfmmup))) { 14585 14586 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14587 return (TSB_ALLOCFAIL); 14588 } else { 14589 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14590 14591 if (tte4m_cnt) { 14592 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14593 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14594 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14595 (tsb_szc <= TSB_4M_SZCODE || 14596 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14597 TSB4M|TSB32M|TSB256M, 14598 TSB_ALLOC, scsfmmup))) { 14599 /* 14600 * If we fail to allocate the 2nd shared tsb, 14601 * just free the 1st tsb, return failure. 
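 * Both TSB allocations above follow the same try-then-fall-back shape.
 * sfmmu_tsbinfo_alloc() returns non-zero on failure, so the enclosing
 * condition reads: the allocation at the preferred size failed, and either
 * the preferred size was already no larger than TSB_4M_SZCODE (so there is
 * no smaller fallback worth retrying), or the retry at TSB_4M_SZCODE failed
 * as well.  Condensed sketch of that shape:
 *
 *	if (alloc(&tsb, szc) != 0 &&
 *	    (szc <= TSB_4M_SZCODE || alloc(&tsb, TSB_4M_SZCODE) != 0)) {
 *		return (TSB_ALLOCFAIL);
 *	}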
14602 */ 14603 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14604 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14605 return (TSB_ALLOCFAIL); 14606 } else { 14607 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14608 newtsb->tsb_flags |= TSB_SHAREDCTX; 14609 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14610 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14611 } 14612 } 14613 SFMMU_STAT(sf_scd_1sttsb_alloc); 14614 } 14615 return (TSB_SUCCESS); 14616 } 14617 14618 static void 14619 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14620 { 14621 while (scd_sfmmu->sfmmu_tsb != NULL) { 14622 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14623 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14624 scd_sfmmu->sfmmu_tsb = next; 14625 } 14626 } 14627 14628 /* 14629 * Link the sfmmu onto the hme region list. 14630 */ 14631 void 14632 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14633 { 14634 uint_t rid; 14635 sf_rgn_link_t *rlink; 14636 sfmmu_t *head; 14637 sf_rgn_link_t *hrlink; 14638 14639 rid = rgnp->rgn_id; 14640 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14641 14642 /* LINTED: constant in conditional context */ 14643 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14644 ASSERT(rlink != NULL); 14645 mutex_enter(&rgnp->rgn_mutex); 14646 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14647 rlink->next = NULL; 14648 rlink->prev = NULL; 14649 /* 14650 * make sure rlink's next field is NULL 14651 * before making this link visible. 14652 */ 14653 membar_stst(); 14654 rgnp->rgn_sfmmu_head = sfmmup; 14655 } else { 14656 /* LINTED: constant in conditional context */ 14657 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14658 ASSERT(hrlink != NULL); 14659 ASSERT(hrlink->prev == NULL); 14660 rlink->next = head; 14661 rlink->prev = NULL; 14662 hrlink->prev = sfmmup; 14663 /* 14664 * make sure rlink's next field is correct 14665 * before making this link visible. 14666 */ 14667 membar_stst(); 14668 rgnp->rgn_sfmmu_head = sfmmup; 14669 } 14670 mutex_exit(&rgnp->rgn_mutex); 14671 } 14672 14673 /* 14674 * Unlink the sfmmu from the hme region list. 14675 */ 14676 void 14677 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14678 { 14679 uint_t rid; 14680 sf_rgn_link_t *rlink; 14681 14682 rid = rgnp->rgn_id; 14683 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14684 14685 /* LINTED: constant in conditional context */ 14686 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14687 ASSERT(rlink != NULL); 14688 mutex_enter(&rgnp->rgn_mutex); 14689 if (rgnp->rgn_sfmmu_head == sfmmup) { 14690 sfmmu_t *next = rlink->next; 14691 rgnp->rgn_sfmmu_head = next; 14692 /* 14693 * if we are stopped by xc_attention() after this 14694 * point the forward link walking in 14695 * sfmmu_rgntlb_demap() will work correctly since the 14696 * head correctly points to the next element. 
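 * In both directions the store-store barrier enforces the ordering that
 * keeps concurrent forward walks safe: when linking, the new element's next
 * pointer is made visible before the list head points at it; when unlinking,
 * the head is moved past the element before that element's next pointer is
 * cleared.  Sketch of the two orderings (membar_stst() orders stores only):
 *
 *	link:
 *		rlink->next = head;
 *		membar_stst();
 *		rgnp->rgn_sfmmu_head = sfmmup;
 *
 *	unlink (head case):
 *		rgnp->rgn_sfmmu_head = next;
 *		membar_stst();
 *		rlink->next = NULL;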
14697 */ 14698 membar_stst(); 14699 rlink->next = NULL; 14700 ASSERT(rlink->prev == NULL); 14701 if (next != NULL) { 14702 sf_rgn_link_t *nrlink; 14703 /* LINTED: constant in conditional context */ 14704 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14705 ASSERT(nrlink != NULL); 14706 ASSERT(nrlink->prev == sfmmup); 14707 nrlink->prev = NULL; 14708 } 14709 } else { 14710 sfmmu_t *next = rlink->next; 14711 sfmmu_t *prev = rlink->prev; 14712 sf_rgn_link_t *prlink; 14713 14714 ASSERT(prev != NULL); 14715 /* LINTED: constant in conditional context */ 14716 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14717 ASSERT(prlink != NULL); 14718 ASSERT(prlink->next == sfmmup); 14719 prlink->next = next; 14720 /* 14721 * if we are stopped by xc_attention() 14722 * after this point the forward link walking 14723 * will work correctly since the prev element 14724 * correctly points to the next element. 14725 */ 14726 membar_stst(); 14727 rlink->next = NULL; 14728 rlink->prev = NULL; 14729 if (next != NULL) { 14730 sf_rgn_link_t *nrlink; 14731 /* LINTED: constant in conditional context */ 14732 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14733 ASSERT(nrlink != NULL); 14734 ASSERT(nrlink->prev == sfmmup); 14735 nrlink->prev = prev; 14736 } 14737 } 14738 mutex_exit(&rgnp->rgn_mutex); 14739 } 14740 14741 /* 14742 * Link scd sfmmu onto ism or hme region list for each region in the 14743 * scd region map. 14744 */ 14745 void 14746 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14747 { 14748 uint_t rid; 14749 uint_t i; 14750 uint_t j; 14751 ulong_t w; 14752 sf_region_t *rgnp; 14753 sfmmu_t *scsfmmup; 14754 14755 scsfmmup = scdp->scd_sfmmup; 14756 ASSERT(scsfmmup->sfmmu_scdhat); 14757 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14758 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14759 continue; 14760 } 14761 j = 0; 14762 while (w) { 14763 if (!(w & 0x1)) { 14764 j++; 14765 w >>= 1; 14766 continue; 14767 } 14768 rid = (i << BT_ULSHIFT) | j; 14769 j++; 14770 w >>= 1; 14771 14772 if (rid < SFMMU_MAX_HME_REGIONS) { 14773 rgnp = srdp->srd_hmergnp[rid]; 14774 ASSERT(rgnp->rgn_id == rid); 14775 ASSERT(rgnp->rgn_refcnt > 0); 14776 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 14777 } else { 14778 sfmmu_t *ism_hatid = NULL; 14779 ism_ment_t *ism_ment; 14780 rid -= SFMMU_MAX_HME_REGIONS; 14781 rgnp = srdp->srd_ismrgnp[rid]; 14782 ASSERT(rgnp->rgn_id == rid); 14783 ASSERT(rgnp->rgn_refcnt > 0); 14784 14785 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14786 ASSERT(ism_hatid->sfmmu_ismhat); 14787 ism_ment = &scdp->scd_ism_links[rid]; 14788 ism_ment->iment_hat = scsfmmup; 14789 ism_ment->iment_base_va = rgnp->rgn_saddr; 14790 mutex_enter(&ism_mlist_lock); 14791 iment_add(ism_ment, ism_hatid); 14792 mutex_exit(&ism_mlist_lock); 14793 14794 } 14795 } 14796 } 14797 } 14798 /* 14799 * Unlink scd sfmmu from ism or hme region list for each region in the 14800 * scd region map. 
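 * The region map is walked here the same way as in
 * sfmmu_link_scd_to_regions() above: each non-zero bitmap word is scanned
 * bit by bit and the region id is reconstructed from the word index and the
 * bit position.  The traversal reduces to this pattern, where visit() is
 * just a placeholder for the per-region work:
 *
 *	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
 *		for (w = map->bitmap[i], j = 0; w != 0; w >>= 1, j++) {
 *			if (w & 0x1)
 *				visit((i << BT_ULSHIFT) | j);
 *		}
 *	}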
14801 */ 14802 void 14803 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14804 { 14805 uint_t rid; 14806 uint_t i; 14807 uint_t j; 14808 ulong_t w; 14809 sf_region_t *rgnp; 14810 sfmmu_t *scsfmmup; 14811 14812 scsfmmup = scdp->scd_sfmmup; 14813 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14814 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14815 continue; 14816 } 14817 j = 0; 14818 while (w) { 14819 if (!(w & 0x1)) { 14820 j++; 14821 w >>= 1; 14822 continue; 14823 } 14824 rid = (i << BT_ULSHIFT) | j; 14825 j++; 14826 w >>= 1; 14827 14828 if (rid < SFMMU_MAX_HME_REGIONS) { 14829 rgnp = srdp->srd_hmergnp[rid]; 14830 ASSERT(rgnp->rgn_id == rid); 14831 ASSERT(rgnp->rgn_refcnt > 0); 14832 sfmmu_unlink_from_hmeregion(scsfmmup, 14833 rgnp); 14834 14835 } else { 14836 sfmmu_t *ism_hatid = NULL; 14837 ism_ment_t *ism_ment; 14838 rid -= SFMMU_MAX_HME_REGIONS; 14839 rgnp = srdp->srd_ismrgnp[rid]; 14840 ASSERT(rgnp->rgn_id == rid); 14841 ASSERT(rgnp->rgn_refcnt > 0); 14842 14843 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14844 ASSERT(ism_hatid->sfmmu_ismhat); 14845 ism_ment = &scdp->scd_ism_links[rid]; 14846 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 14847 ASSERT(ism_ment->iment_base_va == 14848 rgnp->rgn_saddr); 14849 mutex_enter(&ism_mlist_lock); 14850 iment_sub(ism_ment, ism_hatid); 14851 mutex_exit(&ism_mlist_lock); 14852 14853 } 14854 } 14855 } 14856 } 14857 /* 14858 * Allocates and initialises a new SCD structure, this is called with 14859 * the srd_scd_mutex held and returns with the reference count 14860 * initialised to 1. 14861 */ 14862 static sf_scd_t * 14863 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 14864 { 14865 sf_scd_t *new_scdp; 14866 sfmmu_t *scsfmmup; 14867 int i; 14868 14869 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 14870 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 14871 14872 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 14873 new_scdp->scd_sfmmup = scsfmmup; 14874 scsfmmup->sfmmu_srdp = srdp; 14875 scsfmmup->sfmmu_scdp = new_scdp; 14876 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 14877 scsfmmup->sfmmu_scdhat = 1; 14878 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 14879 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 14880 14881 ASSERT(max_mmu_ctxdoms > 0); 14882 for (i = 0; i < max_mmu_ctxdoms; i++) { 14883 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 14884 scsfmmup->sfmmu_ctxs[i].gnum = 0; 14885 } 14886 14887 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14888 new_scdp->scd_rttecnt[i] = 0; 14889 } 14890 14891 new_scdp->scd_region_map = *new_map; 14892 new_scdp->scd_refcnt = 1; 14893 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 14894 kmem_cache_free(scd_cache, new_scdp); 14895 kmem_cache_free(sfmmuid_cache, scsfmmup); 14896 return (NULL); 14897 } 14898 if (&mmu_init_scd) { 14899 mmu_init_scd(new_scdp); 14900 } 14901 return (new_scdp); 14902 } 14903 14904 /* 14905 * The first phase of a process joining an SCD. The hat structure is 14906 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 14907 * and a cross-call with context invalidation is used to cause the 14908 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 14909 * routine. 
14910 */ 14911 static void 14912 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 14913 { 14914 hatlock_t *hatlockp; 14915 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14916 int i; 14917 sf_scd_t *old_scdp; 14918 14919 ASSERT(srdp != NULL); 14920 ASSERT(scdp != NULL); 14921 ASSERT(scdp->scd_refcnt > 0); 14922 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 14923 14924 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 14925 ASSERT(old_scdp != scdp); 14926 14927 mutex_enter(&old_scdp->scd_mutex); 14928 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 14929 mutex_exit(&old_scdp->scd_mutex); 14930 /* 14931 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 14932 * include the shme rgn ttecnt for rgns that 14933 * were in the old SCD 14934 */ 14935 for (i = 0; i < mmu_page_sizes; i++) { 14936 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 14937 old_scdp->scd_rttecnt[i]); 14938 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 14939 sfmmup->sfmmu_scdrttecnt[i]); 14940 } 14941 } 14942 14943 /* 14944 * Move sfmmu to the scd lists. 14945 */ 14946 mutex_enter(&scdp->scd_mutex); 14947 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 14948 mutex_exit(&scdp->scd_mutex); 14949 SF_SCD_INCR_REF(scdp); 14950 14951 hatlockp = sfmmu_hat_enter(sfmmup); 14952 /* 14953 * For a multi-thread process, we must stop 14954 * all the other threads before joining the scd. 14955 */ 14956 14957 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 14958 14959 sfmmu_invalidate_ctx(sfmmup); 14960 sfmmup->sfmmu_scdp = scdp; 14961 14962 /* 14963 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 14964 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 14965 */ 14966 for (i = 0; i < mmu_page_sizes; i++) { 14967 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 14968 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 14969 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 14970 -sfmmup->sfmmu_scdrttecnt[i]); 14971 } 14972 /* update tsb0 inflation count */ 14973 if (old_scdp != NULL) { 14974 sfmmup->sfmmu_tsb0_4minflcnt += 14975 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 14976 } 14977 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14978 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 14979 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 14980 14981 sfmmu_hat_exit(hatlockp); 14982 14983 if (old_scdp != NULL) { 14984 SF_SCD_DECR_REF(srdp, old_scdp); 14985 } 14986 14987 } 14988 14989 /* 14990 * This routine is called by a process to become part of an SCD. It is called 14991 * from sfmmu_tsbmiss_exception() once most of the initial work has been 14992 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 
14993 */ 14994 static void 14995 sfmmu_finish_join_scd(sfmmu_t *sfmmup) 14996 { 14997 struct tsb_info *tsbinfop; 14998 14999 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15000 ASSERT(sfmmup->sfmmu_scdp != NULL); 15001 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)); 15002 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15003 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)); 15004 15005 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 15006 tsbinfop = tsbinfop->tsb_next) { 15007 if (tsbinfop->tsb_flags & TSB_SWAPPED) { 15008 continue; 15009 } 15010 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG)); 15011 15012 sfmmu_inv_tsb(tsbinfop->tsb_va, 15013 TSB_BYTES(tsbinfop->tsb_szc)); 15014 } 15015 15016 /* Set HAT_CTX1_FLAG for all SCD ISMs */ 15017 sfmmu_ism_hatflags(sfmmup, 1); 15018 15019 SFMMU_STAT(sf_join_scd); 15020 } 15021 15022 /* 15023 * This routine is called in order to check if there is an SCD which matches 15024 * the process's region map if not then a new SCD may be created. 15025 */ 15026 static void 15027 sfmmu_find_scd(sfmmu_t *sfmmup) 15028 { 15029 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15030 sf_scd_t *scdp, *new_scdp; 15031 int ret; 15032 15033 ASSERT(srdp != NULL); 15034 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); 15035 15036 mutex_enter(&srdp->srd_scd_mutex); 15037 for (scdp = srdp->srd_scdp; scdp != NULL; 15038 scdp = scdp->scd_next) { 15039 SF_RGNMAP_EQUAL(&scdp->scd_region_map, 15040 &sfmmup->sfmmu_region_map, ret); 15041 if (ret == 1) { 15042 SF_SCD_INCR_REF(scdp); 15043 mutex_exit(&srdp->srd_scd_mutex); 15044 sfmmu_join_scd(scdp, sfmmup); 15045 ASSERT(scdp->scd_refcnt >= 2); 15046 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt); 15047 return; 15048 } else { 15049 /* 15050 * If the sfmmu region map is a subset of the scd 15051 * region map, then the assumption is that this process 15052 * will continue attaching to ISM segments until the 15053 * region maps are equal. 15054 */ 15055 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map, 15056 &sfmmup->sfmmu_region_map, ret); 15057 if (ret == 1) { 15058 mutex_exit(&srdp->srd_scd_mutex); 15059 return; 15060 } 15061 } 15062 } 15063 15064 ASSERT(scdp == NULL); 15065 /* 15066 * No matching SCD has been found, create a new one. 15067 */ 15068 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) == 15069 NULL) { 15070 mutex_exit(&srdp->srd_scd_mutex); 15071 return; 15072 } 15073 15074 /* 15075 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd. 15076 */ 15077 15078 /* Set scd_rttecnt for shme rgns in SCD */ 15079 sfmmu_set_scd_rttecnt(srdp, new_scdp); 15080 15081 /* 15082 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists. 15083 */ 15084 sfmmu_link_scd_to_regions(srdp, new_scdp); 15085 sfmmu_add_scd(&srdp->srd_scdp, new_scdp); 15086 SFMMU_STAT_ADD(sf_create_scd, 1); 15087 15088 mutex_exit(&srdp->srd_scd_mutex); 15089 sfmmu_join_scd(new_scdp, sfmmup); 15090 ASSERT(new_scdp->scd_refcnt >= 2); 15091 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt); 15092 } 15093 15094 /* 15095 * This routine is called by a process to remove itself from an SCD. It is 15096 * either called when the processes has detached from a segment or from 15097 * hat_free_start() as a result of calling exit. 
15098 */ 15099 static void 15100 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15101 { 15102 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15103 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15104 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15105 int i; 15106 15107 ASSERT(scdp != NULL); 15108 ASSERT(srdp != NULL); 15109 15110 if (sfmmup->sfmmu_free) { 15111 /* 15112 * If the process is part of an SCD the sfmmu is unlinked 15113 * from scd_sf_list. 15114 */ 15115 mutex_enter(&scdp->scd_mutex); 15116 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15117 mutex_exit(&scdp->scd_mutex); 15118 /* 15119 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15120 * are about to leave the SCD 15121 */ 15122 for (i = 0; i < mmu_page_sizes; i++) { 15123 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15124 scdp->scd_rttecnt[i]); 15125 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15126 sfmmup->sfmmu_scdrttecnt[i]); 15127 sfmmup->sfmmu_scdrttecnt[i] = 0; 15128 } 15129 sfmmup->sfmmu_scdp = NULL; 15130 15131 SF_SCD_DECR_REF(srdp, scdp); 15132 return; 15133 } 15134 15135 ASSERT(r_type != SFMMU_REGION_ISM || 15136 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15137 ASSERT(scdp->scd_refcnt); 15138 ASSERT(!sfmmup->sfmmu_free); 15139 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15140 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as)); 15141 15142 /* 15143 * Wait for ISM maps to be updated. 15144 */ 15145 if (r_type != SFMMU_REGION_ISM) { 15146 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15147 sfmmup->sfmmu_scdp != NULL) { 15148 cv_wait(&sfmmup->sfmmu_tsb_cv, 15149 HATLOCK_MUTEXP(hatlockp)); 15150 } 15151 15152 if (sfmmup->sfmmu_scdp == NULL) { 15153 sfmmu_hat_exit(hatlockp); 15154 return; 15155 } 15156 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15157 } 15158 15159 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15160 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15161 /* 15162 * Since HAT_JOIN_SCD was set our context 15163 * is still invalid. 15164 */ 15165 } else { 15166 /* 15167 * For a multi-thread process, we must stop 15168 * all the other threads before leaving the scd. 15169 */ 15170 15171 sfmmu_invalidate_ctx(sfmmup); 15172 } 15173 15174 /* Clear all the rid's for ISM, delete flags, etc */ 15175 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15176 sfmmu_ism_hatflags(sfmmup, 0); 15177 15178 /* 15179 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15180 * are in SCD before this sfmmup leaves the SCD. 15181 */ 15182 for (i = 0; i < mmu_page_sizes; i++) { 15183 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15184 scdp->scd_rttecnt[i]); 15185 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15186 sfmmup->sfmmu_scdrttecnt[i]); 15187 sfmmup->sfmmu_scdrttecnt[i] = 0; 15188 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15189 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15190 sfmmup->sfmmu_scdismttecnt[i] = 0; 15191 } 15192 /* update tsb0 inflation count */ 15193 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15194 15195 if (r_type != SFMMU_REGION_ISM) { 15196 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15197 } 15198 sfmmup->sfmmu_scdp = NULL; 15199 15200 sfmmu_hat_exit(hatlockp); 15201 15202 /* 15203 * Unlink sfmmu from scd_sf_list this can be done without holding 15204 * the hat lock as we hold the sfmmu_as lock which prevents 15205 * hat_join_region from adding this thread to the scd again. Other 15206 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15207 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15208 * while holding the hat lock. 
15209 */ 15210 mutex_enter(&scdp->scd_mutex); 15211 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15212 mutex_exit(&scdp->scd_mutex); 15213 SFMMU_STAT(sf_leave_scd); 15214 15215 SF_SCD_DECR_REF(srdp, scdp); 15216 hatlockp = sfmmu_hat_enter(sfmmup); 15217 15218 } 15219 15220 /* 15221 * Unlink and free up an SCD structure with a reference count of 0. 15222 */ 15223 static void 15224 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15225 { 15226 sfmmu_t *scsfmmup; 15227 sf_scd_t *sp; 15228 hatlock_t *shatlockp; 15229 int i, ret; 15230 15231 mutex_enter(&srdp->srd_scd_mutex); 15232 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15233 if (sp == scdp) 15234 break; 15235 } 15236 if (sp == NULL || sp->scd_refcnt) { 15237 mutex_exit(&srdp->srd_scd_mutex); 15238 return; 15239 } 15240 15241 /* 15242 * It is possible that the scd has been freed and reallocated with a 15243 * different region map while we've been waiting for the srd_scd_mutex. 15244 */ 15245 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret); 15246 if (ret != 1) { 15247 mutex_exit(&srdp->srd_scd_mutex); 15248 return; 15249 } 15250 15251 ASSERT(scdp->scd_sf_list == NULL); 15252 /* 15253 * Unlink scd from srd_scdp list. 15254 */ 15255 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15256 mutex_exit(&srdp->srd_scd_mutex); 15257 15258 sfmmu_unlink_scd_from_regions(srdp, scdp); 15259 15260 /* Clear shared context tsb and release ctx */ 15261 scsfmmup = scdp->scd_sfmmup; 15262 15263 /* 15264 * create a barrier so that scd will not be destroyed 15265 * if other thread still holds the same shared hat lock. 15266 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15267 * shared hat lock before checking the shared tsb reloc flag. 15268 */ 15269 shatlockp = sfmmu_hat_enter(scsfmmup); 15270 sfmmu_hat_exit(shatlockp); 15271 15272 sfmmu_free_scd_tsbs(scsfmmup); 15273 15274 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 15275 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) { 15276 kmem_free(scsfmmup->sfmmu_hmeregion_links[i], 15277 SFMMU_L2_HMERLINKS_SIZE); 15278 scsfmmup->sfmmu_hmeregion_links[i] = NULL; 15279 } 15280 } 15281 kmem_cache_free(sfmmuid_cache, scsfmmup); 15282 kmem_cache_free(scd_cache, scdp); 15283 SFMMU_STAT(sf_destroy_scd); 15284 } 15285 15286 /* 15287 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to 15288 * bits which are set in the ism_region_map parameter. This flag indicates to 15289 * the tsbmiss handler that mapping for these segments should be loaded using 15290 * the shared context. 15291 */ 15292 static void 15293 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag) 15294 { 15295 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15296 ism_blk_t *ism_blkp; 15297 ism_map_t *ism_map; 15298 int i, rid; 15299 15300 ASSERT(sfmmup->sfmmu_iblk != NULL); 15301 ASSERT(scdp != NULL); 15302 /* 15303 * Note that the caller either set HAT_ISMBUSY flag or checked 15304 * under hat lock that HAT_ISMBUSY was not set by another thread. 
15305 */ 15306 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15307 15308 ism_blkp = sfmmup->sfmmu_iblk; 15309 while (ism_blkp != NULL) { 15310 ism_map = ism_blkp->iblk_maps; 15311 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 15312 rid = ism_map[i].imap_rid; 15313 if (rid == SFMMU_INVALID_ISMRID) { 15314 continue; 15315 } 15316 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS); 15317 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) && 15318 addflag) { 15319 ism_map[i].imap_hatflags |= 15320 HAT_CTX1_FLAG; 15321 } else { 15322 ism_map[i].imap_hatflags &= 15323 ~HAT_CTX1_FLAG; 15324 } 15325 } 15326 ism_blkp = ism_blkp->iblk_next; 15327 } 15328 } 15329 15330 static int 15331 sfmmu_srd_lock_held(sf_srd_t *srdp) 15332 { 15333 return (MUTEX_HELD(&srdp->srd_mutex)); 15334 } 15335 15336 /* ARGSUSED */ 15337 static int 15338 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags) 15339 { 15340 sf_scd_t *scdp = (sf_scd_t *)buf; 15341 15342 bzero(buf, sizeof (sf_scd_t)); 15343 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL); 15344 return (0); 15345 } 15346 15347 /* ARGSUSED */ 15348 static void 15349 sfmmu_scdcache_destructor(void *buf, void *cdrarg) 15350 { 15351 sf_scd_t *scdp = (sf_scd_t *)buf; 15352 15353 mutex_destroy(&scdp->scd_mutex); 15354 } 15355 15356 /* 15357 * The listp parameter is a pointer to a list of hmeblks which are partially 15358 * freed as result of calling sfmmu_hblk_hash_rm(), the last phase of the 15359 * freeing process is to cross-call all cpus to ensure that there are no 15360 * remaining cached references. 15361 * 15362 * If the local generation number is less than the global then we can free 15363 * hmeblks which are already on the pending queue as another cpu has completed 15364 * the cross-call. 15365 * 15366 * We cross-call to make sure that there are no threads on other cpus accessing 15367 * these hmblks and then complete the process of freeing them under the 15368 * following conditions: 15369 * The total number of pending hmeblks is greater than the threshold 15370 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks 15371 * It is at least 1 second since the last time we cross-called 15372 * 15373 * Otherwise, we add the hmeblks to the per-cpu pending queue. 
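 *
 * Expressed as a predicate over the variables used below (this is only a
 * restatement of the three conditions above, not additional logic):
 *
 *	flush_now = !dontfree &&
 *	    (freehblkcnt < HBLK_RESERVE_CNT ||
 *	    cpuhp->chp_count + count > cpu_hme_pend_thresh ||
 *	    now.tv_sec - cpuhp->chp_timestamp > 1);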
15374 */ 15375 static void 15376 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree) 15377 { 15378 struct hme_blk *hblkp, *pr_hblkp = NULL; 15379 int count = 0; 15380 cpuset_t cpuset = cpu_ready_set; 15381 cpu_hme_pend_t *cpuhp; 15382 timestruc_t now; 15383 int one_second_expired = 0; 15384 15385 gethrestime_lasttick(&now); 15386 15387 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) { 15388 ASSERT(hblkp->hblk_shw_bit == 0); 15389 ASSERT(hblkp->hblk_shared == 0); 15390 count++; 15391 pr_hblkp = hblkp; 15392 } 15393 15394 cpuhp = &cpu_hme_pend[CPU->cpu_seqid]; 15395 mutex_enter(&cpuhp->chp_mutex); 15396 15397 if ((cpuhp->chp_count + count) == 0) { 15398 mutex_exit(&cpuhp->chp_mutex); 15399 return; 15400 } 15401 15402 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) { 15403 one_second_expired = 1; 15404 } 15405 15406 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT || 15407 (cpuhp->chp_count + count) > cpu_hme_pend_thresh || 15408 one_second_expired)) { 15409 /* Append global list to local */ 15410 if (pr_hblkp == NULL) { 15411 *listp = cpuhp->chp_listp; 15412 } else { 15413 pr_hblkp->hblk_next = cpuhp->chp_listp; 15414 } 15415 cpuhp->chp_listp = NULL; 15416 cpuhp->chp_count = 0; 15417 cpuhp->chp_timestamp = now.tv_sec; 15418 mutex_exit(&cpuhp->chp_mutex); 15419 15420 kpreempt_disable(); 15421 CPUSET_DEL(cpuset, CPU->cpu_id); 15422 xt_sync(cpuset); 15423 xt_sync(cpuset); 15424 kpreempt_enable(); 15425 15426 /* 15427 * At this stage we know that no trap handlers on other 15428 * cpus can have references to hmeblks on the list. 15429 */ 15430 sfmmu_hblk_free(listp); 15431 } else if (*listp != NULL) { 15432 pr_hblkp->hblk_next = cpuhp->chp_listp; 15433 cpuhp->chp_listp = *listp; 15434 cpuhp->chp_count += count; 15435 *listp = NULL; 15436 mutex_exit(&cpuhp->chp_mutex); 15437 } else { 15438 mutex_exit(&cpuhp->chp_mutex); 15439 } 15440 } 15441 15442 /* 15443 * Add an hmeblk to the the hash list. 15444 */ 15445 void 15446 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15447 uint64_t hblkpa) 15448 { 15449 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15450 #ifdef DEBUG 15451 if (hmebp->hmeblkp == NULL) { 15452 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA); 15453 } 15454 #endif /* DEBUG */ 15455 15456 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa; 15457 /* 15458 * Since the TSB miss handler now does not lock the hash chain before 15459 * walking it, make sure that the hmeblks nextpa is globally visible 15460 * before we make the hmeblk globally visible by updating the chain root 15461 * pointer in the hash bucket. 15462 */ 15463 membar_producer(); 15464 hmebp->hmeh_nextpa = hblkpa; 15465 hmeblkp->hblk_next = hmebp->hmeblkp; 15466 hmebp->hmeblkp = hmeblkp; 15467 15468 } 15469 15470 /* 15471 * This function is the first part of a 2 part process to remove an hmeblk 15472 * from the hash chain. In this phase we unlink the hmeblk from the hash chain 15473 * but leave the next physical pointer unchanged. The hmeblk is then linked onto 15474 * a per-cpu pending list using the virtual address pointer. 15475 * 15476 * TSB miss trap handlers that start after this phase will no longer see 15477 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register 15478 * can still use it for further chain traversal because we haven't yet modifed 15479 * the next physical pointer or freed it. 15480 * 15481 * In the second phase of hmeblk removal we'll issue a barrier xcall before 15482 * we reuse or free this hmeblk. 
This will make sure all lingering references to 15483 * the hmeblk after first phase disappear before we finally reclaim it. 15484 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains 15485 * during their traversal. 15486 * 15487 * The hmehash_mutex must be held when calling this function. 15488 * 15489 * Input: 15490 * hmebp - hme hash bucket pointer 15491 * hmeblkp - address of hmeblk to be removed 15492 * pr_hblk - virtual address of previous hmeblkp 15493 * listp - pointer to list of hmeblks linked by virtual address 15494 * free_now flag - indicates that a complete removal from the hash chains 15495 * is necessary. 15496 * 15497 * It is inefficient to use the free_now flag as a cross-call is required to 15498 * remove a single hmeblk from the hash chain but is necessary when hmeblks are 15499 * in short supply. 15500 */ 15501 void 15502 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15503 struct hme_blk *pr_hblk, struct hme_blk **listp, int free_now) 15504 { 15505 int shw_size, vshift; 15506 struct hme_blk *shw_hblkp; 15507 uint_t shw_mask, newshw_mask; 15508 caddr_t vaddr; 15509 int size; 15510 cpuset_t cpuset = cpu_ready_set; 15511 15512 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15513 15514 if (hmebp->hmeblkp == hmeblkp) { 15515 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa; 15516 hmebp->hmeblkp = hmeblkp->hblk_next; 15517 } else { 15518 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa; 15519 pr_hblk->hblk_next = hmeblkp->hblk_next; 15520 } 15521 15522 size = get_hblk_ttesz(hmeblkp); 15523 shw_hblkp = hmeblkp->hblk_shadow; 15524 if (shw_hblkp) { 15525 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 15526 ASSERT(!hmeblkp->hblk_shared); 15527 #ifdef DEBUG 15528 if (mmu_page_sizes == max_mmu_page_sizes) { 15529 ASSERT(size < TTE256M); 15530 } else { 15531 ASSERT(size < TTE4M); 15532 } 15533 #endif /* DEBUG */ 15534 15535 shw_size = get_hblk_ttesz(shw_hblkp); 15536 vaddr = (caddr_t)get_hblk_base(hmeblkp); 15537 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 15538 ASSERT(vshift < 8); 15539 /* 15540 * Atomically clear shadow mask bit 15541 */ 15542 do { 15543 shw_mask = shw_hblkp->hblk_shw_mask; 15544 ASSERT(shw_mask & (1 << vshift)); 15545 newshw_mask = shw_mask & ~(1 << vshift); 15546 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask, 15547 shw_mask, newshw_mask); 15548 } while (newshw_mask != shw_mask); 15549 hmeblkp->hblk_shadow = NULL; 15550 } 15551 hmeblkp->hblk_shw_bit = 0; 15552 15553 if (hmeblkp->hblk_shared) { 15554 #ifdef DEBUG 15555 sf_srd_t *srdp; 15556 sf_region_t *rgnp; 15557 uint_t rid; 15558 15559 srdp = hblktosrd(hmeblkp); 15560 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 15561 rid = hmeblkp->hblk_tag.htag_rid; 15562 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 15563 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 15564 rgnp = srdp->srd_hmergnp[rid]; 15565 ASSERT(rgnp != NULL); 15566 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 15567 #endif /* DEBUG */ 15568 hmeblkp->hblk_shared = 0; 15569 } 15570 if (free_now) { 15571 kpreempt_disable(); 15572 CPUSET_DEL(cpuset, CPU->cpu_id); 15573 xt_sync(cpuset); 15574 xt_sync(cpuset); 15575 kpreempt_enable(); 15576 15577 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 15578 hmeblkp->hblk_next = NULL; 15579 } else { 15580 /* Append hmeblkp to listp for processing later. 
*/ 15581 hmeblkp->hblk_next = *listp; 15582 *listp = hmeblkp; 15583 } 15584 } 15585 15586 /* 15587 * This routine is called when memory is in short supply and returns a free 15588 * hmeblk of the requested size from the cpu pending lists. 15589 */ 15590 static struct hme_blk * 15591 sfmmu_check_pending_hblks(int size) 15592 { 15593 int i; 15594 struct hme_blk *hmeblkp = NULL, *last_hmeblkp; 15595 int found_hmeblk; 15596 cpuset_t cpuset = cpu_ready_set; 15597 cpu_hme_pend_t *cpuhp; 15598 15599 /* Flush cpu hblk pending queues */ 15600 for (i = 0; i < NCPU; i++) { 15601 cpuhp = &cpu_hme_pend[i]; 15602 if (cpuhp->chp_listp != NULL) { 15603 mutex_enter(&cpuhp->chp_mutex); 15604 if (cpuhp->chp_listp == NULL) { 15605 mutex_exit(&cpuhp->chp_mutex); 15606 continue; 15607 } 15608 found_hmeblk = 0; 15609 last_hmeblkp = NULL; 15610 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL; 15611 hmeblkp = hmeblkp->hblk_next) { 15612 if (get_hblk_ttesz(hmeblkp) == size) { 15613 if (last_hmeblkp == NULL) { 15614 cpuhp->chp_listp = 15615 hmeblkp->hblk_next; 15616 } else { 15617 last_hmeblkp->hblk_next = 15618 hmeblkp->hblk_next; 15619 } 15620 ASSERT(cpuhp->chp_count > 0); 15621 cpuhp->chp_count--; 15622 found_hmeblk = 1; 15623 break; 15624 } else { 15625 last_hmeblkp = hmeblkp; 15626 } 15627 } 15628 mutex_exit(&cpuhp->chp_mutex); 15629 15630 if (found_hmeblk) { 15631 kpreempt_disable(); 15632 CPUSET_DEL(cpuset, CPU->cpu_id); 15633 xt_sync(cpuset); 15634 xt_sync(cpuset); 15635 kpreempt_enable(); 15636 return (hmeblkp); 15637 } 15638 } 15639 } 15640 return (NULL); 15641 } 15642
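/*
 * Illustrative sketch only (kept under #if 0, not part of this file's
 * logic): the cross-call barrier pattern used in sfmmu_hblks_list_purge(),
 * sfmmu_hblk_hash_rm() and sfmmu_check_pending_hblks() above, factored into
 * a hypothetical helper.  cpu_ready_set, CPUSET_DEL(), xt_sync() and
 * kpreempt_disable()/kpreempt_enable() are the existing interfaces already
 * used above; only the wrapper name is invented here.
 */
#if 0
static void
sfmmu_hblk_xcall_barrier(void)
{
	cpuset_t cpuset = cpu_ready_set;

	kpreempt_disable();
	CPUSET_DEL(cpuset, CPU->cpu_id);	/* no need to cross-call self */
	xt_sync(cpuset);	/* issued twice, matching the callers above */
	xt_sync(cpuset);
	kpreempt_enable();
}
#endif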