/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the sun-reference-mmu(sfmmu)-
 * specific hat data structures and the sfmmu-specific hat procedures.
 * The machine-independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_HAT_SFMMU_H
#define	_VM_HAT_SFMMU_H

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASM

#include <sys/types.h>

#endif /* _ASM */

#ifdef _KERNEL

#include <sys/pte.h>
#include <vm/mach_sfmmu.h>
#include <sys/mmu.h>

/*
 * Don't alter these without considering changes to ism_map_t.
 */
#define	DEFAULT_ISM_PAGESIZE		MMU_PAGESIZE4M
#define	DEFAULT_ISM_PAGESZC		TTE4M
#define	ISM_PG_SIZE(ism_vbshift)	(1 << ism_vbshift)
#define	ISM_SZ_MASK(ism_vbshift)	(ISM_PG_SIZE(ism_vbshift) - 1)
#define	ISM_MAP_SLOTS	8	/* Change this carefully. */

#ifndef _ASM

#include <sys/t_lock.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <sys/machparam.h>
#include <sys/systm.h>
#include <sys/x_call.h>
#include <vm/page.h>
#include <sys/ksynch.h>

typedef struct hat sfmmu_t;
typedef struct sf_scd sf_scd_t;

/*
 * SFMMU attributes for hat_memload/hat_devload
 */
#define	SFMMU_UNCACHEPTTE	0x01000000	/* uncache in physical $ */
#define	SFMMU_UNCACHEVTTE	0x02000000	/* uncache in virtual $ */
#define	SFMMU_SIDEFFECT		0x04000000	/* set side effect bit */
#define	SFMMU_LOAD_ALLATTR	(HAT_PROT_MASK | HAT_ORDER_MASK |	\
		HAT_ENDIAN_MASK | HAT_NOFAULT | HAT_NOSYNC |		\
		SFMMU_UNCACHEPTTE | SFMMU_UNCACHEVTTE | SFMMU_SIDEFFECT)


/*
 * sfmmu flags for hat_memload/hat_devload
 */
#define	SFMMU_NO_TSBLOAD	0x08000000	/* do not preload tsb */
#define	SFMMU_LOAD_ALLFLAG	(HAT_LOAD | HAT_LOAD_LOCK |		\
		HAT_LOAD_ADV | HAT_LOAD_CONTIG | HAT_LOAD_NOCONSIST |	\
		HAT_LOAD_SHARE | HAT_LOAD_REMAP | SFMMU_NO_TSBLOAD |	\
		HAT_RELOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_TEXT)

/*
 * sfmmu internal flag to hat_pageunload that spares locked mappings
 */
#define	SFMMU_KERNEL_RELOC	0x8000

/*
 * mode for sfmmu_chgattr
 */
#define	SFMMU_SETATTR	0x0
#define	SFMMU_CLRATTR	0x1
#define	SFMMU_CHGATTR	0x2

/*
 * sfmmu specific flags for page_t
 */
#define	P_PNC	0x8		/* non-caching is permanent bit */
#define	P_TNC	0x10		/* non-caching is temporary bit */
#define	P_KPMS	0x20		/* kpm mapped small (vac alias prevention) */
#define	P_KPMC	0x40		/* kpm conflict page (vac alias prevention) */
#define	P_EXEC	0x80		/* execution reference (I-cache filled) */

#define	PP_GENERIC_ATTR(pp)	((pp)->p_nrm & (P_MOD | P_REF | P_RO))
#define	PP_ISMOD(pp)		((pp)->p_nrm & P_MOD)
#define	PP_ISREF(pp)		((pp)->p_nrm & P_REF)
#define	PP_ISRO(pp)		((pp)->p_nrm & P_RO)
#define	PP_ISNC(pp)		((pp)->p_nrm & (P_PNC|P_TNC))
#define	PP_ISPNC(pp)		((pp)->p_nrm & P_PNC)
#ifdef VAC
#define	PP_ISTNC(pp)		((pp)->p_nrm & P_TNC)
#endif
#define	PP_ISKPMS(pp)		((pp)->p_nrm & P_KPMS)
#define	PP_ISKPMC(pp)		((pp)->p_nrm & P_KPMC)
#define	PP_ISEXEC(pp)		((pp)->p_nrm & P_EXEC)

#define	PP_SETMOD(pp)		((pp)->p_nrm |= P_MOD)
#define	PP_SETREF(pp)		((pp)->p_nrm |= P_REF)
#define	PP_SETREFMOD(pp)	((pp)->p_nrm |= (P_REF|P_MOD))
#define	PP_SETRO(pp)		((pp)->p_nrm |= P_RO)
#define	PP_SETREFRO(pp)		((pp)->p_nrm |= (P_REF|P_RO))
#define	PP_SETPNC(pp)		((pp)->p_nrm |= P_PNC)
#ifdef VAC
#define	PP_SETTNC(pp)		((pp)->p_nrm |= P_TNC)
#endif
#define	PP_SETKPMS(pp)		((pp)->p_nrm |= P_KPMS)
#define	PP_SETKPMC(pp)		((pp)->p_nrm |= P_KPMC)
#define	PP_SETEXEC(pp)		((pp)->p_nrm |= P_EXEC)

#define	PP_CLRMOD(pp)		((pp)->p_nrm &= ~P_MOD)
#define	PP_CLRREF(pp)		((pp)->p_nrm &= ~P_REF)
#define	PP_CLRREFMOD(pp)	((pp)->p_nrm &= ~(P_REF|P_MOD))
#define	PP_CLRRO(pp)		((pp)->p_nrm &= ~P_RO)
#define	PP_CLRPNC(pp)		((pp)->p_nrm &= ~P_PNC)
#ifdef VAC
#define	PP_CLRTNC(pp)		((pp)->p_nrm &= ~P_TNC)
#endif
#define	PP_CLRKPMS(pp)		((pp)->p_nrm &= ~P_KPMS)
#define	PP_CLRKPMC(pp)		((pp)->p_nrm &= ~P_KPMC)
#define	PP_CLREXEC(pp)		((pp)->p_nrm &= ~P_EXEC)
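
/*
 * Illustrative sketch (not part of the interface): the PP_* macros above
 * test and update the MMU-dependent bits in pp->p_nrm. A hypothetical
 * caller marking a page referenced and modified, unless the page has been
 * made non-cacheable, might do:
 *
 *	if (!PP_ISNC(pp))
 *		PP_SETREFMOD(pp);	-- sets both P_REF and P_MOD
 */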

/*
 * Support for non-coherent I-cache. If the MD property "coherency"
 * is set to 0, it means that the I-cache must be flushed in
 * software. Use the "soft exec" bit in the TTE to detect when a page
 * has been executed, so that it can be flushed before it is re-used
 * for another program.
 */
#define	TTE_EXECUTED(ttep)						\
	(TTE_IS_EXECUTABLE(ttep) && TTE_IS_SOFTEXEC(ttep))

/*
 * All shared memory segments attached with the SHM_SHARE_MMU flag (ISM)
 * will be constrained to a 4M, 32M or 256M alignment. Also since every
 * newly-created ISM segment is created out of a new address space at base
 * va of 0 we don't need to store it.
 */
#define	ISM_ALIGN(shift)	(1 << shift)	/* base va aligned to <n>M */
#define	ISM_ALIGNED(shift, va)	(((uintptr_t)va & (ISM_ALIGN(shift) - 1)) == 0)
#define	ISM_SHIFT(shift, x)	((uintptr_t)x >> (shift))

/*
 * Pad locks out to cache sub-block boundaries to prevent
 * false sharing, so several processes don't contend for
 * the same line if they aren't using the same lock. Since
 * this is a typedef we also have a bit of freedom in
 * changing lock implementations later if we decide it
 * is necessary.
 */
typedef struct hat_lock {
	kmutex_t hl_mutex;
	uchar_t hl_pad[64 - sizeof (kmutex_t)];
} hatlock_t;

#define	HATLOCK_MUTEXP(hatlockp)	(&((hatlockp)->hl_mutex))

/*
 * All segments mapped with ISM are guaranteed to be 4M, 32M or 256M aligned.
 * Also size is guaranteed to be in 4M, 32M or 256M chunks.
 * ism_seg consists of the following members:
 * [XX..22] base address of ism segment. XX is 63 or 31 depending on whether
 *	caddr_t is 64 bits or 32 bits.
 * [21..0] size of segment.
 *
 * NOTE: Don't alter this structure without changing defines above and
 * the tsb_miss and protection handlers.
 */
typedef struct ism_map {
	uintptr_t	imap_seg;	/* base va + sz of ISM segment */
	uchar_t		imap_vb_shift;	/* mmu_pageshift for ism page size */
	uchar_t		imap_rid;	/* region id for ism */
	ushort_t	imap_hatflags;	/* primary ism page size */
	uint_t		imap_sz_mask;	/* mmu_pagemask for ism page size */
	sfmmu_t		*imap_ismhat;	/* hat id of dummy ISM as */
	struct ism_ment	*imap_ment;	/* pointer to mapping list entry */
} ism_map_t;

#define	ism_start(map)	((caddr_t)((map).imap_seg &	\
			    ~ISM_SZ_MASK((map).imap_vb_shift)))
#define	ism_size(map)	((map).imap_seg & ISM_SZ_MASK((map).imap_vb_shift))
#define	ism_end(map)	((caddr_t)(ism_start(map) + (ism_size(map) *	\
			    ISM_PG_SIZE((map).imap_vb_shift))))
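
/*
 * Illustrative sketch (hypothetical values, not part of the interface):
 * for a 32M ISM segment of 4M pages based at va 0x100000000, the encoding
 * and the decode macros above behave as follows:
 *
 *	map.imap_vb_shift = 22;			-- 4M page: 1 << 22
 *	map.imap_seg = 0x100000000 | 8;		-- base va | size in ISM pages
 *	ism_start(map) == (caddr_t)0x100000000;	-- low 22 bits masked off
 *	ism_size(map) == 8;			-- number of 4M pages
 *	ism_end(map) == (caddr_t)0x102000000;	-- start + 8 * 4M
 */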

/*
 * ISM mapping entry. Used to link all hats sharing an ism_hat.
 * Same function as the p_mapping list for a page.
 */
typedef struct ism_ment {
	sfmmu_t		*iment_hat;	/* back pointer to hat_share() hat */
	caddr_t		iment_base_va;	/* hat's va base for this ism seg */
	struct ism_ment	*iment_next;	/* next ism map entry */
	struct ism_ment	*iment_prev;	/* prev ism map entry */
} ism_ment_t;

/*
 * ISM segment block. One will be hung off the sfmmu structure if a
 * process uses ISM. More will be linked using ismblk_next if more
 * than ISM_MAP_SLOTS segments are attached to this proc.
 *
 * All modifications to fields in this structure will be protected
 * by the hat mutex. In order to avoid grabbing this lock in low level
 * routines (tsb miss/protection handlers and vatopfn) while not
 * introducing any race conditions with hat_unshare, we will set
 * the CTX_ISM_BUSY bit in the ctx struct. Any mmu traps that occur
 * for this ctx while this bit is set will be handled in
 * sfmmu_tsb_exception, where they will synchronize behind the hat mutex.
 */
typedef struct ism_blk {
	ism_map_t	iblk_maps[ISM_MAP_SLOTS];
	struct ism_blk	*iblk_next;
	uint64_t	iblk_nextpa;
} ism_blk_t;

/*
 * TSB access information. All fields are protected by the process's
 * hat lock.
 */

struct tsb_info {
	caddr_t		tsb_va;		/* tsb base virtual address */
	uint64_t	tsb_pa;		/* tsb base physical address */
	struct tsb_info	*tsb_next;	/* next tsb used by this process */
	uint16_t	tsb_szc;	/* tsb size code */
	uint16_t	tsb_flags;	/* flags for this tsb; see below */
	uint_t		tsb_ttesz_mask;	/* page size masks; see below */

	tte_t		tsb_tte;	/* tte to lock into DTLB */
	sfmmu_t		*tsb_sfmmu;	/* sfmmu */
	kmem_cache_t	*tsb_cache;	/* cache from which mem allocated */
	vmem_t		*tsb_vmp;	/* vmem arena from which mem alloc'd */
};

/*
 * Values for "tsb_ttesz_mask" bitmask.
 */
#define	TSB8K	(1 << TTE8K)
#define	TSB64K	(1 << TTE64K)
#define	TSB512K	(1 << TTE512K)
#define	TSB4M	(1 << TTE4M)
#define	TSB32M	(1 << TTE32M)
#define	TSB256M	(1 << TTE256M)

/*
 * Values for "tsb_flags" field.
 */
#define	TSB_RELOC_FLAG		0x1
#define	TSB_FLUSH_NEEDED	0x2
#define	TSB_SWAPPED		0x4
#define	TSB_SHAREDCTX		0x8

#endif /* !_ASM */

/*
 * Data structures for shared hmeblk support.
 */

/*
 * Do not increase the maximum number of ism/hme regions without first
 * checking the impact on ism_map_t, the TSB miss area, the hblk tag and
 * the region id type in the sf_region structure.
 * Initially, shared hmes will only be used for the main text segment,
 * therefore this value will be set to 64; it will be increased when shared
 * libraries are included.
 */

#define	SFMMU_MAX_HME_REGIONS		(64)
#define	SFMMU_HMERGNMAP_WORDS		BT_BITOUL(SFMMU_MAX_HME_REGIONS)

#define	SFMMU_PRIVATE	0
#define	SFMMU_SHARED	1

#define	HMEBLK_ENDPA	1

#ifndef _ASM

#define	SFMMU_MAX_ISM_REGIONS		(64)
#define	SFMMU_ISMRGNMAP_WORDS		BT_BITOUL(SFMMU_MAX_ISM_REGIONS)

#define	SFMMU_RGNMAP_WORDS	(SFMMU_HMERGNMAP_WORDS + SFMMU_ISMRGNMAP_WORDS)

#define	SFMMU_MAX_REGION_BUCKETS	(128)
#define	SFMMU_MAX_SRD_BUCKETS		(2048)

typedef struct sf_hmeregion_map {
	ulong_t	bitmap[SFMMU_HMERGNMAP_WORDS];
} sf_hmeregion_map_t;

typedef struct sf_ismregion_map {
	ulong_t	bitmap[SFMMU_ISMRGNMAP_WORDS];
} sf_ismregion_map_t;

typedef union sf_region_map_u {
	struct _h_rmap_s {
		sf_hmeregion_map_t hmeregion_map;
		sf_ismregion_map_t ismregion_map;
	} h_rmap_s;
	ulong_t	bitmap[SFMMU_RGNMAP_WORDS];
} sf_region_map_t;

#define	SF_RGNMAP_ZERO(map) {				\
	int _i;						\
	for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) {	\
		(map).bitmap[_i] = 0;			\
	}						\
}

/*
 * Returns 1 if map1 and map2 are equal.
 */
#define	SF_RGNMAP_EQUAL(map1, map2, rval) {		\
	int _i;						\
	for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) {	\
		if ((map1)->bitmap[_i] != (map2)->bitmap[_i])	\
			break;				\
	}						\
	if (_i < SFMMU_RGNMAP_WORDS)			\
		rval = 0;				\
	else						\
		rval = 1;				\
}

#define	SF_RGNMAP_ADD(map, r)		BT_SET((map).bitmap, r)
#define	SF_RGNMAP_DEL(map, r)		BT_CLEAR((map).bitmap, r)
#define	SF_RGNMAP_TEST(map, r)		BT_TEST((map).bitmap, r)

/*
 * Tests whether map2 is a subset of map1; returns 1 if
 * this assertion is true.
 */
#define	SF_RGNMAP_IS_SUBSET(map1, map2, rval) {		\
	int _i;						\
	for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) {	\
		if (((map1)->bitmap[_i] & (map2)->bitmap[_i])	\
		    != (map2)->bitmap[_i]) {		\
			break;				\
		}					\
	}						\
	if (_i < SFMMU_RGNMAP_WORDS)			\
		rval = 0;				\
	else						\
		rval = 1;				\
}
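
/*
 * Illustrative sketch (hypothetical caller): the region map predicates
 * above return their result through the rval argument rather than as a
 * macro value, e.g.:
 *
 *	sf_region_map_t map1, map2;
 *	int rv;
 *
 *	SF_RGNMAP_ZERO(map1);
 *	SF_RGNMAP_ZERO(map2);
 *	SF_RGNMAP_ADD(map1, rid);		-- set the bit for region rid
 *	SF_RGNMAP_IS_SUBSET(&map1, &map2, rv);	-- rv == 1: the empty map2
 *						-- is a subset of map1
 */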

#define	SF_SCD_INCR_REF(scdp) {						\
	atomic_add_32((volatile uint32_t *)&(scdp)->scd_refcnt, 1);	\
}

#define	SF_SCD_DECR_REF(srdp, scdp) {				\
	sf_region_map_t _scd_rmap = (scdp)->scd_region_map;	\
	if (!atomic_add_32_nv(					\
	    (volatile uint32_t *)&(scdp)->scd_refcnt, -1)) {	\
		sfmmu_destroy_scd((srdp), (scdp), &_scd_rmap);	\
	}							\
}

/*
 * An sfmmup link in the link list of sfmmups that share the same region.
 */
typedef struct sf_rgn_link {
	sfmmu_t	*next;
	sfmmu_t	*prev;
} sf_rgn_link_t;

/*
 * rgn_flags values.
 */
#define	SFMMU_REGION_HME	0x1
#define	SFMMU_REGION_ISM	0x2
#define	SFMMU_REGION_FREE	0x8

#define	SFMMU_REGION_TYPE_MASK	(0x3)

/*
 * sf_region defines a text or (D)ISM segment which maps
 * the same underlying physical object.
 */
typedef struct sf_region {
	caddr_t			rgn_saddr;	/* base addr of attached seg */
	size_t			rgn_size;	/* size of attached seg */
	void			*rgn_obj;	/* the underlying object id */
	u_offset_t		rgn_objoff;	/* offset in the object mapped */
	uchar_t			rgn_perm;	/* PROT_READ/WRITE/EXEC */
	uchar_t			rgn_pgszc;	/* page size of the region */
	uchar_t			rgn_flags;	/* region type, free flag */
	uchar_t			rgn_id;
	int			rgn_refcnt;	/* # of hats sharing the region */
	/* callback function for hat_unload_callback */
	hat_rgn_cb_func_t	rgn_cb_function;
	struct sf_region	*rgn_hash;	/* hash chain linking the rgns */
	kmutex_t		rgn_mutex;	/* protect region sfmmu list */
	/* A link list of processes attached to this region */
	sfmmu_t			*rgn_sfmmu_head;
	ulong_t			rgn_ttecnt[MMU_PAGE_SIZES];
	uint16_t		rgn_hmeflags;	/* rgn tte size flags */
} sf_region_t;

#define	rgn_next	rgn_hash

/* srd */
typedef struct sf_shared_region_domain {
	vnode_t			*srd_evp;	/* executable vnode */
	/* hme region table */
	sf_region_t		*srd_hmergnp[SFMMU_MAX_HME_REGIONS];
	/* ism region table */
	sf_region_t		*srd_ismrgnp[SFMMU_MAX_ISM_REGIONS];
	/* hash chain linking srds */
	struct sf_shared_region_domain *srd_hash;
	/* pointer to the next free hme region */
	sf_region_t		*srd_hmergnfree;
	/* pointer to the next free ism region */
	sf_region_t		*srd_ismrgnfree;
	/* id of next ism region created */
	uint16_t		srd_next_ismrid;
	/* id of next hme region created */
	uint16_t		srd_next_hmerid;
	uint16_t		srd_ismbusyrgns; /* # of ism rgns in use */
	uint16_t		srd_hmebusyrgns; /* # of hme rgns in use */
	int			srd_refcnt;	 /* # of procs in the srd */
	kmutex_t		srd_mutex;	 /* sync add/remove rgns */
	kmutex_t		srd_scd_mutex;
	sf_scd_t		*srd_scdp;	 /* list of scds in srd */
	/* hash of regions associated with the same executable */
	sf_region_t		*srd_rgnhash[SFMMU_MAX_REGION_BUCKETS];
} sf_srd_t;

typedef struct sf_srd_bucket {
	kmutex_t	srdb_lock;
	sf_srd_t	*srdb_srdp;
} sf_srd_bucket_t;

/*
 * The value of SFMMU_L1_HMERLINKS and SFMMU_L2_HMERLINKS will be increased
 * to 16 when the use of shared hmes for shared libraries is enabled.
 */

#define	SFMMU_L1_HMERLINKS		(8)
#define	SFMMU_L2_HMERLINKS		(8)
#define	SFMMU_L1_HMERLINKS_SHIFT	(3)
#define	SFMMU_L1_HMERLINKS_MASK		(SFMMU_L1_HMERLINKS - 1)
#define	SFMMU_L2_HMERLINKS_MASK		(SFMMU_L2_HMERLINKS - 1)
#define	SFMMU_L1_HMERLINKS_SIZE		\
	(SFMMU_L1_HMERLINKS * sizeof (sf_rgn_link_t *))
#define	SFMMU_L2_HMERLINKS_SIZE		\
	(SFMMU_L2_HMERLINKS * sizeof (sf_rgn_link_t))

#if (SFMMU_L1_HMERLINKS * SFMMU_L2_HMERLINKS < SFMMU_MAX_HME_REGIONS)
#error Not Enough HMERLINKS
#endif

/*
 * This macro grabs the hat lock and allocates the level 2 hat chain
 * associated with a shme rgn. In the majority of cases, the macro
 * is called with alloc = 0 and lock = 0.
 * A pointer to the level 2 sf_rgn_link_t structure is returned in the lnkp
 * parameter.
 */
#define	SFMMU_HMERID2RLINKP(sfmmup, rid, lnkp, alloc, lock)		\
{									\
	int _l1ix = ((rid) >> SFMMU_L1_HMERLINKS_SHIFT) &		\
	    SFMMU_L1_HMERLINKS_MASK;					\
	int _l2ix = ((rid) & SFMMU_L2_HMERLINKS_MASK);			\
	hatlock_t *_hatlockp;						\
	lnkp = (sfmmup)->sfmmu_hmeregion_links[_l1ix];			\
	if (lnkp != NULL) {						\
		lnkp = &lnkp[_l2ix];					\
	} else if (alloc && lock) {					\
		lnkp = kmem_zalloc(SFMMU_L2_HMERLINKS_SIZE, KM_SLEEP);	\
		_hatlockp = sfmmu_hat_enter(sfmmup);			\
		if ((sfmmup)->sfmmu_hmeregion_links[_l1ix] != NULL) {	\
			sfmmu_hat_exit(_hatlockp);			\
			kmem_free(lnkp, SFMMU_L2_HMERLINKS_SIZE);	\
			lnkp = (sfmmup)->sfmmu_hmeregion_links[_l1ix];	\
			ASSERT(lnkp != NULL);				\
		} else {						\
			(sfmmup)->sfmmu_hmeregion_links[_l1ix] = lnkp;	\
			sfmmu_hat_exit(_hatlockp);			\
		}							\
		lnkp = &lnkp[_l2ix];					\
	} else if (alloc) {						\
		lnkp = kmem_zalloc(SFMMU_L2_HMERLINKS_SIZE, KM_SLEEP);	\
		ASSERT((sfmmup)->sfmmu_hmeregion_links[_l1ix] == NULL);	\
		(sfmmup)->sfmmu_hmeregion_links[_l1ix] = lnkp;		\
		lnkp = &lnkp[_l2ix];					\
	}								\
}
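
/*
 * Illustrative sketch (hypothetical caller): the common lookup case passes
 * alloc = 0 and lock = 0, and simply maps a region id to its level 2 link,
 * or to NULL if the level 1 slot was never allocated:
 *
 *	sf_rgn_link_t *lnkp;
 *
 *	SFMMU_HMERID2RLINKP(sfmmup, rid, lnkp, 0, 0);
 *	if (lnkp != NULL) {
 *		-- lnkp->next / lnkp->prev link this hat into the list
 *		-- of sfmmus attached to region rid
 *	}
 */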

/*
 * Per cpu pending freelist of hmeblks.
 */
typedef struct cpu_hme_pend {
	struct   hme_blk *chp_listp;
	kmutex_t chp_mutex;
	time_t   chp_timestamp;
	uint_t   chp_count;
	uint8_t  chp_pad[36];		/* pad to 64 bytes */
} cpu_hme_pend_t;

/*
 * The default value of the threshold for the per cpu pending queues of
 * hmeblks. The queues are flushed if either the number of hmeblks on the
 * queue is above the threshold, or one second has elapsed since the last
 * flush.
 */
#define	CPU_HME_PEND_THRESH 1000

/*
 * Per-MMU context domain kstats.
 *
 * TSB Miss Exceptions
 *	Number of times a TSB miss exception is handled in an MMU. See
 *	sfmmu_tsbmiss_exception() for more details.
 * TSB Raise Exception
 *	Number of times the CPUs within an MMU are cross-called
 *	to invalidate either a specific process context (when the process
 *	switches MMU contexts) or the context of any process that is
 *	running on those CPUs (as part of the MMU context wrap-around).
 * Wrap Around
 *	The number of times a wrap-around of MMU context happens.
 */
typedef enum mmu_ctx_stat_types {
	MMU_CTX_TSB_EXCEPTIONS,		/* TSB miss exceptions handled */
	MMU_CTX_TSB_RAISE_EXCEPTION,	/* ctx invalidation cross calls */
	MMU_CTX_WRAP_AROUND,		/* wraparounds */
	MMU_CTX_NUM_STATS
} mmu_ctx_stat_t;

/*
 * Per-MMU context domain structure. This is instantiated the first time a CPU
 * belonging to the MMU context domain is configured into the system, at boot
 * time or at DR time.
 *
 * mmu_gnum
 *	The current generation number for the context IDs on this MMU context
 *	domain. It is protected by mmu_lock.
 * mmu_cnum
 *	The current cnum to be allocated on this MMU context domain. It
 *	is protected via CAS.
 * mmu_nctxs
 *	The max number of context IDs supported on every CPU in this
 *	MMU context domain. It is 8K except for Rock where it is 64K.
 *	This is needed here in case the system supports mixed types of
 *	processors/MMUs. It also helps to make the ctx switch code access
 *	fewer cache lines, i.e. there is no need to retrieve it from some
 *	global nctxs.
 * mmu_lock
 *	The mutex spin lock used to serialize context ID wrap around.
 * mmu_idx
 *	The index for this MMU context domain structure in the global array
 *	mmu_ctxdoms.
 * mmu_ncpus
 *	The actual number of CPUs that have been configured in this
 *	MMU context domain. This also acts as a reference count for the
 *	structure. When the last CPU in an MMU context domain is unconfigured,
 *	the structure is freed. It is protected by mmu_lock.
 * mmu_cpuset
 *	The CPU set of configured CPUs for this MMU context domain. Used
 *	to cross-call all the CPUs in the MMU context domain to invalidate
 *	context IDs during a wraparound operation. It is protected by mmu_lock.
 */

typedef struct mmu_ctx {
	uint64_t	mmu_gnum;
	uint_t		mmu_cnum;
	uint_t		mmu_nctxs;
	kmutex_t	mmu_lock;
	uint_t		mmu_idx;
	uint_t		mmu_ncpus;
	cpuset_t	mmu_cpuset;
	kstat_t		*mmu_kstat;
	kstat_named_t	mmu_kstat_data[MMU_CTX_NUM_STATS];
} mmu_ctx_t;

#define	mmu_tsb_exceptions	\
		mmu_kstat_data[MMU_CTX_TSB_EXCEPTIONS].value.ui64
#define	mmu_tsb_raise_exception	\
		mmu_kstat_data[MMU_CTX_TSB_RAISE_EXCEPTION].value.ui64
#define	mmu_wrap_around		\
		mmu_kstat_data[MMU_CTX_WRAP_AROUND].value.ui64

extern uint_t		max_mmu_ctxdoms;
extern mmu_ctx_t	**mmu_ctxs_tbl;
extern uint_t		nctxs;

extern void	sfmmu_cpu_init(cpu_t *);
extern void	sfmmu_cpu_cleanup(cpu_t *);

/*
 * The following structure is used to get MMU context domain information for
 * a CPU from the platform.
 *
 * mmu_idx
 *	The MMU context domain index within the global array mmu_ctxs
 * mmu_nctxs
 *	The number of context IDs supported in the MMU context domain
 *	(64K for Rock)
 */
typedef struct mmu_ctx_info {
	uint_t		mmu_idx;
	uint_t		mmu_nctxs;
} mmu_ctx_info_t;

#pragma weak plat_cpuid_to_mmu_ctx_info

extern void	plat_cpuid_to_mmu_ctx_info(processorid_t, mmu_ctx_info_t *);

/*
 * Each address space has an array of sfmmu_ctx_t structures, one structure
 * per MMU context domain.
 *
 * cnum
 *	The context ID allocated for an address space on an MMU context domain
 * gnum
 *	The generation number for the context ID in the MMU context domain.
 *
 * This structure needs to be a power-of-two in size.
 */
typedef struct sfmmu_ctx {
	uint64_t	gnum:48;
	uint64_t	cnum:16;
} sfmmu_ctx_t;
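
/*
 * Illustrative note (assumed layout): with gnum declared first, the two
 * bitfields pack into a single 64-bit word as (gnum << 16) | cnum, which
 * is what the SFMMU_MMU_GNUM_RSHIFT and SFMMU_MMU_CNUM_LSHIFT constants
 * defined later in this file are used to unpack. A context ID is stale
 * whenever its gnum no longer matches the mmu_gnum of its MMU context
 * domain.
 */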

/*
 * The platform dependent hat structure.
 * tte counts should be protected by cas.
 * cpuset is protected by cas.
 *
 * ttecnt accounting for mappings which do not use shared hme is carried out
 * during pagefault handling. In the shared hme case, only the first process
 * to access a mapping generates a pagefault; subsequent processes simply
 * find the shared hme entry during trap handling and therefore there is no
 * corresponding event to initiate ttecnt accounting. Currently, as shared
 * hmes are only used for text segments, when joining a region we assume the
 * worst case and add the number of ttes required to map the entire region
 * to the ttecnt corresponding to the region pagesize. However, if the region
 * has a 4M pagesize and memory is low, the allocation of 4M pages may fail;
 * then 8K pages will be allocated instead and the first TSB which stores 8K
 * mappings will potentially be undersized. To compensate for the potential
 * underaccounting in this case we always add 1/4 of the region size to the 8K
 * ttecnt.
 *
 * Note that sfmmu_xhat_provider MUST be the first element.
 */

struct hat {
	void		*sfmmu_xhat_provider;	/* NULL for CPU hat */
	cpuset_t	sfmmu_cpusran;	/* cpu bit mask for efficient xcalls */
	struct	as	*sfmmu_as;	/* as this hat provides mapping for */
	/* per pgsz private ttecnt + shme rgns ttecnt for rgns not in SCD */
	ulong_t		sfmmu_ttecnt[MMU_PAGE_SIZES];
	/* shme rgns ttecnt for rgns in SCD */
	ulong_t		sfmmu_scdrttecnt[MMU_PAGE_SIZES];
	/* est. ism ttes that are NOT in a SCD */
	ulong_t		sfmmu_ismttecnt[MMU_PAGE_SIZES];
	/* ttecnt for isms that are in a SCD */
	ulong_t		sfmmu_scdismttecnt[MMU_PAGE_SIZES];
	/* inflate tsb0 to allow for large page alloc failure in region */
	ulong_t		sfmmu_tsb0_4minflcnt;
	union _h_un {
		ism_blk_t	*sfmmu_iblkp;	/* maps to ismhat(s) */
		ism_ment_t	*sfmmu_imentp;	/* ism hat's mapping list */
	} h_un;
	uint_t		sfmmu_free:1;	/* hat to be freed - set on as_free */
	uint_t		sfmmu_ismhat:1;	/* hat is dummy ism hatid */
	uint_t		sfmmu_scdhat:1;	/* hat is dummy scd hatid */
	uchar_t		sfmmu_rmstat;	/* refmod stats refcnt */
	ushort_t	sfmmu_clrstart;	/* start color bin for page coloring */
	ushort_t	sfmmu_clrbin;	/* per as phys page coloring bin */
	ushort_t	sfmmu_flags;	/* flags */
	uchar_t		sfmmu_tteflags;	/* pgsz flags */
	uchar_t		sfmmu_rtteflags;	/* pgsz flags for SRD hmes */
	struct tsb_info	*sfmmu_tsb;	/* list of per as tsbs */
	uint64_t	sfmmu_ismblkpa;	/* pa of sfmmu_iblkp, or -1 */
	lock_t		sfmmu_ctx_lock;	/* sync ctx alloc and invalidation */
	kcondvar_t	sfmmu_tsb_cv;	/* signals TSB swapin or relocation */
	uchar_t		sfmmu_cext;	/* context page size encoding */
	uint8_t		sfmmu_pgsz[MMU_PAGE_SIZES];	/* ranking for MMU */
	sf_srd_t	*sfmmu_srdp;
	sf_scd_t	*sfmmu_scdp;	/* scd this address space belongs to */
	sf_region_map_t	sfmmu_region_map;
	sf_rgn_link_t	*sfmmu_hmeregion_links[SFMMU_L1_HMERLINKS];
	sf_rgn_link_t	sfmmu_scd_link;	/* link to scd or pending queue */
#ifdef sun4v
	struct hv_tsb_block sfmmu_hvblock;
#endif
	/*
	 * sfmmu_ctxs is a variable length array of max_mmu_ctxdoms # of
	 * elements. max_mmu_ctxdoms is determined at run-time.
	 * sfmmu_ctxs[1] is just the first element of an array; it always
	 * has to be the last field to ensure that the memory allocated
	 * for sfmmu_ctxs is consecutive with the memory of the rest of
	 * the hat data structure.
	 */
	sfmmu_ctx_t	sfmmu_ctxs[1];

};

#define	sfmmu_iblk	h_un.sfmmu_iblkp
#define	sfmmu_iment	h_un.sfmmu_imentp

#define	sfmmu_hmeregion_map	sfmmu_region_map.h_rmap_s.hmeregion_map
#define	sfmmu_ismregion_map	sfmmu_region_map.h_rmap_s.ismregion_map

#define	SF_RGNMAP_ISNULL(sfmmup)	\
	(sfrgnmap_isnull(&(sfmmup)->sfmmu_region_map))
#define	SF_HMERGNMAP_ISNULL(sfmmup)	\
	(sfhmergnmap_isnull(&(sfmmup)->sfmmu_hmeregion_map))

struct sf_scd {
	sfmmu_t		*scd_sfmmup;	/* shared context hat */
	/* per pgsz ttecnt for shme rgns in SCD */
	ulong_t		scd_rttecnt[MMU_PAGE_SIZES];
	uint_t		scd_refcnt;	/* address spaces attached to scd */
	sf_region_map_t scd_region_map;	/* bit mask of attached segments */
	sf_scd_t	*scd_next;	/* link pointers for srd_scd list */
	sf_scd_t	*scd_prev;
	sfmmu_t		*scd_sf_list;	/* list of doubly linked hat structs */
	kmutex_t	scd_mutex;
	/*
	 * Link used to add an scd to the sfmmu_iment list.
	 */
	ism_ment_t	scd_ism_links[SFMMU_MAX_ISM_REGIONS];
};

#define	scd_hmeregion_map	scd_region_map.h_rmap_s.hmeregion_map
#define	scd_ismregion_map	scd_region_map.h_rmap_s.ismregion_map

extern int disable_shctx;
extern int shctx_on;

/*
 * bit mask for managing vac conflicts on large pages.
 * bit 1 is for uncache flag.
 * bits 2 through min(num of cache colors + 1, 31) are
 * for cache colors that have already been flushed.
 */
#ifdef VAC
#define	CACHE_NUM_COLOR	(shm_alignment >> MMU_PAGESHIFT)
#else
#define	CACHE_NUM_COLOR	1
#endif

#define	CACHE_VCOLOR_MASK(vcolor)	(2 << (vcolor & (CACHE_NUM_COLOR - 1)))

#define	CacheColor_IsFlushed(flag, vcolor)	\
	((flag) & CACHE_VCOLOR_MASK(vcolor))

#define	CacheColor_SetFlushed(flag, vcolor)	\
	((flag) |= CACHE_VCOLOR_MASK(vcolor))
/*
 * Flags passed to sfmmu_page_cache to flush page from vac or not.
 */
#define	CACHE_FLUSH	0
#define	CACHE_NO_FLUSH	1

/*
 * Flags passed to sfmmu_tlbcache_demap
 */
#define	FLUSH_NECESSARY_CPUS	0
#define	FLUSH_ALL_CPUS		1

#ifdef DEBUG
/*
 * For debugging purposes only. May be removed later.
 */
struct ctx_trace {
	sfmmu_t		*sc_sfmmu_stolen;
	sfmmu_t		*sc_sfmmu_stealing;
	clock_t		sc_time;
	ushort_t	sc_type;
	ushort_t	sc_cnum;
};
#define	CTX_TRC_STEAL	0x1
#define	CTX_TRC_FREE	0x0
#define	TRSIZE	0x400
#define	NEXT_CTXTR(ptr)	(((ptr) >= ctx_trace_last) ? \
		ctx_trace_first : ((ptr) + 1))
#define	TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type) \
	mutex_enter(mutex);					\
	(ptr)->sc_sfmmu_stolen = (stolen_sfmmu);		\
	(ptr)->sc_sfmmu_stealing = (stealing_sfmmu);		\
	(ptr)->sc_cnum = (cnum);				\
	(ptr)->sc_type = (type);				\
	(ptr)->sc_time = lbolt;					\
	(ptr) = NEXT_CTXTR(ptr);				\
	num_ctx_stolen += (type);				\
	mutex_exit(mutex);
#else

#define	TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type)

#endif /* DEBUG */

#endif /* !_ASM */

/*
 * Macros for sfmmup->sfmmu_flags access. The macros that change the flags
 * ASSERT() that we're holding the HAT lock before changing the flags;
 * however callers that read the flags may do so without acquiring the lock
 * in a fast path, and then recheck the flag after acquiring the lock in
 * a slow path.
 */
#define	SFMMU_FLAGS_ISSET(sfmmup, flags) \
	(((sfmmup)->sfmmu_flags & (flags)) == (flags))

#define	SFMMU_FLAGS_CLEAR(sfmmup, flags) \
	(ASSERT(sfmmu_hat_lock_held((sfmmup))), \
	(sfmmup)->sfmmu_flags &= ~(flags))

#define	SFMMU_FLAGS_SET(sfmmup, flags) \
	(ASSERT(sfmmu_hat_lock_held((sfmmup))), \
	(sfmmup)->sfmmu_flags |= (flags))

#define	SFMMU_TTEFLAGS_ISSET(sfmmup, flags) \
	((((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) & (flags)) == \
	    (flags))
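
/*
 * Illustrative sketch (hypothetical caller) of the fast path/slow path
 * pattern described above, using HAT_SWAPPED (defined below):
 *
 *	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {	-- unlocked peek
 *		hatlockp = sfmmu_hat_enter(sfmmup);
 *		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
 *			-- recheck under the lock, then handle it
 *		}
 *		sfmmu_hat_exit(hatlockp);
 *	}
 */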

/*
 * sfmmu tte HAT flags, must fit in 8 bits
 */
#define	HAT_CHKCTX1_FLAG 0x1
#define	HAT_64K_FLAG	(0x1 << TTE64K)
#define	HAT_512K_FLAG	(0x1 << TTE512K)
#define	HAT_4M_FLAG	(0x1 << TTE4M)
#define	HAT_32M_FLAG	(0x1 << TTE32M)
#define	HAT_256M_FLAG	(0x1 << TTE256M)

/*
 * sfmmu HAT flags, 16 bits at the moment.
 */
#define	HAT_4MTEXT_FLAG		0x01
#define	HAT_32M_ISM		0x02
#define	HAT_256M_ISM		0x04
#define	HAT_SWAPPED		0x08	/* swapped out */
#define	HAT_SWAPIN		0x10	/* swapping in */
#define	HAT_BUSY		0x20	/* replacing TSB(s) */
#define	HAT_ISMBUSY		0x40	/* adding/removing/traversing ISM maps */

#define	HAT_CTX1_FLAG		0x100	/* ISM imap hatflag for ctx1 */
#define	HAT_JOIN_SCD		0x200	/* region is joining scd */
#define	HAT_ALLCTX_INVALID	0x400	/* all per-MMU ctxs are invalidated */

#define	SFMMU_LGPGS_INUSE(sfmmup)	\
	(((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) ||	\
	    ((sfmmup)->sfmmu_iblk != NULL))

/*
 * Starting with context 0, the first NUM_LOCKED_CTXS contexts
 * are locked so that sfmmu_getctx can't steal any of these
 * contexts. At the time this software was being developed, the
 * only context that needs to be locked is context 0 (the kernel
 * context), and context 1 (reserved for stolen context). So this constant
 * was originally defined to be 2.
 *
 * For sun4v only, USER_CONTEXT_TYPE represents any user context. Many
 * routines only care whether the context is kernel, invalid or user.
 */

#define	NUM_LOCKED_CTXS 2
#define	INVALID_CONTEXT	1

#ifdef sun4v
#define	USER_CONTEXT_TYPE	NUM_LOCKED_CTXS
#endif
#if defined(sun4v) || defined(UTSB_PHYS)
/*
 * Get the location in the 4MB base TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe = 2nd TSB base register
 *   tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 2nd TSB
 */

#define	GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT4M, tmp2;				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */

#define	GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)

/*
 * Get the location in the 3rd TSB of the tsbe for this fault.
 * The 3rd TSB corresponds to the shared context, and is used
 * for 8K - 512k pages.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe, tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 3rd TSB
 */

#define	GET_3RD_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT, tmp2;				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */

#define	GET_4TH_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)
/*
 * Copy the sfmmu_region_map or scd_region_map to the tsbmiss
 * shmermap or scd_shmermap, from sfmmu_load_mmustate.
 */
#define	SET_REGION_MAP(rgn_map, tsbmiss_map, cnt, tmp, label)	\
	/* BEGIN CSTYLED */					\
label:								;\
	ldx	[rgn_map], tmp					;\
	dec	cnt						;\
	add	rgn_map, CLONGSIZE, rgn_map			;\
	stx	tmp, [tsbmiss_map]				;\
	brnz,pt	cnt, label					;\
	add	tsbmiss_map, CLONGSIZE, tsbmiss_map		\
	/* END CSTYLED */

/*
 * If there is no scd, then zero the tsbmiss scd_shmermap,
 * from sfmmu_load_mmustate.
 */
#define	ZERO_REGION_MAP(tsbmiss_map, cnt, label)		\
	/* BEGIN CSTYLED */					\
label:								;\
	dec	cnt						;\
	stx	%g0, [tsbmiss_map]				;\
	brnz,pt	cnt, label					;\
	add	tsbmiss_map, CLONGSIZE, tsbmiss_map
	/* END CSTYLED */

/*
 * Set hmemisc to 1 if the shared hme is also part of an scd.
 * In:
 *   tsbarea = tsbmiss area (not clobbered)
 *   hmeblkpa = hmeblkpa + hmentoff + SFHME_TTE (not clobbered)
 *   hmentoff = hmentoff + SFHME_TTE = tte offset (clobbered)
 * Out:
 *   use_shctx = 1 if shme is in scd and 0 otherwise
 */
#define	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hmentoff, use_shctx)		      \
	/* BEGIN CSTYLED */						      \
	sub	hmeblkpa, hmentoff, hmentoff	/* hmentoff = hmeblkpa */    ;\
	add	hmentoff, HMEBLK_TAG, hmentoff				     ;\
	ldxa	[hmentoff]ASI_MEM, hmentoff	/* read 1st part of tag */   ;\
	and	hmentoff, HTAG_RID_MASK, hmentoff	/* mask off rid */   ;\
	and	hmentoff, BT_ULMASK, use_shctx	/* mask bit index */	     ;\
	srlx	hmentoff, BT_ULSHIFT, hmentoff	/* extract word */	     ;\
	sllx	hmentoff, CLONGSHIFT, hmentoff	/* index */		     ;\
	add	tsbarea, hmentoff, hmentoff	/* add to tsbarea */	     ;\
	ldx	[hmentoff + TSBMISS_SCDSHMERMAP], hmentoff	/* scdrgn */ ;\
	srlx	hmentoff, use_shctx, use_shctx				     ;\
	and	use_shctx, 0x1, use_shctx				      \
	/* END CSTYLED */

/*
 * Synthesize a TSB base register contents for a process.
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   tsbreg, tmp1 = scratch registers
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_UTSBREG(tsbinfo, tsbreg, tmp1)			\
	ldx	[tsbinfo + TSBINFO_PADDR], tsbreg;		\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1;		\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1;			\
	or	tsbreg, tmp1, tsbreg;


/*
 * Load TSB base register to TSBMISS area for private contexts.
 * This register contains utsb_pabase in bits 63:13, and TSB size
 * code in bits 2:0.
 *
 * For private context
 * In:
 *   tsbreg = value to load (ro)
 *   regnum = constant or register
 *   tmp1 = scratch register
 * Out:
 *   Specified scratchpad register updated
 *
 */
#define	SET_UTSBREG(regnum, tsbreg, tmp1)				\
	mov	regnum, tmp1;						\
	stxa	tsbreg, [tmp1]ASI_SCRATCHPAD	/* save tsbreg */
/*
 * Get TSB base register from the scratchpad for private contexts
 *
 * In:
 *   regnum = constant or register
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg from the specified scratchpad register
 */
#define	GET_UTSBREG(regnum, tsbreg)					\
	mov	regnum, tsbreg;						\
	ldxa	[tsbreg]ASI_SCRATCHPAD, tsbreg

/*
 * Load TSB base register to TSBMISS area for shared contexts.
 * This register contains utsb_pabase in bits 63:13, and TSB size
 * code in bits 2:0.
 *
 * In:
 *   tsbmiss = pointer to tsbmiss area
 *   tsbmissoffset = offset to right tsb pointer
 *   tsbreg = value to load (ro)
 * Out:
 *   Specified tsbmiss area updated
 *
 */
#define	SET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg)		\
	stx	tsbreg, [tsbmiss + tsbmissoffset]	/* save tsbreg */

/*
 * Get TSB base register from the scratchpad for
 * shared contexts
 *
 * In:
 *   tsbmiss = pointer to tsbmiss area
 *   tsbmissoffset = offset to right tsb pointer
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg from the specified scratchpad register
 */
#define	GET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg)		\
	ldx	[tsbmiss + tsbmissoffset], tsbreg

#endif /* defined(sun4v) || defined(UTSB_PHYS) */

#ifndef _ASM

/*
 * Kernel page relocation stuff.
 */
struct sfmmu_callback {
	int key;
	int (*prehandler)(caddr_t, uint_t, uint_t, void *);
	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t);
	int (*errhandler)(caddr_t, uint_t, uint_t, void *);
	int capture_cpus;
};

extern int sfmmu_max_cb_id;
extern struct sfmmu_callback *sfmmu_cb_table;

extern int hat_kpr_enabled;

struct pa_hment;

/*
 * RFE: With multihat gone we gain back an int. We could use this to
 * keep ref bits on a per cpu basis to eliminate xcalls.
 */
struct sf_hment {
	tte_t hme_tte;			/* tte for this hment */

	union {
		struct page *page;	/* what page this maps */
		struct pa_hment *data;	/* pa_hment */
	} sf_hment_un;

	struct	sf_hment *hme_next;	/* next hment */
	struct	sf_hment *hme_prev;	/* prev hment */
};

struct pa_hment {
	caddr_t		addr;		/* va */
	uint_t		len;		/* bytes */
	ushort_t	flags;		/* internal flags */
	ushort_t	refcnt;		/* reference count */
	id_t		cb_id;		/* callback id, table index */
	void		*pvt;		/* handler's private data */
	struct sf_hment	sfment;		/* corresponding dummy sf_hment */
};

#define	hme_page		sf_hment_un.page
#define	hme_data		sf_hment_un.data
#define	hme_size(sfhmep)	((int)(TTE_CSZ(&(sfhmep)->hme_tte)))
#define	PAHME_SZ		(sizeof (struct pa_hment))
#define	SFHME_SZ		(sizeof (struct sf_hment))

#define	IS_PAHME(hme)	((hme)->hme_tte.ll == 0)

/*
 * hmeblk_tag structure
 * structure used to obtain a match on a hme_blk. Currently consists of
 * the address of the sfmmu struct (or hatid), the base page address of the
 * hme_blk, and the rehash count. The rehash count is actually only 2 bits
 * and has the following meaning:
 * 1 = 8k or 64k hash sequence.
 * 2 = 512k hash sequence.
 * 3 = 4M hash sequence.
 * We require this count because we don't want to get a false hit on a 512K or
 * 4M rehash with a base address corresponding to an 8k or 64k hmeblk.
 * Note: The ordering and size of the hmeblk_tag members are implicitly known
 * by the tsb miss handlers written in assembly. Do not change this structure
 * without checking those routines. See HTAG_SFMMUPSZ define.
 */

/*
 * In private hmeblks the hblk_rid field must be SFMMU_INVALID_RID.
 */
typedef union {
	struct {
		uint64_t	hblk_basepg: 51, /* hme_blk base pg # */
				hblk_rehash: 3,	/* rehash number */
				hblk_rid: 10;	/* hme_blk region id */
		void		*hblk_id;
	} hblk_tag_un;
	uint64_t		htag_tag[2];
} hmeblk_tag;

#define	htag_id		hblk_tag_un.hblk_id
#define	htag_bspage	hblk_tag_un.hblk_basepg
#define	htag_rehash	hblk_tag_un.hblk_rehash
#define	htag_rid	hblk_tag_un.hblk_rid

#endif /* !_ASM */

#define	HTAG_REHASH_SHIFT	10
#define	HTAG_MAX_RID	(((0x1 << HTAG_REHASH_SHIFT) - 1))
#define	HTAG_RID_MASK	HTAG_MAX_RID
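
/*
 * Illustrative note (not part of the interface): for a private hmeblk the
 * tag is built from the owning hat and the hmeblk's base page, e.g.:
 *
 *	hmeblk_tag tag;
 *
 *	tag.htag_id = (void *)sfmmup;		-- hatid; srdp if shared
 *	tag.htag_bspage = HME_HASH_BSPAGE(vaddr, HME_HASH_SHIFT(TTE8K));
 *	tag.htag_rehash = HME_HASH_REHASH(TTE8K);	-- 1: 8k/64k sequence
 *	tag.htag_rid = SFMMU_INVALID_SHMERID;		-- defined below
 *
 * (HME_HASH_BSPAGE, HME_HASH_SHIFT and HME_HASH_REHASH are defined later
 * in this file.) The bspage/rehash/rid bitfields occupy htag_tag[0] and
 * hblk_id occupies htag_tag[1], which is why HTAGS_EQ below can compare
 * tags as two 64-bit words.
 */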

/* used for tagging all per sfmmu (i.e. non SRD) private hmeblks */
#define	SFMMU_INVALID_SHMERID	HTAG_MAX_RID

#if SFMMU_INVALID_SHMERID < SFMMU_MAX_HME_REGIONS
#error SFMMU_INVALID_SHMERID < SFMMU_MAX_HME_REGIONS
#endif

#define	SFMMU_IS_SHMERID_VALID(rid)	((rid) != SFMMU_INVALID_SHMERID)

/* ISM regions */
#define	SFMMU_INVALID_ISMRID	0xff

#if SFMMU_INVALID_ISMRID < SFMMU_MAX_ISM_REGIONS
#error SFMMU_INVALID_ISMRID < SFMMU_MAX_ISM_REGIONS
#endif

#define	SFMMU_IS_ISMRID_VALID(rid)	((rid) != SFMMU_INVALID_ISMRID)


#define	HTAGS_EQ(tag1, tag2)	(((tag1.htag_tag[0] ^ tag2.htag_tag[0]) | \
				(tag1.htag_tag[1] ^ tag2.htag_tag[1])) == 0)

/*
 * This macro must only be used for comparing tags in shared hmeblks.
 */
#define	HTAGS_EQ_SHME(hmetag, tag, hrmap)				\
	(((hmetag).htag_rid != SFMMU_INVALID_SHMERID) &&		\
	(((((hmetag).htag_tag[0] ^ (tag).htag_tag[0]) &			\
		~HTAG_RID_MASK) |					\
	((hmetag).htag_tag[1] ^ (tag).htag_tag[1])) == 0) &&		\
	SF_RGNMAP_TEST(hrmap, hmetag.htag_rid))

#define	HME_REHASH(sfmmup)						\
	((sfmmup)->sfmmu_ttecnt[TTE512K] != 0 ||			\
	(sfmmup)->sfmmu_ttecnt[TTE4M] != 0 ||				\
	(sfmmup)->sfmmu_ttecnt[TTE32M] != 0 ||				\
	(sfmmup)->sfmmu_ttecnt[TTE256M] != 0)

#define	NHMENTS 8		/* # of hments in an 8k hme_blk */
				/* needs to be multiple of 2 */

#ifndef _ASM

#ifdef HBLK_TRACE

#define	HBLK_LOCK		1
#define	HBLK_UNLOCK		0
#define	HBLK_STACK_DEPTH	6
#define	HBLK_AUDIT_CACHE_SIZE	16
#define	HBLK_LOCK_PATTERN	0xaaaaaaaa
#define	HBLK_UNLOCK_PATTERN	0xbbbbbbbb

struct hblk_lockcnt_audit {
	int		flag;		/* lock or unlock */
	kthread_id_t	thread;
	int		depth;
	pc_t		stack[HBLK_STACK_DEPTH];
};

#endif /* HBLK_TRACE */


/*
 * Hment block structure.
 * The hme_blk is the node data structure which the hash structure
 * maintains. An hme_blk can have 2 different sizes depending on the
 * number of hments it implicitly contains. When dealing with 64K, 512K,
 * or 4M hments there is one hment per hme_blk. When dealing with
 * 8k hments we allocate an hme_blk plus an additional 7 hments to
 * give us a total of 8 (NHMENTS) hments that can be referenced through a
 * hme_blk.
 *
 * The hmeblk structure contains 2 tte reference counters used to determine if
 * it is ok to free up the hmeblk. Both counters have to be zero in order
 * to be able to free up the hmeblk. They are protected by cas.
 * hblk_hmecnt is the number of hments present on pp mapping lists.
 * hblk_vcnt reflects the number of valid ttes in the hmeblk.
 *
 * The hmeblk now also has per tte lock cnts. This is required because
 * the counts can be high and there are not enough bits in the tte. When
 * physio is fixed to not lock the translations we should be able to move
 * the lock cnt back to the tte. See bug id 1198554.
 *
 * Note that xhat_hme_blk's layout follows this structure: hme_blk_misc
 * and sf_hment are at the same offsets in both structures. Whenever
 * hme_blk is changed, xhat_hme_blk may need to be updated as well.
 */
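
/*
 * Illustrative sketch of the freeing rule described above (the real code
 * performs this check under the hash bucket lock; see HME_HASH_SEARCH_PREV
 * below):
 *
 *	if (hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0) {
 *		-- no valid ttes and no hments on mapping lists: the
 *		-- hmeblk may be unlinked from its bucket and freed
 *	}
 */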

struct hme_blk_misc {
	uint_t	notused:25;
	uint_t	shared_bit:1;	/* set for SRD shared hmeblk */
	uint_t	xhat_bit:1;	/* set for an xhat hme_blk */
	uint_t	shadow_bit:1;	/* set for a shadow hme_blk */
	uint_t	nucleus_bit:1;	/* set for a nucleus hme_blk */
	uint_t	ttesize:3;	/* contains ttesz of hmeblk */
};

struct hme_blk {
	volatile uint64_t hblk_nextpa;	/* physical address for hash list */

	hmeblk_tag	hblk_tag;	/* tag used to obtain an hmeblk match */

	struct hme_blk	*hblk_next;	/* on free list or on hash list */
					/* protected by hash lock */

	struct hme_blk	*hblk_shadow;	/* pts to shadow hblk */
					/* protected by hash lock */
	uint_t		hblk_span;	/* span of memory hmeblk maps */

	struct hme_blk_misc	hblk_misc;

	union {
		struct {
			ushort_t hblk_hmecount;	/* hment on mlists counter */
			ushort_t hblk_validcnt;	/* valid tte reference count */
		} hblk_counts;
		uint_t hblk_shadow_mask;
	} hblk_un;

	uint_t		hblk_lckcnt;

#ifdef HBLK_TRACE
	kmutex_t	hblk_audit_lock;	/* lock to protect index */
	uint_t		hblk_audit_index;	/* index into audit_cache */
	struct hblk_lockcnt_audit hblk_audit_cache[HBLK_AUDIT_CACHE_SIZE];
#endif /* HBLK_TRACE */

	struct sf_hment hblk_hme[1];	/* hment array */
};

#define	hblk_shared	hblk_misc.shared_bit
#define	hblk_xhat_bit	hblk_misc.xhat_bit
#define	hblk_shw_bit	hblk_misc.shadow_bit
#define	hblk_nuc_bit	hblk_misc.nucleus_bit
#define	hblk_ttesz	hblk_misc.ttesize
#define	hblk_hmecnt	hblk_un.hblk_counts.hblk_hmecount
#define	hblk_vcnt	hblk_un.hblk_counts.hblk_validcnt
#define	hblk_shw_mask	hblk_un.hblk_shadow_mask

#define	MAX_HBLK_LCKCNT	0xFFFFFFFF
#define	HMEBLK_ALIGN	0x8	/* hmeblk has to be double aligned */

#ifdef HBLK_TRACE

#define	HBLK_STACK_TRACE(hmeblkp, lock)					\
{									\
	int flag = lock;	/* to pacify lint */			\
	int audit_index;						\
									\
	mutex_enter(&hmeblkp->hblk_audit_lock);				\
	audit_index = hmeblkp->hblk_audit_index;			\
	hmeblkp->hblk_audit_index = ((hmeblkp->hblk_audit_index + 1) &	\
	    (HBLK_AUDIT_CACHE_SIZE - 1));				\
	mutex_exit(&hmeblkp->hblk_audit_lock);				\
									\
	if (flag)							\
		hmeblkp->hblk_audit_cache[audit_index].flag =		\
		    HBLK_LOCK_PATTERN;					\
	else								\
		hmeblkp->hblk_audit_cache[audit_index].flag =		\
		    HBLK_UNLOCK_PATTERN;				\
									\
	hmeblkp->hblk_audit_cache[audit_index].thread = curthread;	\
	hmeblkp->hblk_audit_cache[audit_index].depth =			\
	    getpcstack(hmeblkp->hblk_audit_cache[audit_index].stack,	\
	    HBLK_STACK_DEPTH);						\
}

#else

#define	HBLK_STACK_TRACE(hmeblkp, lock)

#endif /* HBLK_TRACE */

#define	HMEHASH_FACTOR	16	/* used to calc # of buckets in hme hash */

/*
 * A maximum number of user hmeblks is defined in order to place an upper
 * limit on how much nucleus memory is required and to avoid overflowing the
 * tsbmiss uhashsz and khashsz data areas. The number below corresponds to
 * the number of buckets required, for an average hash chain length of 4 on
 * a 16TB machine.
 */

#define	MAX_UHME_BUCKETS	(0x1 << 30)
#define	MAX_KHME_BUCKETS	(0x1 << 30)

/*
 * The minimum number of kernel hash buckets.
 */
#define	MIN_KHME_BUCKETS	0x800

/*
 * The number of hash buckets must be a power of 2. If the initial calculated
 * value is less than USER_BUCKETS_THRESHOLD we round up to the next greater
 * power of 2, otherwise we round down to avoid huge over allocations.
 */
#define	USER_BUCKETS_THRESHOLD	(1<<22)

#define	MAX_NUCUHME_BUCKETS	0x4000
#define	MAX_NUCKHME_BUCKETS	0x2000

/*
 * There are 2 locks in the hmehash bucket. The hmehash_mutex is
 * a regular mutex used to make sure operations on a hash link are only
 * done by one thread. Any operation which comes into the hat with
 * a <vaddr, as> will grab the hmehash_mutex. Normally one would expect
 * the tsb miss handlers to grab the hash lock to make sure the hash list
 * is consistent while we traverse it. Unfortunately this can lead to
 * deadlocks or recursive mutex enters since it is possible for
 * someone holding the lock to take a tlb/tsb miss.
 * To solve this problem we have added the hmehash_listlock. This lock
 * is only grabbed by the tsb miss handlers, vatopfn, and while
 * adding/removing a hmeblk from the hash list. The code is written to
 * guarantee we won't take a tlb miss while holding this lock.
 */
struct hmehash_bucket {
	kmutex_t	hmehash_mutex;
	volatile uint64_t hmeh_nextpa;	/* physical address for hash list */
	struct hme_blk *hmeblkp;
	uint_t		hmeh_listlock;
};

#endif /* !_ASM */

#define	SFMMU_PGCNT_MASK	0x3f
#define	SFMMU_PGCNT_SHIFT	6
#define	INVALID_MMU_ID		-1
#define	SFMMU_MMU_GNUM_RSHIFT	16
#define	SFMMU_MMU_CNUM_LSHIFT	(64 - SFMMU_MMU_GNUM_RSHIFT)
#define	MAX_SFMMU_CTX_VAL	((1 << 16) - 1) /* for sanity check */
#define	MAX_SFMMU_GNUM_VAL	((0x1UL << 48) - 1)

/*
 * The tsb miss handlers written in assembly know that sfmmup
 * is a 64 bit ptr.
 *
 * The bspage and re-hash part is 64 bits, with the sfmmup being another 64
 * bits.
 */
#define	HTAG_SFMMUPSZ		0	/* Not really used for LP64 */
#define	HTAG_BSPAGE_SHIFT	13

/*
 * Assembly routines need to be able to get to ttesz
 */
#define	HBLK_SZMASK		0x7

#ifndef _ASM

/*
 * Returns the number of bytes that an hmeblk spans given its tte size
 */
#define	get_hblk_span(hmeblkp)	((hmeblkp)->hblk_span)
#define	get_hblk_ttesz(hmeblkp)	((hmeblkp)->hblk_ttesz)
#define	get_hblk_cache(hmeblkp)	(((hmeblkp)->hblk_ttesz == TTE8K) ? \
		sfmmu8_cache : sfmmu1_cache)
#define	HMEBLK_SPAN(ttesz)						\
	((ttesz == TTE8K) ? (TTEBYTES(ttesz) * NHMENTS) : TTEBYTES(ttesz))
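
/*
 * Illustrative arithmetic: an 8K hmeblk holds NHMENTS (8) hments, so
 * HMEBLK_SPAN(TTE8K) is 8 * 8K = 64K of virtual address space; every other
 * tte size holds a single hment, e.g. HMEBLK_SPAN(TTE4M) is simply 4M.
 */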

#define	set_hblk_sz(hmeblkp, ttesz)				\
	(hmeblkp)->hblk_ttesz = (ttesz);			\
	(hmeblkp)->hblk_span = HMEBLK_SPAN(ttesz)

#define	get_hblk_base(hmeblkp)					\
	((uintptr_t)(hmeblkp)->hblk_tag.htag_bspage << MMU_PAGESHIFT)

#define	get_hblk_endaddr(hmeblkp)				\
	((caddr_t)(get_hblk_base(hmeblkp) + get_hblk_span(hmeblkp)))

#define	in_hblk_range(hmeblkp, vaddr)					\
	(((uintptr_t)(vaddr) >= get_hblk_base(hmeblkp)) &&		\
	((uintptr_t)(vaddr) < (get_hblk_base(hmeblkp) +			\
	get_hblk_span(hmeblkp))))

#define	tte_to_vaddr(hmeblkp, tte)	((caddr_t)(get_hblk_base(hmeblkp) \
	+ (TTEBYTES(TTE_CSZ(&tte)) * (tte).tte_hmenum)))

#define	tte_to_evaddr(hmeblkp, ttep)	((caddr_t)(get_hblk_base(hmeblkp) \
	+ (TTEBYTES(TTE_CSZ(ttep)) * ((ttep)->tte_hmenum + 1))))

#define	vaddr_to_vshift(hblktag, vaddr, shwsz)				\
	((((uintptr_t)(vaddr) >> MMU_PAGESHIFT) - (hblktag.htag_bspage)) >>\
	TTE_BSZS_SHIFT((shwsz) - 1))

#define	HME8BLK_SZ	(sizeof (struct hme_blk) +			\
			(NHMENTS - 1) * sizeof (struct sf_hment))
#define	HME1BLK_SZ	(sizeof (struct hme_blk))
#define	H1MIN		(2 + MAX_BIGKTSB_TTES)	/* nucleus text+data, ktsb */

/*
 * Hme_blk hash structure
 * Active mappings are kept in a hash structure of hme_blks. The hash
 * function is based on (ctx, vaddr). The size of the hash table is a
 * power of 2 such that the average hash chain length is HMENT_HASHAVELEN.
 * The hash actually consists of 2 separate hashes. One hash is for the user
 * address space and the other hash is for the kernel address space.
 * The number of buckets is calculated at boot time and stored in the global
 * variables "uhmehash_num" and "khmehash_num". By making the hash table size
 * a power of 2 we can use a simple & function to derive an index instead of
 * a divide.
 *
 * HME_HASH_FUNCTION(hatid, vaddr, shift) returns a pointer to a hme_hash
 * bucket.
 * An hme hash bucket contains a pointer to an hme_blk and the mutex that
 * protects the link list.
 * Spitfire supports 4 page sizes. 8k and 64K pages only need one hash.
 * 512K pages need 2 hashes and 4M pages need 3 hashes.
 * The 'shift' parameter controls how many bits the vaddr will be shifted in
 * the hash function. It is calculated in the HME_HASH_SHIFT(ttesz) function
 * and it varies depending on the page size as follows:
 *	8k pages:	HBLK_RANGE_SHIFT
 *	64k pages:	MMU_PAGESHIFT64K
 *	512K pages:	MMU_PAGESHIFT512K
 *	4M pages:	MMU_PAGESHIFT4M
 * An assembly version of the hash function exists in sfmmu_ktsb_miss(). All
 * changes should be reflected in both versions. This function and the TSB
 * miss handlers are the only places which know about the two hashes.
 *
 * HBLK_RANGE_SHIFT controls the range of virtual addresses that will fall
 * into the same bucket for a particular process. It is currently set to
 * be equivalent to 64K range or one hme_blk.
 *
 * The hme_blks in the hash are protected by a per hash bucket mutex
 * known as SFMMU_HASH_LOCK.
 * You need to acquire this lock before traversing the hash bucket link
 * list, while adding/removing a hme_blk to the list, and while
 * modifying an hme_blk. A possible optimization is to replace these
 * mutexes with a readers/writer lock, but right now it is not clear
 * whether this is a win or not.
 *
 * The HME_HASH_TABLE_SEARCH will search the hash table for the
 * hme_blk that contains the hment that corresponds to the passed
 * ctx and vaddr. It assumes the SFMMU_HASH_LOCK is held.
 */

#endif /* ! _ASM */

#define	KHATID			ksfmmup
#define	UHMEHASH_SZ		uhmehash_num
#define	KHMEHASH_SZ		khmehash_num
#define	HMENT_HASHAVELEN	4
#define	HBLK_RANGE_SHIFT	MMU_PAGESHIFT64K /* shift for HBLK_BS_MASK */
#define	HBLK_MIN_TTESZ		1
#define	HBLK_MIN_BYTES		MMU_PAGESIZE64K
#define	HBLK_MIN_SHIFT		MMU_PAGESHIFT64K
#define	MAX_HASHCNT		5
#define	DEFAULT_MAX_HASHCNT	3

#ifndef _ASM

#define	HASHADDR_MASK(hashno)	TTE_PAGEMASK(hashno)

#define	HME_HASH_SHIFT(ttesz)						\
	((ttesz == TTE8K) ? HBLK_RANGE_SHIFT : TTE_PAGE_SHIFT(ttesz))

#define	HME_HASH_ADDR(vaddr, hmeshift)					\
	((caddr_t)(((uintptr_t)(vaddr) >> (hmeshift)) << (hmeshift)))

#define	HME_HASH_BSPAGE(vaddr, hmeshift)				\
	(((uintptr_t)(vaddr) >> (hmeshift)) << ((hmeshift) - MMU_PAGESHIFT))

#define	HME_HASH_REHASH(ttesz)						\
	(((ttesz) < TTE512K) ? 1 : (ttesz))

#define	HME_HASH_FUNCTION(hatid, vaddr, shift)				  \
	((((void *)hatid) != ((void *)KHATID)) ?			  \
	(&uhme_hash[(((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) \
	    & UHMEHASH_SZ)]):						  \
	(&khme_hash[(((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) \
	    & KHMEHASH_SZ)]))

/*
 * This macro will traverse a hmeblk hash link list looking for an hme_blk
 * that owns the specified vaddr and hatid. If it doesn't find one, hmeblkp
 * will be set to NULL; otherwise it will point to the correct hme_blk.
 * This macro also cleans empty hblks.
 */
#define	HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, pr_hblk, listp)	\
{									\
	struct hme_blk *nx_hblk;					\
									\
	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));				\
	hblkp = hmebp->hmeblkp;						\
	pr_hblk = NULL;							\
	while (hblkp) {							\
		if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) {		\
			/* found hme_blk */				\
			break;						\
		}							\
		nx_hblk = hblkp->hblk_next;				\
		if (!hblkp->hblk_vcnt && !hblkp->hblk_hmecnt) {		\
			sfmmu_hblk_hash_rm(hmebp, hblkp, pr_hblk,	\
			    listp, 0);					\
		} else {						\
			pr_hblk = hblkp;				\
		}							\
		hblkp = nx_hblk;					\
	}								\
}

#define	HME_HASH_SEARCH(hmebp, hblktag, hblkp, listp)			\
{									\
	struct hme_blk *pr_hblk;					\
									\
	HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, pr_hblk, listp);	\
}
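
/*
 * Illustrative sketch (hypothetical caller) of a private 8K lookup built
 * from the macros above; SFMMU_HASH_LOCK/UNLOCK are defined below:
 *
 *	int hmeshift = HME_HASH_SHIFT(TTE8K);
 *
 *	hblktag.htag_id = (void *)sfmmup;
 *	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
 *	hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
 *	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
 *	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
 *	SFMMU_HASH_LOCK(hmebp);
 *	HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
 *	-- hmeblkp now points to the matching hme_blk, or is NULL
 *	SFMMU_HASH_UNLOCK(hmebp);
 */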
1586 */
1587 #define HME_HASH_FAST_SEARCH(hmebp, hblktag, hblkp) \
1588 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); \
1589 for (hblkp = hmebp->hmeblkp; hblkp; \
1590 hblkp = hblkp->hblk_next) { \
1591 if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) { \
1592 /* found hme_blk */ \
1593 break; \
1594 } \
1595 }
1596
1597 #define SFMMU_HASH_LOCK(hmebp) \
1598 (mutex_enter(&hmebp->hmehash_mutex))
1599
1600 #define SFMMU_HASH_UNLOCK(hmebp) \
1601 (mutex_exit(&hmebp->hmehash_mutex))
1602
1603 #define SFMMU_HASH_LOCK_TRYENTER(hmebp) \
1604 (mutex_tryenter(&hmebp->hmehash_mutex))
1605
1606 #define SFMMU_HASH_LOCK_ISHELD(hmebp) \
1607 (mutex_owned(&hmebp->hmehash_mutex))
1608
1609 #define SFMMU_XCALL_STATS(sfmmup) \
1610 { \
1611 if (sfmmup == ksfmmup) { \
1612 SFMMU_STAT(sf_kernel_xcalls); \
1613 } else { \
1614 SFMMU_STAT(sf_user_xcalls); \
1615 } \
1616 }
1617
1618 #define astosfmmu(as) ((as)->a_hat)
1619 #define hblktosfmmu(hmeblkp) ((sfmmu_t *)(hmeblkp)->hblk_tag.htag_id)
1620 #define hblktosrd(hmeblkp) ((sf_srd_t *)(hmeblkp)->hblk_tag.htag_id)
1621 #define sfmmutoas(sfmmup) ((sfmmup)->sfmmu_as)
1622
1623 #define sfmmutohtagid(sfmmup, rid) \
1624 (((rid) == SFMMU_INVALID_SHMERID) ? (void *)(sfmmup) : \
1625 (void *)((sfmmup)->sfmmu_srdp))
1626
1627 /*
1628 * We use the sfmmu data structure to keep the per as page coloring info.
1629 */
1630 #define as_color_bin(as) (astosfmmu(as)->sfmmu_clrbin)
1631 #define as_color_start(as) (astosfmmu(as)->sfmmu_clrstart)
1632
1633 typedef struct {
1634 char h8[HME8BLK_SZ];
1635 } hblk8_t;
1636
1637 typedef struct {
1638 char h1[HME1BLK_SZ];
1639 } hblk1_t;
1640
1641 typedef struct {
1642 ulong_t index;
1643 ulong_t len;
1644 hblk8_t *list;
1645 } nucleus_hblk8_info_t;
1646
1647 typedef struct {
1648 ulong_t index;
1649 ulong_t len;
1650 hblk1_t *list;
1651 } nucleus_hblk1_info_t;
1652
1653 /*
1654 * This struct is used for accumulating information about a range
1655 * of pages that are unloading so that a single xcall can flush
1656 * the entire range from remote tlbs. A function that must demap
1657 * a range of virtual addresses declares one of these structures
1658 * and initializes it using DEMAP_RANGE_INIT(). It then passes a pointer to
1659 * this struct to the appropriate sfmmu_hblk_* level function which does
1660 * all the bookkeeping using the other macros. When the function has
1661 * finished the virtual address range, it needs to call the DEMAP_RANGE_FLUSH()
1662 * macro to take care of any remaining unflushed mappings.
1663 *
1664 * The maximum range this struct can represent is the number of bits
1665 * in the dmr_bitvec field times the pagesize in dmr_pgsz. Currently, only
1666 * MMU_PAGESIZE pages are supported.
1667 *
1668 * Since there are now cases where it's no longer necessary to do
1669 * flushes (e.g. when the process isn't runnable because it's being swapped
1670 * out or is exiting) we allow these macros to take a NULL dmr input and do
1671 * nothing in that case.
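 *
 * A typical unload loop looks like the following sketch (only the
 * DEMAP_RANGE_* macros are real; the loop variables and the
 * tte_was_valid test stand in for the caller's own logic):
 *
 *	demap_range_t dmr;
 *
 *	DEMAP_RANGE_INIT(sfmmup, &dmr);
 *	for (va = addr; va < endaddr; va += MMU_PAGESIZE) {
 *		DEMAP_RANGE_CONTINUE(&dmr, va, endaddr);
 *		if (tte_was_valid)
 *			DEMAP_RANGE_MARKPG(&dmr, va);
 *		DEMAP_RANGE_NEXTPG(&dmr);
 *	}
 *	DEMAP_RANGE_FLUSH(&dmr);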
1672 */
1673 typedef struct {
1674 sfmmu_t *dmr_sfmmup; /* relevant hat */
1675 caddr_t dmr_addr; /* beginning address */
1676 caddr_t dmr_endaddr; /* ending address */
1677 ulong_t dmr_bitvec; /* valid pages found */
1678 ulong_t dmr_bit; /* next page to examine */
1679 ulong_t dmr_maxbit; /* highest page in range */
1680 ulong_t dmr_pgsz; /* page size in range */
1681 } demap_range_t;
1682
1683 #define DMR_MAXBIT ((ulong_t)1<<63) /* dmr_bit high bit */
1684
1685 #define DEMAP_RANGE_INIT(sfmmup, dmrp) \
1686 if ((dmrp) != NULL) { \
1687 (dmrp)->dmr_sfmmup = (sfmmup); \
1688 (dmrp)->dmr_bitvec = 0; \
1689 (dmrp)->dmr_maxbit = sfmmu_dmr_maxbit; \
1690 (dmrp)->dmr_pgsz = MMU_PAGESIZE; \
1691 }
1692
1693 #define DEMAP_RANGE_PGSZ(dmrp) ((dmrp)? (dmrp)->dmr_pgsz : MMU_PAGESIZE)
1694
1695 #define DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr) \
1696 if ((dmrp) != NULL) { \
1697 if ((dmrp)->dmr_bitvec != 0 && (dmrp)->dmr_endaddr != (addr)) \
1698 sfmmu_tlb_range_demap(dmrp); \
1699 (dmrp)->dmr_endaddr = (endaddr); \
1700 }
1701
1702 #define DEMAP_RANGE_FLUSH(dmrp) \
1703 if ((dmrp) != NULL) { \
1704 if ((dmrp)->dmr_bitvec != 0) \
1705 sfmmu_tlb_range_demap(dmrp); \
1706 }
1707
1708 #define DEMAP_RANGE_MARKPG(dmrp, addr) \
1709 if ((dmrp) != NULL) { \
1710 if ((dmrp)->dmr_bitvec == 0) { \
1711 (dmrp)->dmr_addr = (addr); \
1712 (dmrp)->dmr_bit = 1; \
1713 } \
1714 (dmrp)->dmr_bitvec |= (dmrp)->dmr_bit; \
1715 }
1716
1717 #define DEMAP_RANGE_NEXTPG(dmrp) \
1718 if ((dmrp) != NULL && (dmrp)->dmr_bitvec != 0) { \
1719 if ((dmrp)->dmr_bit & (dmrp)->dmr_maxbit) { \
1720 sfmmu_tlb_range_demap(dmrp); \
1721 } else { \
1722 (dmrp)->dmr_bit <<= 1; \
1723 } \
1724 }
1725
1726 /*
1727 * TSB related structures
1728 *
1729 * The TSB is made up of tte entries. Both the tag and data are present
1730 * in the TSB. The TSB locking is managed as follows:
1731 * A software bit in the tsb tag is used to indicate that the entry is locked.
1732 * If a cpu servicing a tsb miss reads a locked entry, the tag compare will
1733 * fail, forcing the cpu to go to the hat hash for the translation.
1734 * The cpu that holds the lock can then modify the data side, and the tag side.
1735 * The last write should be to the word containing the lock bit, which will
1736 * clear the lock and allow the tsb entry to be read. It is assumed that all
1737 * cpus reading the tsb will do so with atomic 128-bit loads. An atomic 128
1738 * bit load is required to prevent the following from happening:
1739 *
1740 * cpu 0 cpu 1 comments
1741 *
1742 * ldx tag tag unlocked
1743 * ldstub lock set lock
1744 * stx data
1745 * stx tag unlock
1746 * ldx tag incorrect tte!!!
1747 *
1748 * The software also maintains a bit in the tag to indicate an invalid
1749 * tsb entry. The purpose of this bit is to allow the tsb invalidate code
1750 * to invalidate a tsb entry with a single cas. See code for details.
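 *
 * In outline, the writer side of that protocol is (this is a sketch
 * of the ordering only, not literal code):
 *
 *	1. ldstub the byte holding tag_lock; retry if it was already set.
 *	2. stx the new tte_data.
 *	3. stx the tag word last, which clears tag_lock and atomically
 *	   publishes the entry to the 128-bit atomic readers.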
1751 */
1752
1753 union tsb_tag {
1754 struct {
1755 uint32_t tag_res0:16; /* reserved - context area */
1756 uint32_t tag_inv:1; /* sw - invalid tsb entry */
1757 uint32_t tag_lock:1; /* sw - locked tsb entry */
1758 uint32_t tag_res1:4; /* reserved */
1759 uint32_t tag_va_hi:10; /* va[63:54] */
1760 uint32_t tag_va_lo; /* va[53:22] */
1761 } tagbits;
1762 struct tsb_tagints {
1763 uint32_t inthi;
1764 uint32_t intlo;
1765 } tagints;
1766 };
1767 #define tag_invalid tagbits.tag_inv
1768 #define tag_locked tagbits.tag_lock
1769 #define tag_vahi tagbits.tag_va_hi
1770 #define tag_valo tagbits.tag_va_lo
1771 #define tag_inthi tagints.inthi
1772 #define tag_intlo tagints.intlo
1773
1774 struct tsbe {
1775 union tsb_tag tte_tag;
1776 tte_t tte_data;
1777 };
1778
1779 /*
1780 * A per cpu struct is kept that duplicates some info
1781 * used by the tl>0 tsb miss handlers, plus it provides
1782 * a scratch area. Its purpose is to minimize cache misses
1783 * in the tsb miss handler; it is 128 bytes (2 e$ lines).
1784 *
1785 * There should be one allocated per cpu in nucleus memory
1786 * and it should be aligned on an ecache line boundary.
1787 */
1788 struct tsbmiss {
1789 sfmmu_t *ksfmmup; /* kernel hat id */
1790 sfmmu_t *usfmmup; /* user hat id */
1791 sf_srd_t *usrdp; /* user's SRD hat id */
1792 struct tsbe *tsbptr; /* hardware computed ptr */
1793 struct tsbe *tsbptr4m; /* hardware computed ptr */
1794 struct tsbe *tsbscdptr; /* hardware computed ptr */
1795 struct tsbe *tsbscdptr4m; /* hardware computed ptr */
1796 uint64_t ismblkpa;
1797 struct hmehash_bucket *khashstart;
1798 struct hmehash_bucket *uhashstart;
1799 uint_t khashsz;
1800 uint_t uhashsz;
1801 uint16_t dcache_line_mask; /* used to flush dcache */
1802 uchar_t uhat_tteflags; /* private page sizes */
1803 uchar_t uhat_rtteflags; /* SHME pagesizes */
1804 uint32_t utsb_misses;
1805 uint32_t ktsb_misses;
1806 uint16_t uprot_traps;
1807 uint16_t kprot_traps;
1808 /*
1809 * scratch[0] -> TSB_TAGACC
1810 * scratch[1] -> TSBMISS_HMEBP
1811 * scratch[2] -> TSBMISS_HATID
1812 */
1813 uintptr_t scratch[3];
1814 ulong_t shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
1815 ulong_t scd_shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
1816 uint8_t pad[48]; /* pad to 64 bytes */
1817 };
1818
1819 /*
1820 * A per cpu struct is kept for use within the tl>0 kpm tsb
1821 * miss handler. Some members are duplicates of common data or
1822 * the physical addresses of common data. A few members are also
1823 * written by the tl>0 kpm tsb miss handler. Its purpose is to
1824 * minimize cache misses in the kpm tsb miss handler; it occupies
1825 * one ecache line. There should be one allocated per cpu in
1826 * nucleus memory and it should be aligned on an ecache line
1827 * boundary. It is not merged w/ struct tsbmiss since there is
1828 * not much to share and the tsbmiss paths are different, so
1829 * a kpm tlbmiss/tsbmiss only touches one cacheline, except for
1830 * (DEBUG || SFMMU_STAT_GATHER) where the dtlb_misses counter
1831 * of struct tsbmiss is used on every dtlb miss.
1832 */
1833 struct kpmtsbm {
1834 caddr_t vbase; /* start of kpm address range */
1835 caddr_t vend; /* end of kpm address range */
1836 uchar_t flags; /* flags needed in TL tsbmiss handler */
1837 uchar_t sz_shift; /* for single kpm window */
1838 uchar_t kpmp_shift; /* hash lock shift */
1839 uchar_t kpmp2pshft; /* kpm page to page shift */
1840 uint_t kpmp_table_sz; /* size of kpmp_table or kpmp_stable */
1841 uint64_t kpmp_tablepa; /* paddr of kpmp_table or kpmp_stable */
1842 uint64_t msegphashpa; /* paddr of memseg_phash */
1843 struct tsbe *tsbptr; /* saved ktsb pointer */
1844 uint_t kpm_dtlb_misses; /* kpm tlbmiss counter */
1845 uint_t kpm_tsb_misses; /* kpm tsbmiss counter */
1846 uintptr_t pad[1];
1847 };
1848
1849 extern size_t tsb_slab_size;
1850 extern uint_t tsb_slab_shift;
1851 extern size_t tsb_slab_mask;
1852
1853 #endif /* !_ASM */
1854
1855 /*
1856 * Flags for TL kpm tsbmiss handler
1857 */
1858 #define KPMTSBM_ENABLE_FLAG 0x01 /* bit copy of kpm_enable */
1859 #define KPMTSBM_TLTSBM_FLAG 0x02 /* use TL tsbmiss handler */
1860 #define KPMTSBM_TSBPHYS_FLAG 0x04 /* use ASI_MEM for TSB update */
1861
1862 /*
1863 * The TSB
1864 * All TSB sizes supported by the hardware are now supported (8K - 1M).
1865 * For kernel TSBs we may go beyond the hardware supported sizes and support
1866 * larger TSBs via software.
1867 * All TTE sizes are supported in the TSB; the manner in which this is
1868 * done is cpu dependent.
1869 */
1870 #define TSB_MIN_SZCODE TSB_8K_SZCODE /* min. supported TSB size */
1871 #define TSB_MIN_OFFSET_MASK (TSB_OFFSET_MASK(TSB_MIN_SZCODE))
1872
1873 #ifdef sun4v
1874 #define UTSB_MAX_SZCODE TSB_256M_SZCODE /* max. supported TSB size */
1875 #else /* sun4u */
1876 #define UTSB_MAX_SZCODE TSB_1M_SZCODE /* max. supported TSB size */
1877 #endif /* sun4v */
1878
1879 #define UTSB_MAX_OFFSET_MASK (TSB_OFFSET_MASK(UTSB_MAX_SZCODE))
1880
1881 #define TSB_FREEMEM_MIN 0x1000 /* 32 mb */
1882 #define TSB_FREEMEM_LARGE 0x10000 /* 512 mb */
1883 #define TSB_8K_SZCODE 0 /* 512 entries */
1884 #define TSB_16K_SZCODE 1 /* 1k entries */
1885 #define TSB_32K_SZCODE 2 /* 2k entries */
1886 #define TSB_64K_SZCODE 3 /* 4k entries */
1887 #define TSB_128K_SZCODE 4 /* 8k entries */
1888 #define TSB_256K_SZCODE 5 /* 16k entries */
1889 #define TSB_512K_SZCODE 6 /* 32k entries */
1890 #define TSB_1M_SZCODE 7 /* 64k entries */
1891 #define TSB_2M_SZCODE 8 /* 128k entries */
1892 #define TSB_4M_SZCODE 9 /* 256k entries */
1893 #define TSB_8M_SZCODE 10 /* 512k entries */
1894 #define TSB_16M_SZCODE 11 /* 1M entries */
1895 #define TSB_32M_SZCODE 12 /* 2M entries */
1896 #define TSB_64M_SZCODE 13 /* 4M entries */
1897 #define TSB_128M_SZCODE 14 /* 8M entries */
1898 #define TSB_256M_SZCODE 15 /* 16M entries */
1899 #define TSB_ENTRY_SHIFT 4 /* each entry = 128 bits = 16 bytes */
1900 #define TSB_ENTRY_SIZE (1 << 4)
1901 #define TSB_START_SIZE 9
1902 #define TSB_ENTRIES(tsbsz) (1 << (TSB_START_SIZE + tsbsz))
1903 #define TSB_BYTES(tsbsz) (TSB_ENTRIES(tsbsz) << TSB_ENTRY_SHIFT)
1904 #define TSB_OFFSET_MASK(tsbsz) (TSB_ENTRIES(tsbsz) - 1)
1905 #define TSB_BASEADDR_MASK ((1 << 12) - 1)
1906
1907 /*
1908 * sun4u platforms
1909 * ---------------
1910 * We now support two user TSBs with one TSB base register.
1911 * Hence the TSB base register is split up as follows: 1912 * 1913 * When only one TSB present: 1914 * [63 62..42 41..13 12..4 3..0] 1915 * ^ ^ ^ ^ ^ 1916 * | | | | | 1917 * | | | | |_ TSB size code 1918 * | | | | 1919 * | | | |_ Reserved 0 1920 * | | | 1921 * | | |_ TSB VA[41..13] 1922 * | | 1923 * | |_ VA hole (Spitfire), zeros (Cheetah and beyond) 1924 * | 1925 * |_ 0 1926 * 1927 * When second TSB present: 1928 * [63 62..42 41..33 32..29 28..22 21..13 12..4 3..0] 1929 * ^ ^ ^ ^ ^ ^ ^ ^ 1930 * | | | | | | | | 1931 * | | | | | | | |_ First TSB size code 1932 * | | | | | | | 1933 * | | | | | | |_ Reserved 0 1934 * | | | | | | 1935 * | | | | | |_ First TSB's VA[21..13] 1936 * | | | | | 1937 * | | | | |_ Reserved for future use 1938 * | | | | 1939 * | | | |_ Second TSB's size code 1940 * | | | 1941 * | | |_ Second TSB's VA[21..13] 1942 * | | 1943 * | |_ VA hole (Spitfire) / ones (Cheetah and beyond) 1944 * | 1945 * |_ 1 1946 * 1947 * Note that since we store 21..13 of each TSB's VA, TSBs and their slabs 1948 * may be up to 4M in size. For now, only hardware supported TSB sizes 1949 * are supported, though the slabs are usually 4M in size. 1950 * 1951 * sun4u platforms that define UTSB_PHYS use physical addressing to access 1952 * the user TSBs at TL>0. The first user TSB base is in the MMU I/D TSB Base 1953 * registers. The second TSB base uses a dedicated scratchpad register which 1954 * requires a definition of SCRATCHPAD_UTSBREG2 in mach_sfmmu.h. The layout for 1955 * both registers is equivalent to sun4v below, except the TSB PA range is 1956 * [46..13] for sun4u. 1957 * 1958 * sun4v platforms 1959 * --------------- 1960 * On sun4v platforms, we use two dedicated scratchpad registers as pseudo 1961 * hardware TSB base registers to hold up to two different user TSBs. 1962 * 1963 * Each register contains TSB's physical base and size code information 1964 * as follows: 1965 * 1966 * [63..56 55..13 12..4 3..0] 1967 * ^ ^ ^ ^ 1968 * | | | | 1969 * | | | |_ TSB size code 1970 * | | | 1971 * | | |_ Reserved 0 1972 * | | 1973 * | |_ TSB PA[55..13] 1974 * | 1975 * | 1976 * | 1977 * |_ 0 for valid TSB 1978 * 1979 * Absence of a user TSB (primarily the second user TSB) is indicated by 1980 * storing a negative value in the TSB base register. This allows us to 1981 * check for presence of a user TSB by simply checking bit# 63. 1982 */ 1983 #define TSBREG_MSB_SHIFT 32 /* set upper bits */ 1984 #define TSBREG_MSB_CONST 0xfffff800 /* set bits 63..43 */ 1985 #define TSBREG_FIRTSB_SHIFT 42 /* to clear bits 63:22 */ 1986 #define TSBREG_SECTSB_MKSHIFT 20 /* 21:13 --> 41:33 */ 1987 #define TSBREG_SECTSB_LSHIFT 22 /* to clear bits 63:42 */ 1988 #define TSBREG_SECTSB_RSHIFT (TSBREG_SECTSB_MKSHIFT + TSBREG_SECTSB_LSHIFT) 1989 /* sectsb va -> bits 21:13 */ 1990 /* after clearing upper bits */ 1991 #define TSBREG_SECSZ_SHIFT 29 /* to get sectsb szc to 3:0 */ 1992 #define TSBREG_VAMASK_SHIFT 13 /* set up VA mask */ 1993 1994 #define BIGKTSB_SZ_MASK 0xf 1995 #define TSB_SOFTSZ_MASK BIGKTSB_SZ_MASK 1996 #define MIN_BIGKTSB_SZCODE 9 /* 256k entries */ 1997 #define MAX_BIGKTSB_SZCODE 11 /* 1024k entries */ 1998 #define MAX_BIGKTSB_TTES (TSB_BYTES(MAX_BIGKTSB_SZCODE) / MMU_PAGESIZE4M) 1999 2000 #define TAG_VALO_SHIFT 22 /* tag's va are bits 63-22 */ 2001 /* 2002 * sw bits used on tsb_tag - bit masks used only in assembly 2003 * use only a sethi for these fields. 
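 * (Both masks only have bits set within the sethi immediate range,
 * bits 31..10, so e.g. "sethi %hi(TSBTAG_LOCKED), reg" materializes
 * the mask in a single instruction.)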
2004 */
2005 #define TSBTAG_INVALID 0x00008000 /* tsb_tag.tag_invalid */
2006 #define TSBTAG_LOCKED 0x00004000 /* tsb_tag.tag_locked */
2007
2008 #ifdef _ASM
2009
2010 /*
2011 * Marker to indicate that this instruction will be hot patched at runtime
2012 * to some other value.
2013 * This value must be zero since it fills in the imm bits of the target
2014 * instructions to be patched.
2015 */
2016 #define RUNTIME_PATCH (0)
2017
2018 /*
2019 * V9 defines the nop instruction as the following, which we use
2020 * at runtime to nullify some instructions we don't want to
2021 * execute in the trap handlers on certain platforms.
2022 */
2023 #define MAKE_NOP_INSTR(reg) \
2024 sethi %hi(0x1000000), reg
2025
2026 /*
2027 * This macro constructs a SPARC V9 "jmpl <source reg>, %g0"
2028 * instruction, with the source register specified by the jump_reg_number.
2029 * The jmp opcode [24:19] = 11 1000 and the source register is bits [18:14].
2030 * The instruction is returned in reg. The macro is used to patch in a jmpl
2031 * instruction at runtime.
2032 */
2033 #define MAKE_JMP_INSTR(jump_reg_number, reg, tmp) \
2034 sethi %hi(0x81c00000), reg; \
2035 mov jump_reg_number, tmp; \
2036 sll tmp, 14, tmp; \
2037 or reg, tmp, reg
2038
2039 /*
2040 * Macro to get hat per-MMU cnum on this CPU.
2041 * sfmmu - In, pass in "sfmmup" from the caller.
2042 * cnum - Out, return 'cnum' to the caller
2043 * scr - scratch
2044 */
2045 #define SFMMU_CPU_CNUM(sfmmu, cnum, scr) \
2046 CPU_ADDR(scr, cnum); /* scr = load CPU struct addr */ \
2047 ld [scr + CPU_MMU_IDX], cnum; /* cnum = mmuid */ \
2048 add sfmmu, SFMMU_CTXS, scr; /* scr = sfmmup->sfmmu_ctxs[] */ \
2049 sllx cnum, SFMMU_MMU_CTX_SHIFT, cnum; \
2050 add scr, cnum, scr; /* scr = sfmmup->sfmmu_ctxs[id] */ \
2051 ldx [scr + SFMMU_MMU_GC_NUM], scr; /* sfmmu_ctxs[id].gcnum */ \
2052 sllx scr, SFMMU_MMU_CNUM_LSHIFT, scr; \
2053 srlx scr, SFMMU_MMU_CNUM_LSHIFT, cnum; /* cnum = sfmmu cnum */
2054
2055 /*
2056 * Macro to get hat gnum & cnum associated with sfmmu_ctx[mmuid] entry
2057 * entry - In, pass in (&sfmmu_ctxs[mmuid] - SFMMU_CTXS) from the caller.
2058 * gnum - Out, return sfmmu gnum
2059 * cnum - Out, return sfmmu cnum
2060 * reg - scratch
2061 */
2062 #define SFMMU_MMUID_GNUM_CNUM(entry, gnum, cnum, reg) \
2063 ldx [entry + SFMMU_CTXS], reg; /* reg = sfmmu (gnum | cnum) */ \
2064 srlx reg, SFMMU_MMU_GNUM_RSHIFT, gnum; /* gnum = sfmmu gnum */ \
2065 sllx reg, SFMMU_MMU_CNUM_LSHIFT, cnum; \
2066 srlx cnum, SFMMU_MMU_CNUM_LSHIFT, cnum; /* cnum = sfmmu cnum */
2067
2068 /*
2069 * Macro to get this CPU's tsbmiss area.
2070 */
2071 #define CPU_TSBMISS_AREA(tsbmiss, tmp1) \
2072 CPU_INDEX(tmp1, tsbmiss); /* tmp1 = cpu idx */ \
2073 sethi %hi(tsbmiss_area), tsbmiss; /* tsbmiss base ptr */ \
2074 mulx tmp1, TSBMISS_SIZE, tmp1; /* byte offset */ \
2075 or tsbmiss, %lo(tsbmiss_area), tsbmiss; \
2076 add tsbmiss, tmp1, tsbmiss /* tsbmiss area of CPU */
2077
2078
2079 /*
2080 * Macro to set kernel context + page size codes in DMMU primary context
2081 * register.
It is only necessary for sun4u because sun4v does not need
2082 * page size codes.
2083 */
2084 #ifdef sun4v
2085
2086 #define SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
2087
2088 #else
2089
2090 #define SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3) \
2091 sethi %hi(kcontextreg), reg0; \
2092 ldx [reg0 + %lo(kcontextreg)], reg0; \
2093 mov MMU_PCONTEXT, reg1; \
2094 ldxa [reg1]ASI_MMU_CTX, reg2; \
2095 xor reg0, reg2, reg2; \
2096 brz reg2, label3; \
2097 srlx reg2, CTXREG_NEXT_SHIFT, reg2; \
2098 rdpr %pstate, reg3; /* disable interrupts */ \
2099 btst PSTATE_IE, reg3; \
2100 /*CSTYLED*/ \
2101 bnz,a,pt %icc, label1; \
2102 wrpr reg3, PSTATE_IE, %pstate; \
2103 /*CSTYLED*/ \
2104 label1:; \
2105 brz reg2, label2; /* need demap if N_pgsz0/1 change */ \
2106 sethi %hi(FLUSH_ADDR), reg4; \
2107 mov DEMAP_ALL_TYPE, reg2; \
2108 stxa %g0, [reg2]ASI_DTLB_DEMAP; \
2109 stxa %g0, [reg2]ASI_ITLB_DEMAP; \
2110 /*CSTYLED*/ \
2111 label2:; \
2112 stxa reg0, [reg1]ASI_MMU_CTX; \
2113 flush reg4; \
2114 btst PSTATE_IE, reg3; \
2115 /*CSTYLED*/ \
2116 bnz,a,pt %icc, label3; \
2117 wrpr %g0, reg3, %pstate; /* restore interrupt state */ \
2118 label3:;
2119
2120 #endif
2121
2122 /*
2123 * Macro to setup arguments with kernel sfmmup context + page size before
2124 * calling sfmmu_setctx_sec()
2125 */
2126 #ifdef sun4v
2127 #define SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1) \
2128 set KCONTEXT, arg0; \
2129 set 0, arg1;
2130 #else
2131 #define SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1) \
2132 ldub [sfmmup + SFMMU_CEXT], arg1; \
2133 set KCONTEXT, arg0; \
2134 sll arg1, CTXREG_EXT_SHIFT, arg1;
2135 #endif
2136
2137 #define PANIC_IF_INTR_DISABLED_PSTR(pstatereg, label, scr) \
2138 andcc pstatereg, PSTATE_IE, %g0; /* panic if intrs */ \
2139 /*CSTYLED*/ \
2140 bnz,pt %icc, label; /* already disabled */ \
2141 nop; \
2142 \
2143 sethi %hi(panicstr), scr; \
2144 ldx [scr + %lo(panicstr)], scr; \
2145 tst scr; \
2146 /*CSTYLED*/ \
2147 bnz,pt %xcc, label; \
2148 nop; \
2149 \
2150 save %sp, -SA(MINFRAME), %sp; \
2151 sethi %hi(sfmmu_panic1), %o0; \
2152 call panic; \
2153 or %o0, %lo(sfmmu_panic1), %o0; \
2154 /*CSTYLED*/ \
2155 label:
2156
2157 #define PANIC_IF_INTR_ENABLED_PSTR(label, scr) \
2158 /* \
2159 * The caller must have disabled interrupts. \
2160 * If interrupts are not disabled, panic. \
2161 */ \
2162 rdpr %pstate, scr; \
2163 andcc scr, PSTATE_IE, %g0; \
2164 /*CSTYLED*/ \
2165 bz,pt %icc, label; \
2166 nop; \
2167 \
2168 sethi %hi(panicstr), scr; \
2169 ldx [scr + %lo(panicstr)], scr; \
2170 tst scr; \
2171 /*CSTYLED*/ \
2172 bnz,pt %xcc, label; \
2173 nop; \
2174 \
2175 sethi %hi(sfmmu_panic6), %o0; \
2176 call panic; \
2177 or %o0, %lo(sfmmu_panic6), %o0; \
2178 /*CSTYLED*/ \
2179 label:
2180
2181 #endif /* _ASM */
2182
2183 #ifndef _ASM
2184
2185 #ifdef VAC
2186 /*
2187 * Page coloring
2188 * The p_vcolor field of the page struct (1 byte) is used to store the
2189 * virtual page color. This provides for 255 colors. The value zero is
2190 * used to mean the page has no color - it has never been mapped or has
2191 * somehow been purified.
2192 */
2193
2194 #define PP_GET_VCOLOR(pp) (((pp)->p_vcolor) - 1)
2195 #define PP_NEWPAGE(pp) (!(pp)->p_vcolor)
2196 #define PP_SET_VCOLOR(pp, color) \
2197 ((pp)->p_vcolor = ((color) + 1))
2198
2199 /*
2200 * As mentioned, p_vcolor == 0 means there is no color for this page.
2201 * Since PP_SET_VCOLOR(pp, color) stores 'color' plus one, the constant
2202 * that represents "no color" must be -1, so we define it here.
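 *
 * For example, PP_SET_VCOLOR(pp, NO_VCOLOR) stores 0, marking the page
 * as having no color, and PP_GET_VCOLOR(pp) then maps that 0 back to
 * NO_VCOLOR.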
2203 */
2204 #define NO_VCOLOR (-1)
2205
2206 #define addr_to_vcolor(addr) \
2207 (((uint_t)(uintptr_t)(addr) >> MMU_PAGESHIFT) & vac_colors_mask)
2208 #else /* VAC */
2209 #define addr_to_vcolor(addr) (0)
2210 #endif /* VAC */
2211
2212 /*
2213 * The field p_index in the psm page structure is for large pages support.
2214 * P_index is a bit-vector of the different mapping sizes that a given page
2215 * is part of. An hme structure for a large mapping is only added in the
2216 * group leader page (first page). All pages covered by a given large mapping
2217 * have the corresponding mapping bit set in their p_index field. This allows
2218 * us to only store an explicit hme structure in the leading page, which
2219 * simplifies the mapping list management. Furthermore, it provides us
2220 * a fast mechanism for determining the largest mapping a page is part of. For
2221 * example, a page with a 64K and a 4M mapping has a p_index value of 0x0A.
2222 *
2223 * Implementation note: even though the first bit in p_index is reserved
2224 * for 8K mappings, it is NOT USED by the code and SHOULD NOT be set.
2225 * In addition, the upper four bits of the p_index field are used by the
2226 * code as temporaries.
2227 */
2228
2229 /*
2230 * Defines for psm page struct fields and large page support
2231 */
2232 #define SFMMU_INDEX_SHIFT 6
2233 #define SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)
2234
2235 /* Return the mapping index */
2236 #define PP_MAPINDEX(pp) ((pp)->p_index & SFMMU_INDEX_MASK)
2237
2238 /*
2239 * These macros rely on the following property:
2240 * All pages constituting a large page are covered by a virtually
2241 * contiguous set of page_t's.
2242 */
2243
2244 /* Return the leader for this mapping size */
2245 #define PP_GROUPLEADER(pp, sz) \
2246 (&(pp)[-(int)(pp->p_pagenum & (TTEPAGES(sz)-1))])
2247
2248 /* Return the root page for this page based on p_szc */
2249 #define PP_PAGEROOT(pp) ((pp)->p_szc == 0 ? (pp) : \
2250 PP_GROUPLEADER((pp), (pp)->p_szc))
2251
2252 #define PP_PAGENEXT_N(pp, n) ((pp) + (n))
2253 #define PP_PAGENEXT(pp) PP_PAGENEXT_N((pp), 1)
2254
2255 #define PP_PAGEPREV_N(pp, n) ((pp) - (n))
2256 #define PP_PAGEPREV(pp) PP_PAGEPREV_N((pp), 1)
2257
2258 #define PP_ISMAPPED_LARGE(pp) (PP_MAPINDEX(pp) != 0)
2259
2260 /* Need a function to test the page mapping which takes p_index into account */
2261 #define PP_ISMAPPED(pp) ((pp)->p_mapping || PP_ISMAPPED_LARGE(pp))
2262
2263 /*
2264 * Don't call this macro with sz equal to zero. 8K mappings SHOULD NOT
2265 * set the p_index field.
2266 */
2267 #define PAGESZ_TO_INDEX(sz) (1 << (sz))
2268
2269
2270 /*
2271 * prototypes for hat assembly routines. Some of these are
2272 * known to machine dependent VM code.
2273 */ 2274 extern uint64_t sfmmu_make_tsbtag(caddr_t); 2275 extern struct tsbe * 2276 sfmmu_get_tsbe(uint64_t, caddr_t, int, int); 2277 extern void sfmmu_load_tsbe(struct tsbe *, uint64_t, tte_t *, int); 2278 extern void sfmmu_unload_tsbe(struct tsbe *, uint64_t, int); 2279 extern void sfmmu_load_mmustate(sfmmu_t *); 2280 extern void sfmmu_raise_tsb_exception(uint64_t, uint64_t); 2281 #ifndef sun4v 2282 extern void sfmmu_itlb_ld_kva(caddr_t, tte_t *); 2283 extern void sfmmu_dtlb_ld_kva(caddr_t, tte_t *); 2284 #endif /* sun4v */ 2285 extern void sfmmu_copytte(tte_t *, tte_t *); 2286 extern int sfmmu_modifytte(tte_t *, tte_t *, tte_t *); 2287 extern int sfmmu_modifytte_try(tte_t *, tte_t *, tte_t *); 2288 extern pfn_t sfmmu_ttetopfn(tte_t *, caddr_t); 2289 extern uint_t sfmmu_disable_intrs(void); 2290 extern void sfmmu_enable_intrs(uint_t); 2291 /* 2292 * functions exported to machine dependent VM code 2293 */ 2294 extern void sfmmu_patch_ktsb(void); 2295 #ifndef UTSB_PHYS 2296 extern void sfmmu_patch_utsb(void); 2297 #endif /* UTSB_PHYS */ 2298 extern pfn_t sfmmu_vatopfn(caddr_t, sfmmu_t *, tte_t *); 2299 extern void sfmmu_vatopfn_suspended(caddr_t, sfmmu_t *, tte_t *); 2300 extern pfn_t sfmmu_kvaszc2pfn(caddr_t, int); 2301 #ifdef DEBUG 2302 extern void sfmmu_check_kpfn(pfn_t); 2303 #else 2304 #define sfmmu_check_kpfn(pfn) /* disabled */ 2305 #endif /* DEBUG */ 2306 extern void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 2307 extern void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, uint_t); 2308 extern void sfmmu_tsbmiss_exception(struct regs *, uintptr_t, uint_t); 2309 extern void sfmmu_init_tsbs(void); 2310 extern caddr_t sfmmu_ktsb_alloc(caddr_t); 2311 extern int sfmmu_getctx_pri(void); 2312 extern int sfmmu_getctx_sec(void); 2313 extern void sfmmu_setctx_sec(uint_t); 2314 extern void sfmmu_inv_tsb(caddr_t, uint_t); 2315 extern void sfmmu_init_ktsbinfo(void); 2316 extern int sfmmu_setup_4lp(void); 2317 extern void sfmmu_patch_mmu_asi(int); 2318 extern void sfmmu_init_nucleus_hblks(caddr_t, size_t, int, int); 2319 extern void sfmmu_cache_flushall(void); 2320 extern pgcnt_t sfmmu_tte_cnt(sfmmu_t *, uint_t); 2321 extern void *sfmmu_tsb_segkmem_alloc(vmem_t *, size_t, int); 2322 extern void sfmmu_tsb_segkmem_free(vmem_t *, void *, size_t); 2323 extern void sfmmu_reprog_pgsz_arr(sfmmu_t *, uint8_t *); 2324 2325 extern void hat_kern_setup(void); 2326 extern int hat_page_relocate(page_t **, page_t **, spgcnt_t *); 2327 extern int sfmmu_get_ppvcolor(struct page *); 2328 extern int sfmmu_get_addrvcolor(caddr_t); 2329 extern int sfmmu_hat_lock_held(sfmmu_t *); 2330 extern int sfmmu_alloc_ctx(sfmmu_t *, int, struct cpu *, int); 2331 2332 /* 2333 * Functions exported to xhat_sfmmu.c 2334 */ 2335 extern kmutex_t *sfmmu_mlist_enter(page_t *); 2336 extern void sfmmu_mlist_exit(kmutex_t *); 2337 extern int sfmmu_mlist_held(struct page *); 2338 extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *); 2339 2340 /* 2341 * MMU-specific functions optionally imported from the CPU module 2342 */ 2343 #pragma weak mmu_init_scd 2344 #pragma weak mmu_large_pages_disabled 2345 #pragma weak mmu_set_ctx_page_sizes 2346 #pragma weak mmu_check_page_sizes 2347 2348 extern void mmu_init_scd(sf_scd_t *); 2349 extern uint_t mmu_large_pages_disabled(uint_t); 2350 extern void mmu_set_ctx_page_sizes(sfmmu_t *); 2351 extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *); 2352 2353 extern sfmmu_t *ksfmmup; 2354 extern caddr_t ktsb_base; 2355 extern uint64_t ktsb_pbase; 2356 extern int ktsb_sz; 2357 extern int 
ktsb_szcode;
2358 extern caddr_t ktsb4m_base;
2359 extern uint64_t ktsb4m_pbase;
2360 extern int ktsb4m_sz;
2361 extern int ktsb4m_szcode;
2362 extern uint64_t kpm_tsbbase;
2363 extern int kpm_tsbsz;
2364 extern int ktsb_phys;
2365 extern int enable_bigktsb;
2366 #ifndef sun4v
2367 extern int utsb_dtlb_ttenum;
2368 extern int utsb4m_dtlb_ttenum;
2369 #endif /* sun4v */
2370 extern int uhmehash_num;
2371 extern int khmehash_num;
2372 extern struct hmehash_bucket *uhme_hash;
2373 extern struct hmehash_bucket *khme_hash;
2374 extern kmutex_t *mml_table;
2375 extern uint_t mml_table_sz;
2376 extern uint_t mml_shift;
2377 extern uint_t hblk_alloc_dynamic;
2378 extern struct tsbmiss tsbmiss_area[NCPU];
2379 extern struct kpmtsbm kpmtsbm_area[NCPU];
2380
2381 #ifndef sun4v
2382 extern int dtlb_resv_ttenum;
2383 extern caddr_t utsb_vabase;
2384 extern caddr_t utsb4m_vabase;
2385 #endif /* sun4v */
2386 extern vmem_t *kmem_tsb_default_arena[];
2387 extern int tsb_lgrp_affinity;
2388
2389 extern uint_t disable_large_pages;
2390 extern uint_t disable_ism_large_pages;
2391 extern uint_t disable_auto_data_large_pages;
2392 extern uint_t disable_auto_text_large_pages;
2393
2394 /* kpm externals */
2395 extern pfn_t sfmmu_kpm_vatopfn(caddr_t);
2396 extern void sfmmu_kpm_patch_tlbm(void);
2397 extern void sfmmu_kpm_patch_tsbm(void);
2398 extern void sfmmu_patch_shctx(void);
2399 extern void sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
2400 extern void sfmmu_kpm_unload_tsb(caddr_t, int);
2401 extern void sfmmu_kpm_tsbmtl(short *, uint_t *, int);
2402 extern int sfmmu_kpm_stsbmtl(uchar_t *, uint_t *, int);
2403 extern caddr_t kpm_vbase;
2404 extern size_t kpm_size;
2405 extern struct memseg *memseg_hash[];
2406 extern uint64_t memseg_phash[];
2407 extern kpm_hlk_t *kpmp_table;
2408 extern kpm_shlk_t *kpmp_stable;
2409 extern uint_t kpmp_table_sz;
2410 extern uint_t kpmp_stable_sz;
2411 extern uchar_t kpmp_shift;
2412
2413 #define PP_ISMAPPED_KPM(pp) ((pp)->p_kpmref > 0)
2414
2415 #define IS_KPM_ALIAS_RANGE(vaddr) \
2416 (((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift > 0)
2417
2418 #endif /* !_ASM */
2419
2420 /* sfmmu_kpm_tsbmtl flags */
2421 #define KPMTSBM_STOP 0
2422 #define KPMTSBM_START 1
2423
2424 /*
2425 * For kpm_smallpages, the state of how a kpm page is mapped and whether
2426 * it is ready to go is indicated by two 4-bit fields defined in the
2427 * kpm_spage structure as follows:
2428 * kp_mapped_flag bit[0:3] - the page is mapped cacheable or not
2429 * kp_mapped_flag bit[4:7] - the mapping is ready to go or not
2430 * If the KPM_MAPPED_GO bit is on, the assembly tsb miss handler can drop
2431 * the mapping in regardless of the caching state of the mapping. Otherwise,
2432 * the C handler will resolve the VAC conflict, no matter whether the page
2433 * is currently mapped cacheable or non-cacheable.
2434 */
2435 #define KPM_MAPPEDS 0x1 /* small mapping valid, no conflict */
2436 #define KPM_MAPPEDSC 0x2 /* small mapping valid, conflict */
2437 #define KPM_MAPPED_GO 0x10 /* the mapping is ready to go */
2438 #define KPM_MAPPED_MASK 0xf
2439
2440 /* Physical memseg address NULL marker */
2441 #define MSEG_NULLPTR_PA -1
2442
2443 /*
2444 * Memseg hash defines for kpm trap level tsbmiss handler.
2445 * Must be in sync w/ page.h.
2446 */
2447 #define SFMMU_MEM_HASH_SHIFT 0x9
2448 #define SFMMU_N_MEM_SLOTS 0x200
2449 #define SFMMU_MEM_HASH_ENTRY_SHIFT 3
2450
2451 #ifndef _ASM
2452 #if (SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT)
2453 #error SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT
2454 #endif
2455 #if (SFMMU_N_MEM_SLOTS != N_MEM_SLOTS)
2456 #error SFMMU_N_MEM_SLOTS != N_MEM_SLOTS
2457 #endif
2458
2459 /* Physical memseg address NULL marker */
2460 #define SFMMU_MEMSEG_NULLPTR_PA -1
2461
2462 /*
2463 * Check that KCONTEXT is zero; the asm parts depend on that assumption.
2464 */
2465 #if (KCONTEXT != 0)
2466 #error KCONTEXT != 0
2467 #endif
2468 #endif /* !_ASM */
2469
2470
2471 #endif /* _KERNEL */
2472
2473 #ifndef _ASM
2474 /*
2475 * ctx, hmeblk, mlistlock and other stats for sfmmu
2476 */
2477 struct sfmmu_global_stat {
2478 int sf_tsb_exceptions; /* # of tsb exceptions */
2479 int sf_tsb_raise_exception; /* # tsb exc. w/o TLB flush */
2480
2481 int sf_pagefaults; /* # of pagefaults */
2482
2483 int sf_uhash_searches; /* # of user hash searches */
2484 int sf_uhash_links; /* # of user hash links */
2485 int sf_khash_searches; /* # of kernel hash searches */
2486 int sf_khash_links; /* # of kernel hash links */
2487
2488 int sf_swapout; /* # times hat swapped out */
2489
2490 int sf_tsb_alloc; /* # TSB allocations */
2491 int sf_tsb_allocfail; /* # times TSB alloc fail */
2492 int sf_tsb_sectsb_create; /* # times second TSB added */
2493
2494 int sf_scd_1sttsb_alloc; /* # SCD 1st TSB allocations */
2495 int sf_scd_2ndtsb_alloc; /* # SCD 2nd TSB allocations */
2496 int sf_scd_1sttsb_allocfail; /* # SCD 1st TSB alloc fail */
2497 int sf_scd_2ndtsb_allocfail; /* # SCD 2nd TSB alloc fail */
2498
2499
2500 int sf_tteload8k; /* calls to sfmmu_tteload */
2501 int sf_tteload64k; /* calls to sfmmu_tteload */
2502 int sf_tteload512k; /* calls to sfmmu_tteload */
2503 int sf_tteload4m; /* calls to sfmmu_tteload */
2504 int sf_tteload32m; /* calls to sfmmu_tteload */
2505 int sf_tteload256m; /* calls to sfmmu_tteload */
2506
2507 int sf_tsb_load8k; /* # times loaded 8K tsbent */
2508 int sf_tsb_load4m; /* # times loaded 4M tsbent */
2509
2510 int sf_hblk_hit; /* found hblk during tteload */
2511 int sf_hblk8_ncreate; /* static hblk8's created */
2512 int sf_hblk8_nalloc; /* static hblk8's allocated */
2513 int sf_hblk1_ncreate; /* static hblk1's created */
2514 int sf_hblk1_nalloc; /* static hblk1's allocated */
2515 int sf_hblk_slab_cnt; /* sfmmu8_cache slab creates */
2516 int sf_hblk_reserve_cnt; /* hblk_reserve usage */
2517 int sf_hblk_recurse_cnt; /* hblk_reserve owner reqs */
2518 int sf_hblk_reserve_hit; /* hblk_reserve hash hits */
2519 int sf_get_free_success; /* reserve list allocs */
2520 int sf_get_free_throttle; /* fails due to throttling */
2521 int sf_get_free_fail; /* fails due to empty list */
2522 int sf_put_free_success; /* reserve list frees */
2523 int sf_put_free_fail; /* fails due to full list */
2524
2525 int sf_pgcolor_conflict; /* VAC conflict resolution */
2526 int sf_uncache_conflict; /* VAC conflict resolution */
2527 int sf_unload_conflict; /* VAC unload resolution */
2528 int sf_ism_uncache; /* VAC conflict resolution */
2529 int sf_ism_recache; /* VAC conflict resolution */
2530 int sf_recache; /* VAC conflict resolution */
2531
2532 int sf_steal_count; /* # of hblks stolen */
2533
2534 int sf_pagesync; /* # of pagesyncs */
2535 int sf_clrwrt; /* # of clear write perms */
2536 int sf_pagesync_invalid; /* pagesync with inv tte */
2537
2538 int sf_kernel_xcalls; /* # of kernel cross calls */
2539 int
sf_user_xcalls; /* # of user cross calls */
2540
2541 int sf_tsb_grow; /* # of user tsb grows */
2542 int sf_tsb_shrink; /* # of user tsb shrinks */
2543 int sf_tsb_resize_failures; /* # of user tsb resize failures */
2544 int sf_tsb_reloc; /* # of user tsb relocations */
2545
2546 int sf_user_vtop; /* # of user vatopfn calls */
2547
2548 int sf_ctx_inv; /* # times invalidate MMU ctx */
2549
2550 int sf_tlb_reprog_pgsz; /* # times switch TLB pgsz */
2551
2552 int sf_region_remap_demap; /* # times shme remap demap */
2553
2554 int sf_create_scd; /* # times SCD is created */
2555 int sf_join_scd; /* # processes that joined an SCD */
2556 int sf_leave_scd; /* # processes that left an SCD */
2557 int sf_destroy_scd; /* # times SCD is destroyed */
2558 };
2559
2560 struct sfmmu_tsbsize_stat {
2561 int sf_tsbsz_8k;
2562 int sf_tsbsz_16k;
2563 int sf_tsbsz_32k;
2564 int sf_tsbsz_64k;
2565 int sf_tsbsz_128k;
2566 int sf_tsbsz_256k;
2567 int sf_tsbsz_512k;
2568 int sf_tsbsz_1m;
2569 int sf_tsbsz_2m;
2570 int sf_tsbsz_4m;
2571 int sf_tsbsz_8m;
2572 int sf_tsbsz_16m;
2573 int sf_tsbsz_32m;
2574 int sf_tsbsz_64m;
2575 int sf_tsbsz_128m;
2576 int sf_tsbsz_256m;
2577 };
2578
2579 struct sfmmu_percpu_stat {
2580 int sf_itlb_misses; /* # of itlb misses */
2581 int sf_dtlb_misses; /* # of dtlb misses */
2582 int sf_utsb_misses; /* # of user tsb misses */
2583 int sf_ktsb_misses; /* # of kernel tsb misses */
2584 int sf_tsb_hits; /* # of tsb hits */
2585 int sf_umod_faults; /* # of mod (prot viol) flts */
2586 int sf_kmod_faults; /* # of mod (prot viol) flts */
2587 };
2588
2589 #define SFMMU_STAT(stat) sfmmu_global_stat.stat++
2590 #define SFMMU_STAT_ADD(stat, amount) sfmmu_global_stat.stat += (amount)
2591 #define SFMMU_STAT_SET(stat, count) sfmmu_global_stat.stat = (count)
2592
2593 #define SFMMU_MMU_STAT(stat) CPU->cpu_m.cpu_mmu_ctxp->stat++
2594
2595 #endif /* !_ASM */
2596
2597 #ifdef __cplusplus
2598 }
2599 #endif
2600
2601 #endif /* _VM_HAT_SFMMU_H */
2602