/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the Sun reference MMU (sfmmu)
 * specific hat data structures and the sfmmu-specific hat procedures.
 * The machine-independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_MACH_SFMMU_H
#define	_VM_MACH_SFMMU_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/spitregs.h>
#include <sys/opl_olympus_regs.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * On sun4u platforms, user TSBs are accessed via virtual address by default.
 * Platforms that support ASI_SCRATCHPAD registers can define UTSB_PHYS in the
 * platform Makefile to access user TSBs via physical address, but they must
 * also designate one ASI_SCRATCHPAD register to hold the second user TSB.
 * To designate the user TSB scratchpad register, platforms must provide a
 * definition for SCRATCHPAD_UTSBREG below.
 *
 * Platforms that use UTSB_PHYS do not allocate two locked TLB entries to
 * access the user TSBs.
 */
#if defined(UTSB_PHYS)

#if defined(_OPL)
#define	SCRATCHPAD_UTSBREG	OPL_SCRATCHPAD_UTSBREG4
#else
#error "Compiling UTSB_PHYS but no SCRATCHPAD_UTSBREG specified"
#endif

#endif /* UTSB_PHYS */


#ifdef _ASM

/*
 * This macro is used to set the private secondary context register in
 * sfmmu_alloc_ctx().
 * Input:
 *	cnum = context number to install
 *	arg2 = unused
 *	tmp1, tmp2 = scratch registers
 */
#define	SET_SECCTX(cnum, arg2, tmp1, tmp2) \
	mov	MMU_SCONTEXT, tmp1; \
	sethi	%hi(FLUSH_ADDR), tmp2; \
	stxa	cnum, [tmp1]ASI_MMU_CTX; \
	flush	tmp2
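
/*
 * Illustrative expansion (a sketch only; the register choices are
 * hypothetical and the real call sites live in the sfmmu assembly files):
 *
 *	SET_SECCTX(%g3, %g0, %g4, %g5)
 *
 * expands to
 *
 *	mov	MMU_SCONTEXT, %g4	! select the secondary context reg
 *	sethi	%hi(FLUSH_ADDR), %g5
 *	stxa	%g3, [%g4]ASI_MMU_CTX	! install the new cnum
 *	flush	%g5			! synchronize the MMU write
 */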

/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 */
#define	TSTAT_CHECK_TL1(label, scr1, scr2) \
	rdpr	%tpc, scr1; \
	sethi	%hi(KERNELBASE), scr2; \
	or	scr2, %lo(KERNELBASE), scr2; \
	cmp	scr1, scr2; \
	bgeu	%xcc, 9f; \
	nop; \
	ba	label; \
	wrpr	%g0, 1, %tl; \
9:


/*
 * The following macros allow us to share the majority of the
 * SFMMU code between sun4u and sun4v platforms.
 */

/*
 * Set up %asi for subsequent TSB accesses: ASI_N when qlp is zero,
 * ASI_MEM (physical) when it is nonzero.
 */
#define	SETUP_TSB_ASI(qlp, tmp) \
	movrz	qlp, ASI_N, tmp; \
	movrnz	qlp, ASI_MEM, tmp; \
	mov	tmp, %asi

/*
 * Macro to switch to the alternate global registers on sun4u platforms
 * (not applicable to sun4v platforms).
 */
#define	USE_ALTERNATE_GLOBALS(scr) \
	rdpr	%pstate, scr; \
	wrpr	scr, PSTATE_MG | PSTATE_AG, %pstate

/*
 * Macro to set the %gl register value on sun4v platforms
 * (not applicable to sun4u platforms, so it expands to nothing here).
 */
#define	SET_GL_REG(val)

/*
 * Get MMU data tag access register value
 *
 * In:
 *	tagacc, scr1 = scratch registers
 * Out:
 *	tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1) \
	mov	MMU_TAG_ACCESS, scr1; \
	ldxa	[scr1]ASI_DMMU, tagacc

/*
 * Get MMU data tag target register
 *
 * In:
 *	ttarget, scr1 = scratch registers (scr1 unused)
 * Out:
 *	ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1) \
	ldxa	[%g0]ASI_DMMU, ttarget

/*
 * Get MMU data/instruction tag access register values
 *
 * In:
 *	dtagacc, itagacc, scr1, scr2 = scratch registers (scr2 unused)
 * Out:
 *	dtagacc = MMU data tag access register value
 *	itagacc = MMU instruction tag access register value
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2) \
	mov	MMU_TAG_ACCESS, scr1; \
	ldxa	[scr1]ASI_DMMU, dtagacc; \
	ldxa	[scr1]ASI_IMMU, itagacc

/*
 * Get MMU data fault address from the tag access register
 *
 * In:
 *	daddr, scr1 = scratch registers
 * Out:
 *	daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1) \
	mov	MMU_TAG_ACCESS, scr1; \
	ldxa	[scr1]ASI_DMMU, daddr; \
	set	TAGACC_CTX_MASK, scr1; \
	andn	daddr, scr1, daddr


/*
 * Load ITLB entry
 *
 * In:
 *	tte = reg containing tte
 *	scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4) \
	stxa	tte, [%g0]ASI_ITLB_IN

/*
 * Load DTLB entry
 *
 * In:
 *	tte = reg containing tte
 *	scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4) \
	stxa	tte, [%g0]ASI_DTLB_IN


/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *	tte = reg containing tte
 *	vaddr = reg containing vaddr
 *	label = temporary label
 *	scr1, scr2, scr3 = scratch registers
 * Out:
 *	tte = PFN value
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3) \
	srlx	tte, TTE_SZ_SHFT, scr1; \
	and	scr1, TTE_SZ_BITS, scr1;	/* scr1 = tte_size */ \
	srlx	tte, TTE_SZ2_SHFT, scr3; \
	and	scr3, TTE_SZ2_BITS, scr3;	/* scr3 = tte_size2 */ \
	or	scr1, scr3, scr1; \
	sllx	scr1, 1, scr2; \
	add	scr2, scr1, scr2;		/* mulx 3 */ \
	sllx	tte, TTE_PA_LSHIFT, tte; \
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3; \
	/* BEGIN CSTYLED */ \
	brz,pt	scr2, label/**/1; \
	srlx	tte, scr3, tte; \
	/* END CSTYLED */ \
	sllx	tte, scr2, tte; \
	set	1, scr1; \
	add	scr2, MMU_PAGESHIFT, scr3; \
	sllx	scr1, scr3, scr1; \
	sub	scr1, 1, scr1;		/* scr1 = TTE_PAGE_OFFSET(ttesz) */ \
	and	vaddr, scr1, scr2; \
	srln	scr2, MMU_PAGESHIFT, scr2; \
	or	tte, scr2, tte; \
	/* CSTYLED */ \
label/**/1:
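
/*
 * Worked example for TTETOPFN (a sketch): SPARC page sizes grow by 8x per
 * size code, so the shift count above is 3 * ttesz.  For a 4M TTE the
 * combined size code is 3 and the macro computes, in C notation,
 *
 *	pfn = ((pa >> (MMU_PAGESHIFT + 9)) << 9) |
 *	    ((vaddr & ((1 << (MMU_PAGESHIFT + 9)) - 1)) >> MMU_PAGESHIFT);
 *
 * i.e. the upper PFN bits come from the TTE's physical address and the
 * low 9 bits come from vaddr bits 21:13 within the 4M page.
 */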

/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.
 *
 * Parameters:
 *	tte = reg containing tte
 *	ttepa = physical pointer to tte
 *	tteva = virtual pointer to tte
 *	tsbarea = tsb miss area
 *	tmp1 = tmp reg
 *	label = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tteva, tsbarea, tmp1, label) \
	/* BEGIN CSTYLED */ \
	/* check reference bit */ \
	andcc	tte, TTE_REF_INT, %g0; \
	bnz,pt	%xcc, label/**/4;	/* if ref bit set, skip ahead */ \
	nop; \
	GET_CPU_IMPL(tmp1); \
	cmp	tmp1, SPITFIRE_IMPL; \
	blt	%icc, label/**/2;	/* skip flush if FJ-OPL cpus */ \
	cmp	tmp1, CHEETAH_IMPL; \
	bl,a	%icc, label/**/1; \
	/* load dcache mask (delay slot, pre-Cheetah path) */ \
	lduh	[tsbarea + TSBMISS_DMASK], tmp1; \
	stxa	%g0, [ttepa]ASI_DC_INVAL;	/* flush line from dcache */ \
	membar	#Sync; \
	ba	label/**/2; \
label/**/1: \
	and	tteva, tmp1, tmp1; \
	stxa	%g0, [tmp1]ASI_DC_TAG;	/* flush line from dcache */ \
	membar	#Sync; \
label/**/2: \
	or	tte, TTE_REF_INT, tmp1; \
	casxa	[ttepa]ASI_MEM, tte, tmp1;	/* update ref bit */ \
	cmp	tte, tmp1; \
	bne,a,pn %xcc, label/**/2; \
	ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */ \
	or	tte, TTE_REF_INT, tte; \
label/**/4: \
	/* END CSTYLED */


/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if they are not already set.
 *
 * Parameters:
 *	tte = reg containing tte
 *	ttepa = physical pointer to tte
 *	tteva = virtual pointer to tte
 *	tsbarea = tsb miss area
 *	tmp1 = tmp reg
 *	label = temporary label
 *	exitlabel = label to jump to if the write permission bit is not set
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tteva, tsbarea, tmp1, label, \
	exitlabel) \
	/* BEGIN CSTYLED */ \
	/* check write permission bit */ \
	andcc	tte, TTE_WRPRM_INT, %g0; \
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */ \
	nop; \
	andcc	tte, TTE_HWWR_INT, %g0; \
	bnz,pn	%xcc, label/**/4;	/* nothing to do */ \
	nop; \
	GET_CPU_IMPL(tmp1); \
	cmp	tmp1, SPITFIRE_IMPL; \
	blt	%icc, label/**/2;	/* skip flush if FJ-OPL cpus */ \
	cmp	tmp1, CHEETAH_IMPL; \
	bl,a	%icc, label/**/1; \
	/* load dcache mask (delay slot, pre-Cheetah path) */ \
	lduh	[tsbarea + TSBMISS_DMASK], tmp1; \
	stxa	%g0, [ttepa]ASI_DC_INVAL;	/* flush line from dcache */ \
	membar	#Sync; \
	ba	label/**/2; \
label/**/1: \
	and	tteva, tmp1, tmp1; \
	stxa	%g0, [tmp1]ASI_DC_TAG;	/* flush line from dcache */ \
	membar	#Sync; \
label/**/2: \
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1; \
	casxa	[ttepa]ASI_MEM, tte, tmp1;	/* update ref/mod bits */ \
	cmp	tte, tmp1; \
	bne,a,pn %xcc, label/**/2; \
	ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */ \
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte; \
label/**/4: \
	/* END CSTYLED */
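
/*
 * The casxa sequences in TTE_SET_REF_ML and TTE_SET_REFMOD_ML above are
 * the usual compare-and-swap retry loop; roughly, as a C sketch (the
 * casx() helper is hypothetical, shown only for illustration):
 *
 *	do {
 *		old = *ttepa;			// MMU_READTTE through pa
 *		new = old | TTE_REF_INT;	// plus TTE_HWWR_INT for mod
 *	} while (casx(ttepa, old, new) != old);
 */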

#ifndef UTSB_PHYS

/*
 * Synthesize TSB base register contents for a process with
 * a single TSB.
 *
 * We patch the virtual address mask in at runtime since the
 * number of significant virtual address bits in the TSB VA
 * can vary depending upon the TSB slab size being used on the
 * machine.
 *
 * In:
 *	tsbinfo = TSB info pointer (ro)
 *	vabase = value of utsb_vabase (ro)
 *	tmp1, tmp2 = scratch registers
 *	label = label used to patch the VA mask at runtime
 * Out:
 *	tsbreg = value to program into TSB base register
 */

#define	MAKE_TSBREG(tsbreg, tsbinfo, vabase, tmp1, tmp2, label) \
	/* BEGIN CSTYLED */ \
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1; \
	.global	label/**/_tsbreg_vamask ;\
label/**/_tsbreg_vamask: \
	or	%g0, RUNTIME_PATCH, tsbreg; \
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp2; \
	sllx	tsbreg, TSBREG_VAMASK_SHIFT, tsbreg; \
	or	vabase, tmp2, tmp2; \
	and	tmp1, tsbreg, tsbreg; \
	or	tsbreg, tmp2, tsbreg; \
	/* END CSTYLED */


/*
 * Synthesize TSB base register contents for a process with
 * two TSBs.  See hat_sfmmu.h for the layout of the TSB base
 * register in this case.
 *
 * In:
 *	tsb1 = pointer to first TSB info (ro)
 *	tsb2 = pointer to second TSB info (ro)
 *	tmp1, tmp2, tmp3 = scratch registers
 *	label = label used to patch the VA mask at runtime
 * Out:
 *	tsbreg = value to program into TSB base register
 */
#define	MAKE_TSBREG_SECTSB(tsbreg, tsb1, tsb2, tmp1, tmp2, tmp3, label) \
	/* BEGIN CSTYLED */ \
	set	TSBREG_MSB_CONST, tmp3 ;\
	sllx	tmp3, TSBREG_MSB_SHIFT, tsbreg ;\
	.global	label/**/_tsbreg_vamask ;\
label/**/_tsbreg_vamask: ;\
	or	%g0, RUNTIME_PATCH, tmp3 ;\
	sll	tmp3, TSBREG_VAMASK_SHIFT, tmp3 ;\
	ldx	[tsb1 + TSBINFO_VADDR], tmp1 ;\
	ldx	[tsb2 + TSBINFO_VADDR], tmp2 ;\
	and	tmp1, tmp3, tmp1 ;\
	and	tmp2, tmp3, tmp2 ;\
	sllx	tmp2, TSBREG_SECTSB_MKSHIFT, tmp2 ;\
	or	tmp1, tmp2, tmp3 ;\
	or	tsbreg, tmp3, tsbreg ;\
	lduh	[tsb1 + TSBINFO_SZCODE], tmp1 ;\
	lduh	[tsb2 + TSBINFO_SZCODE], tmp2 ;\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1 ;\
	and	tmp2, TSB_SOFTSZ_MASK, tmp2 ;\
	sllx	tmp2, TSBREG_SECSZ_SHIFT, tmp2 ;\
	or	tmp1, tmp2, tmp3 ;\
	or	tsbreg, tmp3, tsbreg ;\
	/* END CSTYLED */
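
/*
 * Taken together, the MAKE_TSBREG_SECTSB sequence above composes (a
 * sketch in C notation, where va1/va2 are the two TSB virtual addresses,
 * szc1/szc2 their size codes, and vamask the runtime-patched VA mask):
 *
 *	tsbreg = (TSBREG_MSB_CONST << TSBREG_MSB_SHIFT) |
 *	    (va1 & vamask) | ((va2 & vamask) << TSBREG_SECTSB_MKSHIFT) |
 *	    (szc2 << TSBREG_SECSZ_SHIFT) | szc1;
 */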

/*
 * Load the locked TSB TLB entry.
 *
 * In:
 *	tsbinfo = tsb_info pointer as va (ro)
 *	tteidx = shifted index into TLB to load the locked entry (ro)
 *	va = virtual address at which to load the locked TSB entry (ro)
 * Scratch:
 *	tmp
 */
#define	LOAD_TSBTTE(tsbinfo, tteidx, va, tmp) \
	mov	MMU_TAG_ACCESS, tmp; \
	stxa	va, [tmp]ASI_DMMU;		/* set tag access */ \
	membar	#Sync; \
	ldx	[tsbinfo + TSBINFO_TTE], tmp;	/* fetch locked tte */ \
	stxa	tmp, [tteidx]ASI_DTLB_ACCESS;	/* load locked tte */ \
	membar	#Sync


/*
 * In the current implementation, TSBs usually come from physically
 * contiguous chunks of memory up to 4MB in size, but 8K TSBs may be
 * allocated from 8K chunks of memory under certain conditions.  To
 * prevent aliasing in the virtual address cache when the TSB slab is
 * 8K in size we must align the reserved (TL>0) TSB virtual address to
 * have the same low-order bits as the kernel (TL=0) TSB virtual address,
 * and map 8K TSBs with an 8K TTE.  In cases where the TSB reserved VA
 * range is smaller than the assumed 4M we will patch the shift at
 * runtime; otherwise we leave it alone (which is why the RUNTIME_PATCH
 * constant doesn't appear below).
 *
 * In:
 *	tsbinfo (ro)
 *	resva = reserved VA base for this TSB
 * Out:
 *	resva = corrected VA for this TSB
 */
#define	RESV_OFFSET(tsbinfo, resva, tmp1, label) \
	/* BEGIN CSTYLED */ \
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1 ;\
	brgz,pn	tmp1, 9f ;\
	nop ;\
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1 ;\
	.global	label/**/_resv_offset ;\
label/**/_resv_offset: ;\
	sllx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1 ;\
	srlx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1 ;\
	or	tmp1, resva, resva ;\
9:	/* END CSTYLED */

/*
 * Determine the pointer of the entry in the first TSB to probe given
 * the 8K TSB pointer register contents.
 *
 * In:
 *	tsbp8k = 8K TSB pointer register (ro)
 *	tmp = scratch register
 *	label = label for hot patching of utsb_vabase
 *
 * Out:
 *	tsbe_ptr = TSB entry address
 *
 * Note: This function is patched at runtime for performance reasons.
 * Any changes here require sfmmu_patch_utsb to be fixed as well.
 */

#define	GET_1ST_TSBE_PTR(tsbp8k, tsbe_ptr, tmp, label) \
	/* BEGIN CSTYLED */ \
label/**/_get_1st_tsbe_ptr: ;\
	RUNTIME_PATCH_SETX(tsbe_ptr, tmp) ;\
	/* tsbe_ptr = contents of utsb_vabase */ ;\
	/* clear upper bits leaving just bits 21:0 of TSB ptr. */ ;\
	sllx	tsbp8k, TSBREG_FIRTSB_SHIFT, tmp ;\
	/* finish clear */ ;\
	srlx	tmp, TSBREG_FIRTSB_SHIFT, tmp ;\
	/* or-in bits 41:22 of the VA to form the real pointer. */ ;\
	or	tsbe_ptr, tmp, tsbe_ptr \
	/* END CSTYLED */

/*
 * Determine the base address of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *	tsbp8k = 8K TSB pointer register (ro)
 *	tmp = scratch register
 *	label = label for hot patching of utsb4m_vabase
 *
 * Out:
 *	tsbbase = TSB base address
 *
 * Note: This function is patched at runtime for performance reasons.
 * Any changes here require sfmmu_patch_utsb to be fixed as well.
 */

#define	GET_2ND_TSB_BASE(tsbp8k, tsbbase, tmp, label) \
	/* BEGIN CSTYLED */ \
label/**/_get_2nd_tsb_base: ;\
	RUNTIME_PATCH_SETX(tsbbase, tmp) ;\
	/* tsbbase = contents of utsb4m_vabase */ ;\
	/* clear upper bits leaving just bits 21:xx of TSB addr. */ ;\
	sllx	tsbp8k, TSBREG_SECTSB_LSHIFT, tmp ;\
	/* clear lower bits leaving just 21:13 in 8:0 */ ;\
	srlx	tmp, (TSBREG_SECTSB_RSHIFT + MMU_PAGESHIFT), tmp ;\
	/* adjust TSB offset to bits 21:13 */ ;\
	sllx	tmp, MMU_PAGESHIFT, tmp ;\
	or	tsbbase, tmp, tsbbase ;\
	/* END CSTYLED */

/*
 * Determine the size code of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *	tsbp8k = 8K TSB pointer register (ro)
 * Out:
 *	size = TSB size code
 */

#define	GET_2ND_TSB_SIZE(tsbp8k, size) \
	srlx	tsbp8k, TSBREG_SECSZ_SHIFT, size; \
	and	size, TSB_SOFTSZ_MASK, size
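
/*
 * In C notation the two extractions above amount to (a sketch; the
 * utsb4m_vabase value is materialized by the runtime-patched setx):
 *
 *	base = utsb4m_vabase |
 *	    (((tsbp8k << TSBREG_SECTSB_LSHIFT) >>
 *	    (TSBREG_SECTSB_RSHIFT + MMU_PAGESHIFT)) << MMU_PAGESHIFT);
 *	size = (tsbp8k >> TSBREG_SECSZ_SHIFT) & TSB_SOFTSZ_MASK;
 */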

/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *	tagacc = tag access register (clobbered)
 *	tsbp8k = contents of TSB8K pointer register (ro)
 *	tmp1, tmp2 = scratch registers
 *	label = label at which to patch in reserved TSB 4M VA range
 * Out:
 *	tsbe_ptr = pointer to the tsbe in the 2nd TSB
 */
#define	GET_2ND_TSBE_PTR(tagacc, tsbp8k, tsbe_ptr, tmp1, tmp2, label) \
	GET_2ND_TSB_BASE(tsbp8k, tsbe_ptr, tmp2, label); \
	/* tsbe_ptr = TSB base address, tmp2 = junk */ \
	GET_2ND_TSB_SIZE(tsbp8k, tmp1); \
	/* tmp1 = TSB size code */ \
	GET_TSBE_POINTER(MMU_PAGESHIFT4M, tsbe_ptr, tagacc, tmp1, tmp2)

#endif /* UTSB_PHYS */


#ifdef UTSB_PHYS

/*
 * Synthesize TSB base register contents for a process.
 *
 * In:
 *	tsbinfo = TSB info pointer (ro)
 *	tsbreg, tmp1 = scratch registers
 * Out:
 *	tsbreg = value to program into TSB base register
 */

#define	MAKE_UTSBREG_PHYS(tsbinfo, tsbreg, tmp1) \
	ldx	[tsbinfo + TSBINFO_PADDR], tsbreg; \
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1; \
	and	tmp1, TSB_SOFTSZ_MASK, tmp1; \
	or	tsbreg, tmp1, tsbreg

/*
 * Load TSB base register into a dedicated scratchpad register.
 * This register contains utsb_pabase in bits 63:13, and the TSB size
 * code in bits 2:0.
 *
 * In:
 *	tsbreg = value to load (ro)
 *	regnum = constant or register
 *	tmp1 = scratch register
 * Out:
 *	Specified scratchpad register updated
 *
 * Note: If this is enabled on Panther, a membar #Sync is required
 * following an ASI store to the scratchpad registers.
 */

#define	SET_UTSBREG(regnum, tsbreg, tmp1) \
	mov	regnum, tmp1; \
	stxa	tsbreg, [tmp1]ASI_SCRATCHPAD	/* save tsbreg */

/*
 * Get TSB base register from the scratchpad
 *
 * In:
 *	regnum = constant or register
 *	tsbreg = scratch
 * Out:
 *	tsbreg = tsbreg from the specified scratchpad register
 */

#define	GET_UTSBREG(regnum, tsbreg) \
	mov	regnum, tsbreg; \
	ldxa	[tsbreg]ASI_SCRATCHPAD, tsbreg

/*
 * Determine the pointer of the entry in the first TSB to probe given
 * the 8K TSB pointer register contents.
 *
 * In:
 *	tagacc = tag access register
 *	tsbe_ptr = 8K TSB pointer register
 *	tmp1, tmp2 = scratch registers (unused)
 *
 * Out:
 *	tsbe_ptr = TSB entry address
 *
 * Note: This macro is a nop since the 8K TSB pointer register
 * is already the entry pointer and does not need to be decoded.
 * It is defined to allow for code sharing with sun4v.
 */

#define	GET_1ST_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2)
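
/*
 * A sketch of the encoding used by the UTSB_PHYS macros above:
 * MAKE_UTSBREG_PHYS produces, in C notation,
 *
 *	tsbreg = tsb_pa | (szc & TSB_SOFTSZ_MASK);
 *
 * which works because the TSB physical base is at least 8K aligned, so
 * the size code fits in the otherwise-zero low bits.  SET_UTSBREG then
 * stores that value into the designated scratchpad register, and
 * GET_UTSBREG reads it back.
 */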

/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *	tagacc = tag access register (not clobbered)
 *	tsbe = 2nd TSB base register
 *	tmp1, tmp2 = scratch registers
 * Out:
 *	tsbe = pointer to the tsbe in the 2nd TSB
 */

#define	GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2 = szc */ \
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */ \
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */ \
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */ \
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */ \
	srlx	tagacc, MMU_PAGESHIFT4M, tmp2; \
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */ \
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */ \
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */

/*
 * Read the 2nd TSB base register.  This is not done in GET_2ND_TSBE_PTR as
 * an optimization since the TLB miss trap handler entries have potentially
 * already loaded the 2nd TSB base reg when we invoke GET_2ND_TSBE_PTR.
 *
 * Out:
 *	tsbreg = contents of the 2nd TSB base register
 */
#define	GET_2ND_TSBREG(tsbreg) \
	GET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg);

/*
 * Load the 2nd TSB base into a dedicated scratchpad register which
 * is used as a pseudo TSB base register.
 *
 * In:
 *	tsbreg = value to load (ro)
 *	tmp1 = scratch register
 * Out:
 *	Designated scratchpad register updated
 */
#define	LOAD_2ND_TSBREG(tsbreg, tmp1) \
	SET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg, tmp1);

#endif /* UTSB_PHYS */


/*
 * Load TSB base register.  In the single TSB case this register
 * contains utsb_vabase, bits 21:13 of tsbinfo->tsb_va, and the
 * TSB size code in bits 2:0.  See hat_sfmmu.h for the layout in
 * the case where we have multiple TSBs per process.
 *
 * In:
 *	tsbreg = value to load (ro)
 *	tmp1, tmp2 = scratch registers
 */
#define	LOAD_TSBREG(tsbreg, tmp1, tmp2) \
	mov	MMU_TSB, tmp1; \
	sethi	%hi(FLUSH_ADDR), tmp2; \
	stxa	tsbreg, [tmp1]ASI_DMMU;	/* dtsb reg */ \
	stxa	tsbreg, [tmp1]ASI_IMMU;	/* itsb reg */ \
	flush	tmp2

#ifdef UTSB_PHYS
#define	UTSB_PROBE_ASI	ASI_QUAD_LDD_PHYS
#else
#define	UTSB_PROBE_ASI	ASI_NQUAD_LD
#endif

/*
 * Probe the first TSB; on a tag match, insert the TTE into the TLB
 * and retry the faulted instruction.
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching (in, ro)
 * label = where to branch to if this is a miss (text)
 * UTSB_PROBE_ASI = ASI used for the TSB access (see above)
 *
 * For trapstat, we have to explicitly use these registers:
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data (tte) will be retrieved into from TSB (out)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */ \
	/* BEGIN CSTYLED */ \
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4	/* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */ ;\
	bne,pn	%xcc, label/**/1	/* branch if !match */ ;\
	nop ;\
	TT_TRACE(trace_tsbhit) ;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
	/* trapstat expects tte in %g5 */ ;\
	retry				/* retry faulted instruction */ ;\
label/**/1: \
	/* END CSTYLED */
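
/*
 * In C notation the probe above is roughly (a sketch; the field and
 * helper names are hypothetical):
 *
 *	tag  = tsbe_ptr->tte_tag;	// ldda fetches tag+data atomically
 *	data = tsbe_ptr->tte_data;
 *	if (tag != vpg_4m)
 *		goto label1;		// miss: fall into miss handling
 *	dtlb_stuff(data);		// DTLB_STUFF: load TTE into DTLB
 *	retry();			// re-execute the faulting access
 */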

/*
 * Same as above, except that if the TTE does not have the execute
 * bit set, it branches directly to exec_fault.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label) \
	/* BEGIN CSTYLED */ \
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4	/* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */ ;\
	bne,pn	%xcc, label/**/1	/* branch if !match */ ;\
	nop ;\
	andcc	%g5, TTE_EXECPRM_INT, %g0	/* check execute bit */ ;\
	bz,pn	%icc, exec_fault ;\
	nop ;\
	TT_TRACE(trace_tsbhit) ;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
	retry				/* retry faulted instruction */ ;\
label/**/1: \
	/* END CSTYLED */

/*
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers:
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label) \
	/* BEGIN CSTYLED */ \
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4	/* g4 = tag, g5 = data */ ;\
	/* since we are looking at the 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m ;\
	bne,pn	%xcc, label/**/1 ;\
	nop ;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit) ;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
	/* trapstat expects tte in %g5 */ ;\
	retry				/* retry faulted instruction */ ;\
label/**/1: \
	/* END CSTYLED */

/*
 * Macro to get the SCD shared hme map on sun4v platforms
 * (not applicable to sun4u platforms, so it expands to nothing here).
 */
#define	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)

#ifndef TRAPTRACE
/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly to exec_fault
 * after checking for ITLB synthesis.
 * On a TSB miss, branch to the TSB miss handler (sfmmu_tsb_miss_tt).
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label) \
	/* BEGIN CSTYLED */ \
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4	/* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */ ;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */ ;\
	or	%g0, TTE4M, %g6 ;\
	andcc	%g5, TTE_EXECPRM_INT, %g0	/* check execute bit */ ;\
	bz,a,pn	%icc, label/**/1 ;\
	sllx	%g6, TTE_SZ_SHFT, %g6 ;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
	retry				/* retry faulted instruction */ ;\
label/**/1: ;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0 ;\
	bz,pn	%icc, exec_fault ;\
	or	%g5, %g6, %g5 ;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
	retry				/* retry faulted instruction */ \
	/* END CSTYLED */
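
/*
 * The ITLB synthesis path above, as a C sketch (itlb_stuff() is a
 * hypothetical stand-in for ITLB_STUFF):
 *
 *	if (!(tte & TTE_EXECPRM_INT)) {
 *		if (!(tte & TTE_E_SYNTH_INT))
 *			goto exec_fault;
 *		tte |= (uint64_t)TTE4M << TTE_SZ_SHFT;	// promote to 4M
 *	}
 *	itlb_stuff(tte);
 *	retry();
 */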
#else /* TRAPTRACE */
/*
 * Same as above, with the TT_TRACE and "mov tsbe_ptr, %g1" additions.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label) \
	/* BEGIN CSTYLED */ \
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4	/* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */ ;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */ ;\
	or	%g0, TTE4M, %g6 ;\
	andcc	%g5, TTE_EXECPRM_INT, %g0	/* check execute bit */ ;\
	bz,a,pn	%icc, label/**/1 ;\
	sllx	%g6, TTE_SZ_SHFT, %g6 ;\
	mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit) ;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
	retry				/* retry faulted instruction */ ;\
label/**/1: ;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0 ;\
	bz,pn	%icc, exec_fault ;\
	mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	or	%g5, %g6, %g5 ;\
	TT_TRACE(trace_tsbhit) ;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
	retry				/* retry faulted instruction */ \
	/* END CSTYLED */

#endif /* TRAPTRACE */
#endif /* _ASM */

#ifdef __cplusplus
}
#endif

#endif /* _VM_MACH_SFMMU_H */