/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

#if defined (lint)

/*
 * sfmmu related subroutines
 */
/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

/* ARGSUSED */
void
sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(uint_t ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}

#else	/* lint */

/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being invalidated
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 * Note %g1 is the only input argument used by this xcall handler.
 */
	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT ||
	!     current CPU tsbmiss->usfmmup == victim sfmmup) {
	!	if (shctx_on) {
	!		shctx = INVALID;
	!	}
	!	if (sec-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	!	if (pri-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to pri-ctx
	!	}
	! }

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn	%xcc, ptl1_panic		/* can't invalidate kernel ctx */
	mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2
	cmp	%g1, INVALID_CONTEXT
	be,pn	%xcc, 0f			/* called from wrap_around? */
	mov	MMU_SCONTEXT, %g3

	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5	/* load usfmmup */
	cmp	%g5, %g1			/* hat to be invalidated running? */
	bne,pt	%xcc, 3f
	nop

0:
	sethi	%hi(shctx_on), %g5
	ld	[%g5 + %lo(shctx_on)], %g5
	brz	%g5, 1f
	mov	MMU_SHARED_CONTEXT, %g5
	sethi	%hi(FLUSH_ADDR), %g4
	stxa	%g0, [%g5]ASI_MMU_CTX
	flush	%g4

1:
	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
	set	CTXREG_CTX_MASK, %g4
	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 2f			/* yes, no need to change */
	mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

2:
	ldxa	[%g7]ASI_MMU_CTX, %g3		/* get pgsz | pri-ctx */
	and	%g3, %g4, %g5			/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 3f			/* yes, no need to change */
	srlx	%g3, CTXREG_NEXT_SHIFT, %g3	/* %g3 = nucleus pgsz */
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3	/* need to preserve nucleus pgsz */
	or	%g3, %g2, %g2			/* %g2 = nucleus pgsz | INVALID_CONTEXT */

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
3:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)


	/*
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
	ENTRY_NP(sfmmu_itlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset

	ldx	[%o1], %g1
	set	MMU_TAG_ACCESS, %o5
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is Cheetah or a derivative and the specified TTE is locked,
	! and hence to be loaded into the T16 fully-associative TLB, we
	! must avoid Cheetah+ erratum 34: under certain conditions an ITLB
	! locked index 0 TTE will erroneously be displaced when a new TTE is
	! loaded via ASI_ITLB_IN.  To avoid this erratum, we scan the T16
	! top down for an unlocked TTE and explicitly load the specified
	! TTE into that index.
	!
	GET_CPU_IMPL(%g2)
	cmp	%g2, CHEETAH_IMPL
	bl,pn	%icc, 0f
	nop

	andcc	%g1, TTE_LCK_INT, %g0
	bz	%icc, 0f			! Lock bit is not set;
						!   load normally.
	or	%g0, (15 << 3), %g3		! Start searching from the
						!   top down.

1:
	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16

	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	cmp	%g4, %g0
	bge	%xcc, 2f			! TTE is >= 0 iff not valid
	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz	%icc, 2f			! If unlocked, go displace
	nop
	sub	%g3, (1 << 3), %g3
	brgz	%g3, 1b				! Still more TLB entries
	nop					!   to search

	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
	call	panic				!   found no unlocked TTE so
	or	%o0, %lo(sfmmu_panic5), %o0	!   give up.


2:
	!
	! We have found an unlocked or non-valid entry; we'll explicitly load
	! our locked entry here.
	!
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g3]ASI_ITLB_ACCESS
	flush	%o1				! Flush required for I-MMU
	ba	3f				! Delay slot of ba is empty
	nop					!   per Erratum 64

0:
#endif	/* CHEETAHPLUS_ERRATUM_34 */
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g0]ASI_ITLB_IN
	flush	%o1				! Flush required for I-MMU
3:
	retl
	wrpr	%g0, %o3, %pstate		! Enable interrupts
	SET_SIZE(sfmmu_itlb_ld_kva)

	/*
	 * Load an entry into the DTLB.
	 *
	 * Special handling is required for locked entries since there
	 * are some TLB slots that are reserved for the kernel but not
	 * always held locked.  We want to avoid loading locked TTEs
	 * into those slots since they could be displaced.
	 *
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
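	/*
	 * In outline (a sketch of the code below, not a contract):
	 *
	 *	if (!(tte & TTE_LCK_INT)) {
	 *		write tag access; load TTE via ASI_DTLB_IN;
	 *	} else {
	 *		for (idx = dtlb_resv_ttenum - 1; idx >= 0; idx--)
	 *			if (TLB0[idx] is invalid or unlocked)
	 *				break;
	 *		if (idx < 0)
	 *			panic(sfmmu_panic5);
	 *		write tag access; displace TLB0[idx] via ASI_DTLB_ACCESS;
	 *	}
	 */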
	ENTRY_NP(sfmmu_dtlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset

	ldx	[%o1], %g1

	set	MMU_TAG_ACCESS, %o5

	set	cpu_impl_dual_pgsz, %o2
	ld	[%o2], %o2
	brz	%o2, 1f
	nop

	sethi	%hi(ksfmmup), %o2
	ldx	[%o2 + %lo(ksfmmup)], %o2
	ldub	[%o2 + SFMMU_CEXT], %o2
	sll	%o2, TAGACCEXT_SHIFT, %o2

	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
	stxa	%o2, [%o4]ASI_DMMU
	membar	#Sync
1:
	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
	bnz,pn	%icc, 2f			!   special handling
	sethi	%hi(dtlb_resv_ttenum), %g3
	stxa	%o0, [%o5]ASI_DMMU		! Load unlocked TTE
	stxa	%g1, [%g0]ASI_DTLB_IN		!   via DTLB_IN
	membar	#Sync
	retl
	wrpr	%g0, %o3, %pstate		! enable interrupts
2:
	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
	sll	%g3, 3, %g3			! First reserved idx in TLB 0
	sub	%g3, (1 << 3), %g3		! Decrement idx
3:
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	brgez,pn %g4, 4f			! TTE is >= 0 iff not valid
	nop
	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz,pn	%icc, 4f			! If unlocked, go displace
	nop
	sub	%g3, (1 << 3), %g3		! Decrement idx
	brgez	%g3, 3b
	nop
	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
	call	panic				!   found no unlocked TTE so
	or	%o0, %lo(sfmmu_panic5), %o0	!   give up.
4:
	stxa	%o0, [%o5]ASI_DMMU		! Setup tag access
#ifdef	OLYMPUS_SHARED_FTLB
	stxa	%g1, [%g0]ASI_DTLB_IN
#else
	stxa	%g1, [%g3]ASI_DTLB_ACCESS	! Displace entry at idx
#endif
	membar	#Sync
	retl
	wrpr	%g0, %o3, %pstate		! enable interrupts
	SET_SIZE(sfmmu_dtlb_ld_kva)

	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	set	CTXREG_CTX_MASK, %o1
	ldxa	[%o0]ASI_MMU_CTX, %o0
	retl
	and	%o0, %o1, %o0
	SET_SIZE(sfmmu_getctx_sec)

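	/*
	 * Note: sfmmu_getctx_pri above returns the raw primary context
	 * register (including the page-size bits), while sfmmu_getctx_sec
	 * masks with CTXREG_CTX_MASK and returns only the context number.
	 */
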
	/*
	 * Set the secondary context register for this process.
	 * %o0 = page_size | context number for this process.
	 */
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled.  So,
	 * we need to check first.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */

1:
	mov	MMU_SCONTEXT, %o1

	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	flush	%o4
	sethi	%hi(shctx_on), %g3
	ld	[%g3 + %lo(shctx_on)], %g3
	brz	%g3, 2f
	nop
	set	CTXREG_CTX_MASK, %o4
	and	%o0, %o4, %o1
	cmp	%o1, INVALID_CONTEXT
	bne,pn	%icc, 2f
	mov	MMU_SHARED_CONTEXT, %o1
	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%g0, [%o1]ASI_MMU_CTX		/* set shared context reg. */
	flush	%o4

	/*
	 * If the routine was entered with interrupts enabled, then enable
	 * interrupts now.  Otherwise, keep interrupts disabled and return
	 * without enabling them.
	 * %g1 - old interrupt state
	 */
2:	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 3f
	wrpr	%g0, %g1, %pstate		/* enable interrupts */
3:	retl
	nop
	SET_SIZE(sfmmu_setctx_sec)

	/*
	 * Set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
	 * Returns the detection value in %o0.
	 *
	 * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows:
	 *  - Cheetah+ and later (greater than or equal to CHEETAH_PLUS_IMPL)
	 *  - FJ OPL Olympus-C and later (less than SPITFIRE_IMPL)
	 */
	ENTRY_NP(sfmmu_setup_4lp)
	GET_CPU_IMPL(%o0);
	cmp	%o0, CHEETAH_PLUS_IMPL
	bge,pt	%icc, 4f
	mov	1, %o1
	cmp	%o0, SPITFIRE_IMPL
	bge,a,pn %icc, 3f
	clr	%o1
4:
	set	ktsb_phys, %o2
	st	%o1, [%o2]
3:	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)


	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 */
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
#endif /* DEBUG */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 8f			! if kernel as, do nothing
	nop
	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and load locked TTE(s) for the TSB.
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

#ifdef	UTSB_PHYS
	/*
	 * UTSB_PHYS accesses user TSBs via physical addresses.  The first
	 * TSB is in the MMU I/D TSB Base registers.  The 2nd, 3rd and
	 * 4th TSBs use designated ASI_SCRATCHPAD regs as pseudo TSB base regs.
	 */

	/* create/set first UTSBREG actually loaded into MMU_TSB */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = first utsbreg
	LOAD_TSBREG(%o2, %o3, %o4)		! write TSB base register

	brz,a,pt %g2, 2f
	mov	-1, %o2				! use -1 if no second TSB

	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = second utsbreg
2:
	SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

	/* make 3rd and 4th TSB */
	CPU_TSBMISS_AREA(%o4, %o3)		! %o4 = tsbmiss area

	ldx	[%o0 + SFMMU_SCDP], %g2		! %g2 = sfmmu_scd
	brz,pt	%g2, 3f
	mov	-1, %o2				! use -1 if no third TSB

	ldx	[%g2 + SCD_SFMMUP], %g3		! %g3 = scdp->scd_sfmmup
	ldx	[%g3 + SFMMU_TSB], %o1		! %o1 = first scd tsbinfo
	brz,pn	%o1, 5f
	nop					! panic if no third TSB

	/* make 3rd UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = third utsbreg
3:
	SET_UTSBREG(SCRATCHPAD_UTSBREG3, %o2, %o3)
	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR]

	brz,pt	%g2, 4f
	mov	-1, %o2				! use -1 if no 3rd or 4th TSB

	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second scd tsbinfo
	brz,pt	%g2, 4f
	mov	-1, %o2				! use -1 if no 4th TSB

	/* make 4th UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = fourth utsbreg
4:
	SET_UTSBREG(SCRATCHPAD_UTSBREG4, %o2, %o3)
	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR4M]
	ba,pt	%icc, 6f
	mov	%o4, %o2			! %o2 = tsbmiss area
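	/*
	 * Label 5 below is the error path: it is reached only when an SCD
	 * exists but its sfmmup has no TSB.  Unless a panic is already in
	 * progress, panic with sfmmu_panic10.
	 */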
5:
	sethi	%hi(panicstr), %g1		! panic if no 3rd TSB
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1

	bnz,pn	%xcc, 8f
	nop

	sethi	%hi(sfmmu_panic10), %o0
	call	panic
	or	%o0, %lo(sfmmu_panic10), %o0

#else /* UTSB_PHYS */

	brz,pt	%g2, 4f
	nop
	/*
	 * We have a second TSB for this process, so we need to
	 * encode data for both the first and second TSB in our single
	 * TSB base register.  See hat_sfmmu.h for details on what bits
	 * correspond to which TSB.
	 * We also need to load a locked TTE into the TLB for the second TSB
	 * in this case.
	 */
	MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
						! %o2 = tsbreg
	sethi	%hi(utsb4m_dtlb_ttenum), %o3
	sethi	%hi(utsb4m_vabase), %o4
	ld	[%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
	ldx	[%o4 + %lo(utsb4m_vabase)], %o4	! %o4 = TLB tag for sec TSB
	sll	%o3, DTACC_SHIFT, %o3		! %o3 = sec TSB TLB index
	RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)	! or-in bits of TSB VA
	LOAD_TSBTTE(%g2, %o3, %o4, %g3)		! load sec TSB locked TTE
	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	ba,pt	%xcc, 5f
	nop

4:	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)	! %o2 = tsbreg

5:	LOAD_TSBREG(%o2, %o3, %o4)		! write TSB base register

	/*
	 * Load the TTE for the first TSB at the appropriate location in
	 * the TLB
	 */
	sethi	%hi(utsb_dtlb_ttenum), %o2
	ld	[%o2 + %lo(utsb_dtlb_ttenum)], %o2
	sll	%o2, DTACC_SHIFT, %o2		! %o2 = first TSB TLB index
	RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)	! or-in bits of TSB VA
	LOAD_TSBTTE(%o1, %o2, %g3, %o4)		! load first TSB locked TTE
	CPU_TSBMISS_AREA(%o2, %o3)
#endif /* UTSB_PHYS */
6:
	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
						! we need to access from
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	ldub	[%o0 + SFMMU_TTEFLAGS], %o3	! per-CPU tsbmiss area.
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stub	%o3, [%o2 + TSBMISS_UTTEFLAGS]
#ifdef UTSB_PHYS
	ldx	[%o0 + SFMMU_SRDP], %o1
	ldub	[%o0 + SFMMU_RTTEFLAGS], %o4
	stub	%o4, [%o2 + TSBMISS_URTTEFLAGS]
	stx	%o1, [%o2 + TSBMISS_SHARED_UHATID]
	brz,pn	%o1, 8f				! check for sfmmu_srdp
	add	%o0, SFMMU_HMERMAP, %o1
	add	%o2, TSBMISS_SHMERMAP, %o2
	mov	SFMMU_HMERGNMAP_WORDS, %o3
						! set tsbmiss shmermap
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)

	ldx	[%o0 + SFMMU_SCDP], %o4		! %o4 = sfmmu_scd
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	mov	SFMMU_HMERGNMAP_WORDS, %o3
	brnz,pt	%o4, 7f				! check for sfmmu_scdp else
	add	%o2, TSBMISS_SCDSHMERMAP, %o2	!   zero tsbmiss scd_shmermap
	ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
	ba	8f
	nop
7:
	add	%o4, SCD_HMERMAP, %o1
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
#endif /* UTSB_PHYS */

8:
	retl
	nop
	SET_SIZE(sfmmu_load_mmustate)

#endif /* lint */

#if defined (lint)
/*
 * Invalidate all of the entries within the tsb, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact that TSBs are page aligned and a multiple of
 * PAGESIZE to use block stores.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
/* ARGSUSED */
void
sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
{}

#else /* lint */

#define	VIS_BLOCKSIZE	64

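	/*
	 * Rough flow of sfmmu_inv_tsb_fast (%o0 = tsb_base, %o1 = tsb_bytes;
	 * referenced as %i0/%i1 after the save):
	 *
	 *	kpreempt_disable();
	 *	if (fpu was in use)
	 *		block-save %d0-%d14 to an aligned area on the stack;
	 *	fill %d0-%d14 with the invalid pattern (tag = all ones, TTE = 0);
	 *	block-store the pattern over the TSB, 4 * VIS_BLOCKSIZE per pass;
	 *	restore %fprs (and the saved fpregs, if any);
	 *	kpreempt_enable();
	 */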
	ENTRY(sfmmu_inv_tsb_fast)

	! Get space for aligned block of saved fp regs.
	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

	! kpreempt_disable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	inc	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]

	! See if fpu was in use.  If it was, we need to save off the
	! floating point registers to the stack.
	rd	%fprs, %l0			! %l0 = cached copy of fprs
	btst	FPRS_FEF, %l0
	bz,pt	%icc, 4f
	nop

	! save in-use fpregs on stack
	membar	#Sync				! make sure transfers to fp regs
						!   have completed
	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs

	! enable fp
4:	membar	#StoreStore|#StoreLoad|#LoadStore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_P, %asi

	! load up FP registers with invalid TSB tag.
	fone	%d0				! ones in tag
	fzero	%d2				! zeros in TTE
	fone	%d4				! ones in tag
	fzero	%d6				! zeros in TTE
	fone	%d8				! ones in tag
	fzero	%d10				! zeros in TTE
	fone	%d12				! ones in tag
	fzero	%d14				! zeros in TTE
	ba,pt	%xcc, .sfmmu_inv_doblock
	mov	(4*VIS_BLOCKSIZE), %i4		! we do 4 stda's each loop below

.sfmmu_inv_blkstart:
	! stda	%d0, [%i0+192]%asi	! in dly slot of branch that got us here
	stda	%d0, [%i0+128]%asi
	stda	%d0, [%i0+64]%asi
	stda	%d0, [%i0]%asi

	add	%i0, %i4, %i0
	sub	%i1, %i4, %i1

.sfmmu_inv_doblock:
	cmp	%i1, (4*VIS_BLOCKSIZE)		! check for completion
	bgeu,a	%icc, .sfmmu_inv_blkstart
	stda	%d0, [%i0+192]%asi

.sfmmu_inv_finish:
	membar	#Sync
	btst	FPRS_FEF, %l0			! saved from above
	bz,a	.sfmmu_inv_finished
	wr	%l0, 0, %fprs			! restore fprs

	! restore fpregs from stack
	ldda	[%l1]ASI_BLK_P, %d0
	membar	#Sync
	wr	%l0, 0, %fprs			! restore fprs

.sfmmu_inv_finished:
	! kpreempt_enable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	dec	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]
	ret
	restore
	SET_SIZE(sfmmu_inv_tsb_fast)

#endif /* lint */

#if defined(lint)

/*
 * Prefetch "struct tsbe" while walking TSBs.
 * Prefetch 7 cache lines ahead of where we are at now.
 * #n_reads is being used since #one_read only applies to
 * floating point reads, and we are not doing floating point
 * reads.  However, this has the negative side effect of polluting
 * the ecache.
 * The 448 comes from (7 * 64), which is how far ahead of our current
 * address we want to prefetch.
 */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}

#else /* lint */

	ENTRY(prefetch_tsbe_read)
	retl
	prefetch	[%o0+448], #n_reads
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	prefetch	[%o0], #n_writes
	SET_SIZE(prefetch_tsbe_write)
#endif /* lint */