/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro.  Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)	\
	ba	label ;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg) \
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp) \
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */ ;\
	srlx	tagacc, vpshift, tagacc ;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */ ;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */ ;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */ ;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */ ;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */

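/*
 * For reference only: a hypothetical C rendering of the index arithmetic
 * performed by GET_TSBE_POINTER above (variable names are illustrative):
 *
 *	nentries = TSB_ENTRIES(0) << szc;		// entries in this TSB
 *	index = (tagacc >> vpshift) & (nentries - 1);	// direct-mapped index
 *	tsbep = tsbbase + (index << TSB_ENTRY_SHIFT);	// byte offset of TSBE
 */
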
/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp) \
	cmp	vpshift, MMU_PAGESHIFT ;\
	bne,pn	%icc, 1f		/* branch if large case */ ;\
	sethi	%hi(kpmsm_tsbsz), szc ;\
	sethi	%hi(kpmsm_tsbbase), tsbp ;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc ;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp ;\
	ba,pt	%icc, 2f ;\
	nop ;\
1:	sethi	%hi(kpm_tsbsz), szc ;\
	sethi	%hi(kpm_tsbbase), tsbp ;\
	ld	[szc + %lo(kpm_tsbsz)], szc ;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp ;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) \
	lda	[tsbep]ASI_MEM, tmp1 ;\
label: ;\
	sethi	%hi(TSBTAG_LOCKED), tmp2 ;\
	cmp	tmp1, tmp2 ;\
	be,a,pn	%icc, label/**/b	/* if locked spin */ ;\
	lda	[tsbep]ASI_MEM, tmp1 ;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2 ;\
	cmp	tmp1, tmp2 ;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */ ;\
	lda	[tsbep]ASI_MEM, tmp1 ;\
	/* tsbe lock acquired */ ;\
	membar	#StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) \
	lda	[tsbep]%asi, tmp1 ;\
label: ;\
	sethi	%hi(TSBTAG_LOCKED), tmp2 ;\
	cmp	tmp1, tmp2 ;\
	be,a,pn	%icc, label/**/b	/* if locked spin */ ;\
	lda	[tsbep]%asi, tmp1 ;\
	casa	[tsbep]%asi, tmp1, tmp2 ;\
	cmp	tmp1, tmp2 ;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */ ;\
	lda	[tsbep]%asi, tmp1 ;\
	/* tsbe lock acquired */ ;\
	membar	#StoreStore

#endif /* UTSB_PHYS */

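/*
 * For reference only: a rough C-level sketch of the locking protocol
 * implemented by TSB_LOCK_ENTRY above (tag_hi() and casa() below are
 * illustrative accessors, not real kernel interfaces).  The lock is
 * taken by swapping the 32-bit tag word with TSBTAG_LOCKED via
 * compare-and-swap:
 *
 *	do {
 *		old = tag_hi(tsbep);
 *		while (old == TSBTAG_LOCKED)	// already locked, spin
 *			old = tag_hi(tsbep);
 *	} while (casa(&tag_hi(tsbep), old, TSBTAG_LOCKED) != old);
 */
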
/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) \
	add	tsbep, TSBE_TTE, tmp1 ;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */ ;\
	membar	#StoreStore ;\
	add	tsbep, TSBE_TAG, tmp1 ;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) \
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */ ;\
	membar	#StoreStore ;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *	we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
	/* ;\
	 * I don't need to update the TSB then check for the valid tte. ;\
	 * TSB invalidate will spin till the entry is unlocked.  Note, ;\
	 * we always invalidate the hash table before we unload the TSB. ;\
	 */ ;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
	ldxa	[ttepa]ASI_MEM, tte ;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
	sethi	%hi(TSBTAG_INVALID), tmp2 ;\
	add	tsbep, TSBE_TAG, tmp1 ;\
	brgez,a,pn tte, label/**/f ;\
	sta	tmp2, [tmp1]ASI_MEM		/* unlock */ ;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) ;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
	/* ;\
	 * I don't need to update the TSB then check for the valid tte. ;\
	 * TSB invalidate will spin till the entry is unlocked.  Note, ;\
	 * we always invalidate the hash table before we unload the TSB. ;\
	 */ ;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
	ldxa	[ttepa]ASI_MEM, tte ;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
	sethi	%hi(TSBTAG_INVALID), tmp2 ;\
	brgez,a,pn tte, label/**/f ;\
	sta	tmp2, [tsbep + TSBE_TAG]%asi	/* unlock */ ;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) ;\
label:

#endif /* UTSB_PHYS */

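/*
 * Note (descriptive, not in the original): the brgez,a test above relies
 * on the TTE valid bit being the sign bit of the 64-bit sun4u TTE word.
 * A non-negative value means the TTE retrieved through ttepa is no longer
 * valid, so the locked entry is simply unlocked again by storing
 * TSBTAG_INVALID instead of being inserted.
 */
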
/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
	/* ;\
	 * I don't need to update the TSB then check for the valid tte. ;\
	 * TSB invalidate will spin till the entry is unlocked.  Note, ;\
	 * we always invalidate the hash table before we unload the TSB. ;\
	 * Or in 4M pfn offset to TTE and set the exec_perm bit to 0 ;\
	 * and exec_synth bit to 1. ;\
	 */ ;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
	mov	tte, tmp1 ;\
	ldxa	[ttepa]ASI_MEM, tte ;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
	sethi	%hi(TSBTAG_INVALID), tmp2 ;\
	brgez,a,pn tte, label/**/f ;\
	sta	tmp2, [tsbep + TSBE_TAG]%asi	/* unlock */ ;\
	or	tte, tmp1, tte ;\
	andn	tte, TTE_EXECPRM_INT, tte ;\
	or	tte, TTE_E_SYNTH_INT, tte ;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) ;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label) \
	/* ;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes. ;\
	 * Return them, shifted, in pfn. ;\
	 */ ;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess ;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */ ;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */ ;\
	bz,a,pt	%icc, label/**/f		/* if 0, is */ ;\
	and	tagaccess, 0x7, tagaccess	/* 32M page size */ ;\
	and	tagaccess, 0x3f, tagaccess	/* else 256M page size */ ;\
label: ;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp) \
	/* ;\
	 * Set 4M pagesize tte bits. ;\
	 */ ;\
	set	TTE4M, tmp ;\
	sllx	tmp, TTE_SZ_SHFT, tmp ;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) \
	/* can't rd tteva after locking tsb because it can tlb miss */ ;\
	ldx	[tteva], tteva			/* load tte */ ;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
	sethi	%hi(TSBTAG_INVALID), tmp2 ;\
	add	tsbep, TSBE_TAG, tmp1 ;\
	brgez,a,pn tteva, label/**/f ;\
	sta	tmp2, [tmp1]ASI_MEM		/* unlock */ ;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1) ;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) \
	/* can't rd tteva after locking tsb because it can tlb miss */ ;\
	ldx	[tteva], tteva			/* load tte */ ;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
	sethi	%hi(TSBTAG_INVALID), tmp2 ;\
	brgez,a,pn tteva, label/**/f ;\
	sta	tmp2, [tsbep + TSBE_TAG]%asi	/* unlock */ ;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1) ;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 * for this earlier to ensure this is true.  Thus when we are
 * directly referencing tsbep below, we are referencing the tte_tag
 * field of the TSBE.  If this offset ever changes, the code below
 * will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) \
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */ ;\
	sethi	%hi(TSBTAG_LOCKED), tmp2 ;\
label/**/1: ;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */ ;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */ ;\
	lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */ ;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */ ;\
	cmp	tag, tmp3		/* compare tags */ ;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */ ;\
	sethi	%hi(TSBTAG_INVALID), tmp3 ;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */ ;\
	cmp	tmp1, tmp3		/* if not successful */ ;\
	bne,a,pn %icc, label/**/1	/* start over from the top */ ;\
	lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */ ;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) \
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */ ;\
	sethi	%hi(TSBTAG_LOCKED), tmp2 ;\
label/**/1: ;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */ ;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */ ;\
	lda	[tsbep]%asi, tmp1	/* reloading value each time */ ;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */ ;\
	cmp	tag, tmp3		/* compare tags */ ;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */ ;\
	sethi	%hi(TSBTAG_INVALID), tmp3 ;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */ ;\
	cmp	tmp1, tmp3		/* if not successful */ ;\
	bne,a,pn %icc, label/**/1	/* start over from the top */ ;\
	lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */ ;\
label/**/2:

#endif /* UTSB_PHYS */

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp) \
	sethi	%hh(RUNTIME_PATCH), tmp ;\
	sethi	%lm(RUNTIME_PATCH), dest ;\
	or	tmp, %hm(RUNTIME_PATCH), tmp ;\
	or	dest, %lo(RUNTIME_PATCH), dest ;\
	sllx	tmp, 32, tmp ;\
	nop				/* for perf reasons */ ;\
	or	tmp, dest, dest		/* contents of patched value */


#endif /* lint */

#if defined (lint)

/*
 * sfmmu related subroutines
 */

/*
 * Use cas, if tte has changed underneath us then reread and try again.
 * In the case of a retry, it will update sttep with the new original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas, if tte has changed underneath us then return 1, else return 0
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"


	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3		/* current */
	ldx	[%o0], %g1		/* original */
2:
	ldx	[%o1], %g2		/* modified */
	cmp	%g2, %g3		/* is modified = current? */
	be,a,pt	%xcc,1f			/* yes, don't write */
	stx	%g3, [%o0]		/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f		/* cas succeeded - return */
	nop
	ldx	[%o2], %g3		/* new current */
	stx	%g3, [%o0]		/* save as new original */
	ba,pt	%xcc, 2b
	mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2		/* modified */
	ldx	[%o2], %g3		/* current */
	ldx	[%o0], %g1		/* original */
	cmp	%g3, %g2		/* is modified = current? */
	be,a,pn	%xcc,1f			/* yes, don't write */
	mov	0, %o1			/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1		/* cas failed. */
	move	%xcc, 1, %o1		/* cas succeeded. */
1:
	stx	%g2, [%o0]		/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

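	/*
	 * Illustrative (hypothetical) C-level usage of the helpers above,
	 * combining sfmmu_get_tsbe(), sfmmu_make_tsbtag() and
	 * sfmmu_load_tsbe() to insert a TTE into a TSB.  The variable
	 * names are made up for the example and do not come from this
	 * file:
	 *
	 *	struct tsbe *tsbe;
	 *	uint64_t tag;
	 *
	 *	tsbe = sfmmu_get_tsbe(tsb_base, vaddr, MMU_PAGESHIFT, tsb_szc);
	 *	tag = sfmmu_make_tsbtag(vaddr);
	 *	sfmmu_load_tsbe(tsbe, tag, ttep, phys);
	 */
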
	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 31:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as
	 * follows:
	 *
	 *	sethi	%hh(RUNTIME_PATCH), tmp
	 *	sethi	%lm(RUNTIME_PATCH), dest
	 *	or	tmp, %hm(RUNTIME_PATCH), tmp
	 *	or	dest, %lo(RUNTIME_PATCH), dest
	 *	sllx	tmp, 32, tmp
	 *	nop
	 *	or	tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)

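	/*
	 * For reference only: a hypothetical C sketch of how the four
	 * immediates patched above partition a 64-bit value, mirroring
	 * the %hh/%hm/%lm/%lo operators used by RUNTIME_PATCH_SETX
	 * (variable names are illustrative):
	 *
	 *	uint32_t hh = value >> 42;		  // bits [63:42], imm22
	 *	uint32_t hm = (value >> 32) & 0x3ff;	  // bits [41:32], imm13
	 *	uint32_t lm = (value & 0xffffffff) >> 10; // bits [31:10], imm22
	 *	uint32_t lo = value & 0x3ff;		  // bits [9:0],   imm13
	 */
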
	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
	movrz	%o0, ASI_NQUAD_LD, %o3
	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	or	%o0, %lo(iktsb), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)

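	/*
	 * For reference only: the instruction-matching test performed by
	 * sfmmu_fixup_mmu_asi above, written as a hypothetical C predicate
	 * (field layout per the SPARC V9 load/store format: op in bits
	 * 31:30, op3 in bits 24:19, imm_asi in bits 12:5):
	 *
	 *	int
	 *	is_ldda(uint32_t instr)
	 *	{
	 *		return ((instr >> 30) == 0x3 &&		// op == 11
	 *		    ((instr >> 19) & 0x3f) == 0x13);	// op3 == ldda
	 *	}
	 *
	 * which is also why the ASI passed in %o1 is pre-shifted left by 5.
	 */
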
	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif

	ldx	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	ldsw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	/* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	lduw	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)

	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt	%icc, 1f			/* disabled, panic */
	nop

	sethi	%hi(panicstr), %g1
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pt	%icc, 1f
	nop

	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	or	%o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		! if interrupts already
	bnz,pt	%icc, 1f			! disabled, panic
	nop

	sethi	%hi(panicstr), %g1
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pt	%icc, 1f
	nop

	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	or	%o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg.  Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */

#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */


#if defined (lint)
/*
 * The sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to
 * the hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
}

/*
 * The sfmmu_hblk_hash_rm is the assembly primitive to remove hmeblks from the
 * hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa, struct hme_blk *prev_hblkp)
{
}
#else /* lint */

/*
 * Functions to grab/release hme bucket list lock.  I only use a byte
 * instead of the whole int because eventually we might want to
 * put some counters on the other bytes (of course, these routines would
 * have to change).  The code that grabs this lock should execute
 * with interrupts disabled and hold the lock for the least amount of time
 * possible.
 */

/*
 * Even though hmeh_listlock is updated using pa there's no need to flush
 * dcache since hmeh_listlock will be restored to the original value (0)
 * before interrupts are reenabled.
 */

/*
 * For sparcv9 hme hash buckets may not be in the nucleus.  hme hash update
 * routines still use virtual addresses to update the bucket fields. But they
 * must not cause a TLB miss after grabbing the low level bucket lock. To
 * achieve this we must make sure the bucket structure is completely within an
 * 8K page.
 */

#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
#error - the size of hmehash_bucket structure is not power of 2
#endif

#define	HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi) \
	mov	0xff, tmp2 ;\
	add	hmebp, HMEBUCK_LOCK, tmp1 ;\
label1: ;\
	casa	[tmp1]asi, %g0, tmp2 ;\
	brnz,pn	tmp2, label1 ;\
	mov	0xff, tmp2 ;\
	membar	#LoadLoad

#define	HMELOCK_EXIT(hmebp, tmp1, asi) \
	membar	#LoadStore|#StoreStore ;\
	add	hmebp, HMEBUCK_LOCK, tmp1 ;\
	sta	%g0, [tmp1]asi

	.seg	".data"
hblk_add_panic1:
	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
	.byte	0
hblk_add_panic2:
	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
	.byte	0
	.align	4
	.seg	".text"

	ENTRY_NP(sfmmu_hblk_hash_add)
	/*
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hblkpa
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt	%icc, 3f			/* disabled, panic */
	nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic1), %o0
	call	panic
	or	%o0, %lo(hblk_add_panic1), %o0
	ret
	restore

3:
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
	mov	%o2, %g1

	/*
	 * g1 = hblkpa
	 */
	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
#ifdef DEBUG
	cmp	%o4, %g0
	bne,pt	%xcc, 1f
	nop
	brz,pt	%g2, 1f
	nop
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic2), %o0
	call	panic
	or	%o0, %lo(hblk_add_panic2), %o0
	ret
	restore
1:
#endif /* DEBUG */
	/*
	 * We update hmeblks entries before grabbing lock because the stores
	 * could take a tlb miss and require the hash lock.  The buckets
	 * are part of the nucleus so we are cool with those stores.
	 *
	 * if buckets are not part of the nucleus our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_add)

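	/*
	 * For reference only: a hypothetical C-level view of what
	 * sfmmu_hblk_hash_add does under the bucket byte lock (field and
	 * helper names below are illustrative; the real code must also
	 * keep the va and pa links consistent and avoid TLB misses while
	 * the lock is held):
	 *
	 *	hmeblkp->hblk_next = hmebp->hmeblkp;	// link new block in
	 *	hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
	 *	lock_bucket(hmebp);			// HMELOCK_ENTER
	 *	hmebp->hmeblkp = hmeblkp;		// prepend to bucket
	 *	hmebp->hmeh_nextpa = hblkpa;
	 *	unlock_bucket(hmebp);			// HMELOCK_EXIT
	 */
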
1359 * 1360 * %o0 = hmebp 1361 * %o1 = hmeblkp 1362 * %o2 = hmeblkp previous pa 1363 * %o3 = hmeblkp previous 1364 */ 1365 1366 mov %o3, %o4 /* o4 = hmeblkp previous */ 1367 1368 rdpr %pstate, %o5 1369#ifdef DEBUG 1370 andcc %o5, PSTATE_IE, %g0 /* if interrupts already */ 1371 bnz,pt %icc, 3f /* disabled, panic */ 1372 nop 1373 1374 sethi %hi(panicstr), %g1 1375 ldx [%g1 + %lo(panicstr)], %g1 1376 tst %g1 1377 bnz,pt %icc, 3f 1378 nop 1379 1380 sethi %hi(sfmmu_panic1), %o0 1381 call panic 1382 or %o0, %lo(sfmmu_panic1), %o0 13833: 1384#endif /* DEBUG */ 1385 /* 1386 * disable interrupts, clear Address Mask to access 64 bit physaddr 1387 */ 1388 andn %o5, PSTATE_IE, %g1 1389 wrpr %g1, 0, %pstate 1390 1391#ifndef sun4v 1392 sethi %hi(dcache_line_mask), %g4 1393 ld [%g4 + %lo(dcache_line_mask)], %g4 1394#endif /* sun4v */ 1395 1396 /* 1397 * if buckets are not part of the nucleus our game is to 1398 * not touch any other page via va until we drop the lock. 1399 * This guarantees we won't get a tlb miss before the lock release 1400 * since interrupts are disabled. 1401 */ 1402 HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N) 1403 ldn [%o0 + HMEBUCK_HBLK], %g2 /* first hmeblk in list */ 1404 cmp %g2, %o1 1405 bne,pt %ncc,1f 1406 mov ASI_MEM, %asi 1407 /* 1408 * hmeblk is first on list 1409 */ 1410 ldx [%o0 + HMEBUCK_NEXTPA], %g2 /* g2 = hmeblk pa */ 1411 ldna [%g2 + HMEBLK_NEXT] %asi, %o3 /* read next hmeblk va */ 1412 ldxa [%g2 + HMEBLK_NEXTPA] %asi, %g1 /* read next hmeblk pa */ 1413 stn %o3, [%o0 + HMEBUCK_HBLK] /* write va */ 1414 ba,pt %xcc, 2f 1415 stx %g1, [%o0 + HMEBUCK_NEXTPA] /* write pa */ 14161: 1417 /* hmeblk is not first on list */ 1418 1419 mov %o2, %g3 1420#ifndef sun4v 1421 GET_CPU_IMPL(%g2) 1422 cmp %g2, CHEETAH_IMPL 1423 bge,a,pt %icc, hblk_hash_rm_1 1424 and %o4, %g4, %g2 1425 cmp %g2, SPITFIRE_IMPL 1426 blt %icc, hblk_hash_rm_2 /* no flushing needed for OPL */ 1427 and %o4, %g4, %g2 1428 stxa %g0, [%g2]ASI_DC_TAG /* flush prev pa from dcache */ 1429 add %o4, HMEBLK_NEXT, %o4 1430 and %o4, %g4, %g2 1431 ba hblk_hash_rm_2 1432 stxa %g0, [%g2]ASI_DC_TAG /* flush prev va from dcache */ 1433hblk_hash_rm_1: 1434 1435 stxa %g0, [%g3]ASI_DC_INVAL /* flush prev pa from dcache */ 1436 membar #Sync 1437 add %g3, HMEBLK_NEXT, %g2 1438 stxa %g0, [%g2]ASI_DC_INVAL /* flush prev va from dcache */ 1439hblk_hash_rm_2: 1440 membar #Sync 1441#endif /* sun4v */ 1442 ldxa [%g3 + HMEBLK_NEXTPA] %asi, %g2 /* g2 = hmeblk pa */ 1443 ldna [%g2 + HMEBLK_NEXT] %asi, %o3 /* read next hmeblk va */ 1444 ldxa [%g2 + HMEBLK_NEXTPA] %asi, %g1 /* read next hmeblk pa */ 1445 stna %o3, [%g3 + HMEBLK_NEXT] %asi /* write va */ 1446 stxa %g1, [%g3 + HMEBLK_NEXTPA] %asi /* write pa */ 14472: 1448 HMELOCK_EXIT(%o0, %g2, ASI_N) 1449 retl 1450 wrpr %g0, %o5, %pstate /* enable interrupts */ 1451 SET_SIZE(sfmmu_hblk_hash_rm) 1452 1453#endif /* lint */ 1454 1455/* 1456 * These macros are used to update global sfmmu hme hash statistics 1457 * in perf critical paths. 
#endif /* lint */

/*
 * These macros are used to update global sfmmu hme hash statistics
 * in perf critical paths.  They are only enabled in debug kernels or
 * if SFMMU_STAT_GATHER is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
	ldn	[tsbarea + TSBMISS_KHATID], tmp1 ;\
	mov	HATSTAT_KHASH_SEARCH, tmp2 ;\
	cmp	tmp1, hatid ;\
	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2 ;\
	set	sfmmu_global_stat, tmp1 ;\
	add	tmp1, tmp2, tmp1 ;\
	ld	[tmp1], tmp2 ;\
	inc	tmp2 ;\
	st	tmp2, [tmp1]

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
	ldn	[tsbarea + TSBMISS_KHATID], tmp1 ;\
	mov	HATSTAT_KHASH_LINKS, tmp2 ;\
	cmp	tmp1, hatid ;\
	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2 ;\
	set	sfmmu_global_stat, tmp1 ;\
	add	tmp1, tmp2, tmp1 ;\
	ld	[tmp1], tmp2 ;\
	inc	tmp2 ;\
	st	tmp2, [tmp1]


#else /* DEBUG || SFMMU_STAT_GATHER */

#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#endif  /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This macro is used to update global sfmmu kstats in non
 * perf critical areas so they are enabled all the time
 */
#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2) \
	sethi	%hi(sfmmu_global_stat), tmp1 ;\
	add	tmp1, statname, tmp1 ;\
	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2 ;\
	inc	tmp2 ;\
	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time
 */
#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1) \
	ld	[tsbarea + stat], tmp1 ;\
	inc	tmp1 ;\
	st	tmp1, [tsbarea + stat]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time
 */
#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1) \
	lduh	[tsbarea + stat], tmp1 ;\
	inc	tmp1 ;\
	stuh	tmp1, [tsbarea + stat]

#if defined(KPM_TLBMISS_STATS_GATHER)
	/*
	 * Count kpm dtlb misses separately to allow a different
	 * evaluation of hme and kpm tlbmisses.  kpm tsb hits can
	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
	 */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) \
	brgez	tagacc, label		/* KPM VA? */ ;\
	nop ;\
	CPU_INDEX(tmp1, tsbma) ;\
	sethi	%hi(kpmtsbm_area), tsbma ;\
	sllx	tmp1, KPMTSBM_SHIFT, tmp1 ;\
	or	tsbma, %lo(kpmtsbm_area), tsbma ;\
	add	tsbma, tmp1, tsbma		/* kpmtsbm area */ ;\
	/* VA range check */ ;\
	ldx	[tsbma + KPMTSBM_VBASE], val ;\
	cmp	tagacc, val ;\
	blu,pn	%xcc, label ;\
	ldx	[tsbma + KPMTSBM_VEND], tmp1 ;\
	cmp	tagacc, tmp1 ;\
	bgeu,pn	%xcc, label ;\
	lduw	[tsbma + KPMTSBM_DTLBMISS], val ;\
	inc	val ;\
	st	val, [tsbma + KPMTSBM_DTLBMISS] ;\
label:
#else
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
#endif /* KPM_TLBMISS_STATS_GATHER */

#if defined (lint)
/*
 * The following routines are jumped to from the mmu trap handlers to do
 * the setting up to call systrap.  They are separate routines instead of
 * being part of the handlers because the handlers would exceed 32
 * instructions and since this is part of the slow path the jump
 * cost is irrelevant.
 */
void
sfmmu_pagefault(void)
{
}

void
sfmmu_mmu_trap(void)
{
}

void
sfmmu_window_trap(void)
{
}

void
sfmmu_kpm_exception(void)
{
}

#else /* lint */

#ifdef	PTL1_PANIC_DEBUG
	.seg	".data"
	.global	test_ptl1_panic
test_ptl1_panic:
	.word	0
	.align	8

	.seg	".text"
	.align	4
#endif	/* PTL1_PANIC_DEBUG */


	ENTRY_NP(sfmmu_pagefault)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */

#ifdef	PTL1_PANIC_DEBUG
	/* check if we want to test the tl1 panic */
	sethi	%hi(test_ptl1_panic), %g4
	ld	[%g4 + %lo(test_ptl1_panic)], %g1
	st	%g0, [%g4 + %lo(test_ptl1_panic)]
	cmp	%g1, %g0
	bne,a,pn %icc, ptl1_panic
	or	%g0, PTL1_BAD_DEBUG, %g1
#endif	/* PTL1_PANIC_DEBUG */
1:
	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
	/*
	 * g2 = tag access reg
	 * g3.l = type
	 * g3.h = 0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
2:
	ba,pt	%xcc, sys_trap
	mov	-1, %g4
	SET_SIZE(sfmmu_pagefault)

	ENTRY_NP(sfmmu_mmu_trap)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1:
	/*
	 * g2 = tag access reg
	 * g3 = type
	 */
	sethi	%hi(sfmmu_tsbmiss_exception), %g1
	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_mmu_trap)

	ENTRY_NP(sfmmu_suspend_tl)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3
	movne	%icc, T_DATA_PROT, %g3
1:
	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
	ba,pt	%xcc, sys_trap
	mov	PIL_15, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_suspend_tl)

	/*
	 * No %g registers in use at this point.
	 */
	ENTRY_NP(sfmmu_window_trap)
	rdpr	%tpc, %g1
#ifdef sun4v
#ifdef DEBUG
	/* We assume previous %gl was 1 */
	rdpr	%tstate, %g4
	srlx	%g4, TSTATE_GL_SHIFT, %g4
	and	%g4, TSTATE_GL_MASK, %g4
	cmp	%g4, 1
	bne,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_WTRAP, %g1
#endif /* DEBUG */
	/* user miss at tl>1. better be the window handler or user_rtt */
	/* in user_rtt? */
	set	rtt_fill_start, %g4
	cmp	%g1, %g4
	blu,pn	%xcc, 6f
	.empty
	set	rtt_fill_end, %g4
	cmp	%g1, %g4
	bgeu,pn	%xcc, 6f
	nop
	set	fault_rtt_fn1, %g1
	wrpr	%g0, %g1, %tnpc
	ba,a	7f
6:
	! must save this trap level before descending trap stack
	! no need to save %tnpc, either overwritten or discarded
	! already got it: rdpr %tpc, %g1
	rdpr	%tstate, %g6
	rdpr	%tt, %g7
	! trap level saved, go get underlying trap type
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
	! restore saved trap level
	wrpr	%g1, %tpc
	wrpr	%g6, %tstate
	wrpr	%g7, %tt
#else /* sun4v */
	/* user miss at tl>1. better be the window handler */
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
#endif /* sun4v */
	and	%g2, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pn	%xcc, 1f
	nop
	/* tpc should be in the trap table */
	set	trap_table, %g4
	cmp	%g1, %g4
	blt,pn	%xcc, 1f
	.empty
	set	etrap_table, %g4
	cmp	%g1, %g4
	bge,pn	%xcc, 1f
	.empty
	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
	add	%g1, WTRAP_FAULTOFF, %g1
	wrpr	%g0, %g1, %tnpc
7:
	/*
	 * some wbuf handlers will call systrap to resolve the fault
	 * we pass the trap type so they figure out the correct parameters.
	 * g5 = trap type, g6 = tag access reg
	 */

	/*
	 * only use g5, g6, g7 registers after we have switched to alternate
	 * globals.
	 */
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
	rdpr	%tt, %g7
	cmp	%g7, FAST_IMMU_MISS_TT
	be,a,pn	%icc, ptl1_panic
	mov	PTL1_BAD_WTRAP, %g1
	cmp	%g7, T_INSTR_MMU_MISS
	be,a,pn	%icc, ptl1_panic
	mov	PTL1_BAD_WTRAP, %g1
	mov	T_DATA_PROT, %g5
	cmp	%g7, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g5
	cmp	%g7, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g5
	! XXXQ AGS re-check out this one
	done
1:
	CPU_ADDR(%g1, %g4)
	ld	[%g1 + CPU_TL1_HDLR], %g4
	brnz,a,pt %g4, sfmmu_mmu_trap
	st	%g0, [%g1 + CPU_TL1_HDLR]
	ba,pt	%icc, ptl1_panic
	mov	PTL1_BAD_TRAP, %g1
	SET_SIZE(sfmmu_window_trap)

	ENTRY_NP(sfmmu_kpm_exception)
	/*
	 * We have accessed an unmapped segkpm address or a legal segkpm
	 * address which is involved in a VAC alias conflict prevention.
	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
	 * set.  If it is, we will instead note that a fault has occurred
	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
	 * a "retry").  This will step over the faulting instruction.
	 * Note that this means that a legal segkpm address involved in
	 * a VAC alias conflict prevention (a rare case to begin with)
	 * cannot be used in DTrace.
	 */
	CPU_INDEX(%g1, %g2)
	set	cpu_core, %g2
	sllx	%g1, CPU_CORE_SHIFT, %g1
	add	%g1, %g2, %g1
	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
	bz	0f
	or	%g2, CPU_DTRACE_BADADDR, %g2
	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
	done
0:
	TSTAT_CHECK_TL1(1f, %g1, %g2)
1:
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
	mov	T_DATA_MMU_MISS, %g3		/* arg2 = traptype */
	/*
	 * g2=tagacc g3.l=type g3.h=0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4
	SET_SIZE(sfmmu_kpm_exception)

#endif /* lint */

#if defined (lint)

void
sfmmu_tsb_miss(void)
{
}

void
sfmmu_kpm_dtsb_miss(void)
{
}

void
sfmmu_kpm_dtsb_miss_small(void)
{
}

#else /* lint */


#if (CTX_SIZE != (1 << CTX_SZ_SHIFT))
#error - size of context struct does not match with CTX_SZ_SHIFT
#endif

#if (IMAP_SEG != 0)
#error - ism_map->ism_seg offset is not zero
#endif

/*
 * Copies ism mapping for this ctx in param "ism" if this is an ISM
 * tlb miss and branches to label "ismhit".  If this is not an ISM
 * process or an ISM tlb miss it falls thru.
 *
 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
 * this process.
 * If so, it will branch to label "ismhit".  If not, it will fall through.
 *
 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
 * so that any other threads of this process will not try and walk the ism
 * maps while they are being changed.
 *
 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
 *	 will make sure of that. This means we can terminate our search on
 *	 the first zero mapping we find.
 *
 * Parameters:
 * tagacc	= tag access register (vaddr + ctx) (in)
 * tsbmiss	= address of tsb miss area (in)
 * ismseg	= contents of ism_seg for this ism map (out)
 * ismhat	= physical address of imap_ismhat for this ism map (out)
 * tmp1		= scratch reg (CLOBBERED)
 * tmp2		= scratch reg (CLOBBERED)
 * tmp3		= scratch reg (CLOBBERED)
 * label:	temporary labels
 * ismhit:	label where to jump to if an ism dtlb miss
 * exitlabel:	label where to jump if hat is busy due to hat_unshare.
 */
#define	ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3, \
	label, ismhit) \
	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */ ;\
	brlz,pt	tmp1, label/**/3		/* exit if -1 */ ;\
	add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */ ;\
label/**/1: ;\
	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */ ;\
	mov	tmp1, tmp3	/* update current ismblkpa head */ ;\
label/**/2: ;\
	brz,pt	ismseg, label/**/3		/* no mapping */ ;\
	add	ismhat, IMAP_VB_SHIFT, tmp1	/* tmp1 = vb_shift addr */ ;\
	lduha	[tmp1]ASI_MEM, tmp1		/* tmp1 = vb shift*/ ;\
	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */ ;\
	srlx	tagacc, tmp1, tmp1		/* tmp1 = va seg*/ ;\
	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */ ;\
	add	ismhat, IMAP_SZ_MASK, tmp1	/* tmp1 = sz_mask addr */ ;\
	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */ ;\
	and	ismseg, tmp1, tmp1		/* tmp1 = size */ ;\
	cmp	tmp2, tmp1			/* check va <= offset*/ ;\
	blu,a,pt %xcc, ismhit			/* ism hit */ ;\
	add	ismhat, IMAP_ISMHAT, ismhat	/* ismhat = &ism_sfmmu*/ ;\
	;\
	add	ismhat, ISM_MAP_SZ, ismhat	/* ismhat += sizeof(map) */ ;\
	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1 ;\
	cmp	ismhat, tmp1 ;\
	bl,pt	%xcc, label/**/2		/* keep looking */ ;\
	ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */ ;\
	;\
	add	tmp3, IBLK_NEXTPA, tmp1 ;\
	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */ ;\
	brgez,pt tmp1, label/**/1		/* continue if not -1*/ ;\
	add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/ ;\
label/**/3:

/*
 * Returns the hme hash bucket (hmebp) given the vaddr, and the hatid
 * It also returns the virtual pg for vaddr (ie. vaddr >> hmeshift)
 * Parameters:
 * vaddr	= reg containing virtual address
 * hatid	= reg containing sfmmu pointer
 * hmeshift	= constant/register to shift vaddr to obtain vapg
 * hmebp	= register where bucket pointer will be stored
 * vapg		= register where virtual page will be stored
 * tmp1, tmp2	= tmp registers
 */


#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp, \
	vapg, label, tmp1, tmp2) \
	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1 ;\
	brnz,a,pt tmp1, label/**/1 ;\
	ld	[tsbarea + TSBMISS_UHASHSZ], hmebp ;\
	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp ;\
	ba,pt	%xcc, label/**/2 ;\
	ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1 ;\
label/**/1: ;\
	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1 ;\
label/**/2: ;\
	srlx	tagacc, hmeshift, vapg ;\
	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */ ;\
	and	tmp2, hmebp, hmebp	/* index into hme_hash */ ;\
	mulx	hmebp, HMEBUCK_SIZE, hmebp ;\
	add	hmebp, tmp1, hmebp

/*
 * hashtag includes bspage + hashno (64 bits).
 */

#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag) \
	sllx	vapg, hmeshift, vapg ;\
	or	vapg, hashno, hblktag

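/*
 * For reference only: a hypothetical C rendering of the bucket lookup
 * done by HMEHASH_FUNC_ASM above.  hashsz and hashstart stand for the
 * TSBMISS_[UK]HASHSZ and TSBMISS_[UK]HASHSTART values selected according
 * to whether the context is kernel or user; hashsz is used as a mask:
 *
 *	vapg = vaddr >> hmeshift;
 *	hmebp = hashstart +
 *	    ((vapg ^ (uintptr_t)hatid) & hashsz) * HMEBUCK_SIZE;
 */
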
1964 * Parameters: 1965 * hmebp = register that points to hme hash bucket, also used as 1966 * tmp reg (clobbered) 1967 * hmeblktag = register with hmeblk tag match 1968 * hatid = register with hatid 1969 * hmeblkpa = register where physical ptr will be stored 1970 * hmeblkva = register where virtual ptr will be stored 1971 * tmp1 = tmp reg 1972 * label: temporary label 1973 */ 1974 1975#define HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva, \ 1976 tsbarea, tmp1, label) \ 1977 add hmebp, HMEBUCK_NEXTPA, hmeblkpa ;\ 1978 ldxa [hmeblkpa]ASI_MEM, hmeblkpa ;\ 1979 add hmebp, HMEBUCK_HBLK, hmeblkva ;\ 1980 ldxa [hmeblkva]ASI_MEM, hmeblkva ;\ 1981 HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1) ;\ 1982label/**/1: ;\ 1983 brz,pn hmeblkva, label/**/2 ;\ 1984 HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1) ;\ 1985 add hmeblkpa, HMEBLK_TAG, hmebp ;\ 1986 ldxa [hmebp]ASI_MEM, tmp1 /* read 1st part of tag */ ;\ 1987 add hmebp, CLONGSIZE, hmebp ;\ 1988 ldxa [hmebp]ASI_MEM, hmebp /* read 2nd part of tag */ ;\ 1989 xor tmp1, hmeblktag, tmp1 ;\ 1990 xor hmebp, hatid, hmebp ;\ 1991 or hmebp, tmp1, hmebp ;\ 1992 brz,pn hmebp, label/**/2 /* branch on hit */ ;\ 1993 add hmeblkpa, HMEBLK_NEXT, hmebp ;\ 1994 ldna [hmebp]ASI_MEM, hmeblkva /* hmeblk ptr va */ ;\ 1995 add hmeblkpa, HMEBLK_NEXTPA, hmebp ;\ 1996 ba,pt %xcc, label/**/1 ;\ 1997 ldxa [hmebp]ASI_MEM, hmeblkpa /* hmeblk ptr pa */ ;\ 1998label/**/2: 1999 2000 2001#if ((1 << SFHME_SHIFT) != SFHME_SIZE) 2002#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size 2003#endif 2004 2005/* 2006 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns 2007 * he offset for the corresponding hment. 2008 * Parameters: 2009 * vaddr = register with virtual address 2010 * hmeblkpa = physical pointer to hme_blk 2011 * hment = register where address of hment will be stored 2012 * hmentoff = register where hment offset will be stored 2013 * label1 = temporary label 2014 */ 2015#define HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, tmp1, label1) \ 2016 add hmeblkpa, HMEBLK_MISC, hmentoff ;\ 2017 lda [hmentoff]ASI_MEM, tmp1 ;\ 2018 andcc tmp1, HBLK_SZMASK, %g0 /* tmp1 = get_hblk_sz(%g5) */ ;\ 2019 bnz,a,pn %icc, label1 /* if sz != TTE8K branch */ ;\ 2020 or %g0, HMEBLK_HME1, hmentoff ;\ 2021 srl vaddr, MMU_PAGESHIFT, tmp1 ;\ 2022 and tmp1, NHMENTS - 1, tmp1 /* tmp1 = index */ ;\ 2023 sllx tmp1, SFHME_SHIFT, tmp1 ;\ 2024 add tmp1, HMEBLK_HME1, hmentoff ;\ 2025label1: 2026 2027/* 2028 * GET_TTE is a macro that returns a TTE given a tag and hatid. 2029 * 2030 * tagacc = tag access register (vaddr + ctx) (in) 2031 * hatid = sfmmu pointer for TSB miss (in) 2032 * tte = tte for TLB miss if found, otherwise clobbered (out) 2033 * hmeblkpa = PA of hment if found, otherwise clobbered (out) 2034 * hmeblkva = VA of hment if found, otherwise clobbered (out) 2035 * tsbarea = pointer to the tsbmiss area for this cpu. (in) 2036 * hmentoff = temporarily stores hment offset (clobbered) 2037 * hmeshift = constant/register to shift VA to obtain the virtual pfn 2038 * for this page size. 2039 * hashno = constant/register hash number 2040 * label = temporary label for branching within macro. 2041 * foundlabel = label to jump to when tte is found. 2042 * suspendlabel= label to jump to when tte is suspended. 2043 * exitlabel = label to jump to when tte is not found. The hmebp lock 2044 * is still held at this time. 
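 *
 * In rough outline (illustrative C only; it omits the hmebp lock and the
 * tsbmiss scratch-area bookkeeping described in this comment):
 *
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, hmeshift);	// pick bucket
 *	tag = MAKE_HASHTAG(tagacc >> hmeshift, hashno);
 *	hblk = HMEHASH_SEARCH(hmebp, tag, hatid);
 *	if (hblk == NULL)
 *		goto exitlabel;			// no hme_blk for this tag
 *	hmentoff = HMEBLK_TO_HMENT(tagacc, hblk);
 *	tte = *(tte_t *)((caddr_t)hblk + hmentoff + SFHME_TTE);
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;		// mapping is being relocated
 *	goto foundlabel;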
2045 * 2046 * The caller should set up the tsbmiss->scratch[2] field correctly before 2047 * calling this funciton (aka TSBMISS_SCRATCH + TSBMISS_HATID) 2048 */ 2049#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmentoff, \ 2050 hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \ 2051 ;\ 2052 stn tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)] ;\ 2053 stn hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)] ;\ 2054 HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte, \ 2055 hmeblkpa, label/**/5, hmentoff, hmeblkva) ;\ 2056 ;\ 2057 /* ;\ 2058 * tagacc = tagacc ;\ 2059 * hatid = hatid ;\ 2060 * tsbarea = tsbarea ;\ 2061 * tte = hmebp (hme bucket pointer) ;\ 2062 * hmeblkpa = vapg (virtual page) ;\ 2063 * hmentoff, hmeblkva = scratch ;\ 2064 */ ;\ 2065 MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmentoff) ;\ 2066 ;\ 2067 /* ;\ 2068 * tagacc = tagacc ;\ 2069 * hatid = hatid ;\ 2070 * tte = hmebp ;\ 2071 * hmeblkpa = CLOBBERED ;\ 2072 * hmentoff = htag_bspage & hashno ;\ 2073 * hmeblkva = scratch ;\ 2074 */ ;\ 2075 stn tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)] ;\ 2076 HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM) ;\ 2077 HMEHASH_SEARCH(tte, hmentoff, hatid, hmeblkpa, hmeblkva, \ 2078 tsbarea, tagacc, label/**/1) ;\ 2079 /* ;\ 2080 * tagacc = CLOBBERED ;\ 2081 * tte = CLOBBERED ;\ 2082 * hmeblkpa = hmeblkpa ;\ 2083 * hmeblkva = hmeblkva ;\ 2084 */ ;\ 2085 brnz,pt hmeblkva, label/**/4 /* branch if hmeblk found */ ;\ 2086 ldn [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc ;\ 2087 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva ;\ 2088 HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM) /* drop lock */ ;\ 2089 ba,pt %xcc, exitlabel /* exit if hblk not found */ ;\ 2090 nop ;\ 2091label/**/4: ;\ 2092 /* ;\ 2093 * We have found the hmeblk containing the hment. ;\ 2094 * Now we calculate the corresponding tte. ;\ 2095 * ;\ 2096 * tagacc = tagacc ;\ 2097 * hatid = clobbered ;\ 2098 * tte = hmebp ;\ 2099 * hmeblkpa = hmeblkpa ;\ 2100 * hmentoff = hblktag ;\ 2101 * hmeblkva = hmeblkva ;\ 2102 */ ;\ 2103 HMEBLK_TO_HMENT(tagacc, hmeblkpa, hmentoff, hatid, label/**/2) ;\ 2104 ;\ 2105 add hmentoff, SFHME_TTE, hmentoff ;\ 2106 add hmeblkpa, hmentoff, hmeblkpa ;\ 2107 ldxa [hmeblkpa]ASI_MEM, tte /* MMU_READTTE through pa */ ;\ 2108 add hmeblkva, hmentoff, hmeblkva ;\ 2109 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid ;\ 2110 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmentoff ;\ 2111 HMELOCK_EXIT(hmentoff, hmentoff, ASI_MEM) /* drop lock */ ;\ 2112 set TTE_SUSPEND, hmentoff ;\ 2113 TTE_SUSPEND_INT_SHIFT(hmentoff) ;\ 2114 btst tte, hmentoff ;\ 2115 bz,pt %xcc, foundlabel ;\ 2116 nop ;\ 2117 ;\ 2118 /* ;\ 2119 * Mapping is suspended, so goto suspend label. ;\ 2120 */ ;\ 2121 ba,pt %xcc, suspendlabel ;\ 2122 nop 2123 2124 /* 2125 * KERNEL PROTECTION HANDLER 2126 * 2127 * g1 = tsb8k pointer register (clobbered) 2128 * g2 = tag access register (ro) 2129 * g3 - g7 = scratch registers 2130 * 2131 * Note: This function is patched at runtime for performance reasons. 2132 * Any changes here require sfmmu_patch_ktsb fixed. 2133 */ 2134 ENTRY_NP(sfmmu_kprot_trap) 2135 mov %g2, %g7 ! TSB pointer macro clobbers tagacc 2136sfmmu_kprot_patch_ktsb_base: 2137 RUNTIME_PATCH_SETX(%g1, %g6) 2138 /* %g1 = contents of ktsb_base or ktsb_pbase */ 2139sfmmu_kprot_patch_ktsb_szcode: 2140 or %g0, RUNTIME_PATCH, %g3 ! ktsb_szcode (hot patched) 2141 2142 GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5) 2143 ! 
%g1 = First TSB entry pointer, as TSB miss handler expects 2144 2145 mov %g2, %g7 ! TSB pointer macro clobbers tagacc 2146sfmmu_kprot_patch_ktsb4m_base: 2147 RUNTIME_PATCH_SETX(%g3, %g6) 2148 /* %g3 = contents of ktsb4m_base or ktsb4m_pbase */ 2149sfmmu_kprot_patch_ktsb4m_szcode: 2150 or %g0, RUNTIME_PATCH, %g6 ! ktsb4m_szcode (hot patched) 2151 2152 GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5) 2153 ! %g3 = 4M tsb entry pointer, as TSB miss handler expects 2154 2155 CPU_TSBMISS_AREA(%g6, %g7) 2156 HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7) 2157 ba,pt %xcc, sfmmu_tsb_miss_tt 2158 nop 2159 2160 /* 2161 * USER PROTECTION HANDLER 2162 * 2163 * g1 = tsb8k pointer register (ro) 2164 * g2 = tag access register (ro) 2165 * g3 = faulting context (clobbered, currently not used) 2166 * g4 - g7 = scratch registers 2167 */ 2168 ALTENTRY(sfmmu_uprot_trap) 2169#ifdef sun4v 2170 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5) 2171 /* %g1 = first TSB entry ptr now, %g2 preserved */ 2172 2173 GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3) /* get 2nd utsbreg */ 2174 brlz,pt %g3, 9f /* check for 2nd TSB */ 2175 mov %g0, %g3 /* clear second tsbe ptr */ 2176 2177 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5) 2178 /* %g3 = second TSB entry ptr now, %g2 preserved */ 2179 2180#else /* sun4v */ 2181#ifdef UTSB_PHYS 2182 /* g1 = first TSB entry ptr */ 2183 GET_2ND_TSBREG(%g3) 2184 brlz,a,pt %g3, 9f /* check for 2nd TSB */ 2185 mov %g0, %g3 /* clear second tsbe ptr */ 2186 2187 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5) 2188 /* %g3 = second TSB entry ptr now, %g2 preserved */ 2189#else /* UTSB_PHYS */ 2190 brgez,pt %g1, 9f /* check for 2nd TSB */ 2191 mov %g0, %g3 /* clear second tsbe ptr */ 2192 2193 mov %g2, %g7 2194 GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot) 2195 /* %g3 = second TSB entry ptr now, %g7 clobbered */ 2196 mov %g1, %g7 2197 GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot) 2198#endif /* UTSB_PHYS */ 2199#endif /* sun4v */ 22009: 2201 CPU_TSBMISS_AREA(%g6, %g7) 2202 HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7) 2203 ba,pt %xcc, sfmmu_tsb_miss_tt /* branch TSB miss handler */ 2204 nop 2205 2206 /* 2207 * Kernel 8K page iTLB miss. We also get here if we took a 2208 * fast instruction access mmu miss trap while running in 2209 * invalid context. 2210 * 2211 * %g1 = 8K TSB pointer register (not used, clobbered) 2212 * %g2 = tag access register (used) 2213 * %g3 = faulting context id (used) 2214 * %g7 = 4M virtual page number for tag matching (used) 2215 */ 2216 .align 64 2217 ALTENTRY(sfmmu_kitlb_miss) 2218 brnz,pn %g3, tsb_tl0_noctxt 2219 nop 2220 2221 /* kernel miss */ 2222 /* get kernel tsb pointer */ 2223 /* we patch the next set of instructions at run time */ 2224 /* NOTE: any changes here require sfmmu_patch_ktsb fixed */ 2225iktsbbase: 2226 RUNTIME_PATCH_SETX(%g4, %g5) 2227 /* %g4 = contents of ktsb_base or ktsb_pbase */ 2228 2229iktsb: sllx %g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1 2230 srlx %g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1 2231 or %g4, %g1, %g1 ! form tsb ptr 2232 ldda [%g1]RUNTIME_PATCH, %g4 ! %g4 = tag, %g5 = data 2233 cmp %g4, %g7 2234 bne,pn %xcc, sfmmu_tsb_miss_tt ! branch on miss 2235 andcc %g5, TTE_EXECPRM_INT, %g0 ! check exec bit 2236 bz,pn %icc, exec_fault 2237 nop 2238 TT_TRACE(trace_tsbhit) ! 2 instr traptrace 2239 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2240 retry 2241 2242 /* 2243 * Kernel dTLB miss. We also get here if we took a fast data 2244 * access mmu miss trap while running in invalid context. 
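	 *
	 * The probe below treats the kernel TSB as a direct-mapped,
	 * virtually indexed cache.  Roughly, in illustrative C (the real
	 * TSB base and size code are hot-patched at boot):
	 *
	 *	idx  = (tagacc >> MMU_PAGESHIFT) & (ktsb_entries - 1);
	 *	tsbe = &ktsb[idx];
	 *	if (tsbe->tag == (tagacc >> TAG_VALO_SHIFT)) {
	 *		dtlb_load(tsbe->data);	// TSB hit: stuff TLB and retry
	 *	}
	 *	// else probe the 4M TSB, or the kpm handlers for kpm VAs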
2245 * 2246 * Note: for now we store kpm TTEs in the kernel TSB as usual. 2247 * We select the TSB miss handler to branch to depending on 2248 * the virtual address of the access. In the future it may 2249 * be desirable to separate kpm TTEs into their own TSB, 2250 * in which case all that needs to be done is to set 2251 * kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch 2252 * early in the miss if we detect a kpm VA to a new handler. 2253 * 2254 * %g1 = 8K TSB pointer register (not used, clobbered) 2255 * %g2 = tag access register (used) 2256 * %g3 = faulting context id (used) 2257 */ 2258 .align 64 2259 ALTENTRY(sfmmu_kdtlb_miss) 2260 brnz,pn %g3, tsb_tl0_noctxt /* invalid context? */ 2261 nop 2262 2263 /* Gather some stats for kpm misses in the TLB. */ 2264 /* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */ 2265 KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out) 2266 2267 /* 2268 * Get first TSB offset and look for 8K/64K/512K mapping 2269 * using the 8K virtual page as the index. 2270 * 2271 * We patch the next set of instructions at run time; 2272 * any changes here require sfmmu_patch_ktsb changes too. 2273 */ 2274dktsbbase: 2275 RUNTIME_PATCH_SETX(%g7, %g6) 2276 /* %g7 = contents of ktsb_base or ktsb_pbase */ 2277 2278dktsb: sllx %g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1 2279 srlx %g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1 2280 2281 /* 2282 * At this point %g1 is our index into the TSB. 2283 * We just masked off enough bits of the VA depending 2284 * on our TSB size code. 2285 */ 2286 ldda [%g7 + %g1]RUNTIME_PATCH, %g4 ! %g4 = tag, %g5 = data 2287 srlx %g2, TAG_VALO_SHIFT, %g6 ! make tag to compare 2288 cmp %g6, %g4 ! compare tag 2289 bne,pn %xcc, dktsb4m_kpmcheck_small 2290 add %g7, %g1, %g1 /* form tsb ptr */ 2291 TT_TRACE(trace_tsbhit) 2292 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2293 /* trapstat expects tte in %g5 */ 2294 retry 2295 2296 /* 2297 * If kpm is using large pages, the following instruction needs 2298 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm) 2299 * so that we will probe the 4M TSB regardless of the VA. In 2300 * the case kpm is using small pages, we know no large kernel 2301 * mappings are located above 0x80000000.00000000 so we skip the 2302 * probe as an optimization. 2303 */ 2304dktsb4m_kpmcheck_small: 2305 brlz,pn %g2, sfmmu_kpm_dtsb_miss_small 2306 /* delay slot safe, below */ 2307 2308 /* 2309 * Get second TSB offset and look for 4M mapping 2310 * using 4M virtual page as the TSB index. 2311 * 2312 * Here: 2313 * %g1 = 8K TSB pointer. Don't squash it. 2314 * %g2 = tag access register (we still need it) 2315 */ 2316 srlx %g2, MMU_PAGESHIFT4M, %g3 2317 2318 /* 2319 * We patch the next set of instructions at run time; 2320 * any changes here require sfmmu_patch_ktsb changes too. 2321 */ 2322dktsb4mbase: 2323 RUNTIME_PATCH_SETX(%g7, %g6) 2324 /* %g7 = contents of ktsb4m_base or ktsb4m_pbase */ 2325dktsb4m: 2326 sllx %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3 2327 srlx %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3 2328 2329 /* 2330 * At this point %g3 is our index into the TSB. 2331 * We just masked off enough bits of the VA depending 2332 * on our TSB size code. 2333 */ 2334 ldda [%g7 + %g3]RUNTIME_PATCH, %g4 ! %g4 = tag, %g5 = data 2335 srlx %g2, TAG_VALO_SHIFT, %g6 ! make tag to compare 2336 cmp %g6, %g4 ! compare tag 2337 2338dktsb4m_tsbmiss: 2339 bne,pn %xcc, dktsb4m_kpmcheck 2340 add %g7, %g3, %g3 ! 
%g3 = kernel second TSB ptr 2341 TT_TRACE(trace_tsbhit) 2342 /* we don't check TTE size here since we assume 4M TSB is separate */ 2343 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2344 /* trapstat expects tte in %g5 */ 2345 retry 2346 2347 /* 2348 * So, we failed to find a valid TTE to match the faulting 2349 * address in either TSB. There are a few cases that could land 2350 * us here: 2351 * 2352 * 1) This is a kernel VA below 0x80000000.00000000. We branch 2353 * to sfmmu_tsb_miss_tt to handle the miss. 2354 * 2) We missed on a kpm VA, and we didn't find the mapping in the 2355 * 4M TSB. Let segkpm handle it. 2356 * 2357 * Note that we shouldn't land here in the case of a kpm VA when 2358 * kpm_smallpages is active -- we handled that case earlier at 2359 * dktsb4m_kpmcheck_small. 2360 * 2361 * At this point: 2362 * g1 = 8K-indexed primary TSB pointer 2363 * g2 = tag access register 2364 * g3 = 4M-indexed secondary TSB pointer 2365 */ 2366dktsb4m_kpmcheck: 2367 cmp %g2, %g0 2368 bl,pn %xcc, sfmmu_kpm_dtsb_miss 2369 nop 2370 ba,a,pt %icc, sfmmu_tsb_miss_tt 2371 nop 2372 2373#ifdef sun4v 2374 /* 2375 * User instruction miss w/ single TSB. 2376 * The first probe covers 8K, 64K, and 512K page sizes, 2377 * because 64K and 512K mappings are replicated off 8K 2378 * pointer. 2379 * 2380 * g1 = tsb8k pointer register 2381 * g2 = tag access register 2382 * g3 - g6 = scratch registers 2383 * g7 = TSB tag to match 2384 */ 2385 .align 64 2386 ALTENTRY(sfmmu_uitlb_fastpath) 2387 2388 PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail) 2389 /* g4 - g5 = clobbered by PROBE_1ST_ITSB */ 2390 ba,pn %xcc, sfmmu_tsb_miss_tt 2391 mov %g0, %g3 2392 2393 /* 2394 * User data miss w/ single TSB. 2395 * The first probe covers 8K, 64K, and 512K page sizes, 2396 * because 64K and 512K mappings are replicated off 8K 2397 * pointer. 2398 * 2399 * g1 = tsb8k pointer register 2400 * g2 = tag access register 2401 * g3 - g6 = scratch registers 2402 * g7 = TSB tag to match 2403 */ 2404 .align 64 2405 ALTENTRY(sfmmu_udtlb_fastpath) 2406 2407 PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail) 2408 /* g4 - g5 = clobbered by PROBE_1ST_DTSB */ 2409 ba,pn %xcc, sfmmu_tsb_miss_tt 2410 mov %g0, %g3 2411 2412 /* 2413 * User instruction miss w/ multiple TSBs (sun4v). 2414 * The first probe covers 8K, 64K, and 512K page sizes, 2415 * because 64K and 512K mappings are replicated off 8K 2416 * pointer. Second probe covers 4M page size only. 2417 * 2418 * Just like sfmmu_udtlb_slowpath, except: 2419 * o Uses ASI_ITLB_IN 2420 * o checks for execute permission 2421 * o No ISM prediction. 2422 * 2423 * g1 = tsb8k pointer register 2424 * g2 = tag access register 2425 * g3 - g6 = scratch registers 2426 * g7 = TSB tag to match 2427 */ 2428 .align 64 2429 ALTENTRY(sfmmu_uitlb_slowpath) 2430 2431 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5) 2432 PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail) 2433 /* g4 - g5 = clobbered here */ 2434 2435 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5) 2436 /* g1 = first TSB pointer, g3 = second TSB pointer */ 2437 srlx %g2, TAG_VALO_SHIFT, %g7 2438 PROBE_2ND_ITSB(%g3, %g7) 2439 /* NOT REACHED */ 2440 2441#else /* sun4v */ 2442 2443 /* 2444 * User instruction miss w/ multiple TSBs (sun4u). 2445 * The first probe covers 8K, 64K, and 512K page sizes, 2446 * because 64K and 512K mappings are replicated off 8K 2447 * pointer. Second probe covers 4M page size only. 2448 * 2449 * Just like sfmmu_udtlb_slowpath, except: 2450 * o Uses ASI_ITLB_IN 2451 * o checks for execute permission 2452 * o No ISM prediction. 
2453 * 2454 * g1 = tsb8k pointer register 2455 * g2 = tag access register 2456 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch 2457 * g4 - g6 = scratch registers 2458 * g7 = TSB tag to match 2459 */ 2460 .align 64 2461 ALTENTRY(sfmmu_uitlb_slowpath) 2462 2463#ifdef UTSB_PHYS 2464 /* 2465 * g1 = 1st TSB entry pointer 2466 * g3 = 2nd TSB base register 2467 * Need 2nd TSB entry pointer for 2nd probe. 2468 */ 2469 PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail) 2470 2471 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5) 2472#else /* UTSB_PHYS */ 2473 mov %g1, %g3 /* save tsb8k reg in %g3 */ 2474 GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb) 2475 PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail) 2476 2477 mov %g2, %g6 /* GET_2ND_TSBE_PTR clobbers tagacc */ 2478 mov %g3, %g7 /* copy tsb8k reg in %g7 */ 2479 GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb) 2480#endif /* UTSB_PHYS */ 2481 /* g1 = first TSB pointer, g3 = second TSB pointer */ 2482 srlx %g2, TAG_VALO_SHIFT, %g7 2483 PROBE_2ND_ITSB(%g3, %g7, isynth) 2484 /* NOT REACHED */ 2485#endif /* sun4v */ 2486 2487 /* 2488 * User data miss w/ multiple TSBs. 2489 * The first probe covers 8K, 64K, and 512K page sizes, 2490 * because 64K and 512K mappings are replicated off 8K 2491 * pointer. Second probe covers 4M page size only. 2492 * 2493 * We consider probing for 4M pages first if the VA falls 2494 * in a range that's likely to be ISM. 2495 * 2496 * g1 = tsb8k pointer register 2497 * g2 = tag access register 2498 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch 2499 * g4 - g6 = scratch registers 2500 * g7 = TSB tag to match 2501 */ 2502 .align 64 2503 ALTENTRY(sfmmu_udtlb_slowpath) 2504 2505 /* 2506 * Check for ISM. If it exists, look for 4M mappings in the second TSB 2507 * first, then probe for other mappings in the first TSB if that fails. 2508 */ 2509 srax %g2, PREDISM_BASESHIFT, %g6 /* g6 > 0 : ISM predicted */ 2510 brgz,pn %g6, udtlb_miss_probesecond /* check for ISM */ 2511 mov %g1, %g3 2512 2513udtlb_miss_probefirst: 2514 /* 2515 * g1 = 8K TSB pointer register 2516 * g2 = tag access register 2517 * g3 = (potentially) second TSB entry ptr 2518 * g6 = ism pred. 2519 * g7 = vpg_4m 2520 */ 2521#ifdef sun4v 2522 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5) 2523 PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail) 2524 2525 /* 2526 * Here: 2527 * g1 = first TSB pointer 2528 * g2 = tag access reg 2529 * g3 = second TSB ptr IFF ISM pred. (else don't care) 2530 */ 2531 brgz,pn %g6, sfmmu_tsb_miss_tt 2532 nop 2533#else /* sun4v */ 2534#ifndef UTSB_PHYS 2535 mov %g1, %g4 2536 GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb) 2537#endif UTSB_PHYS 2538 PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail) 2539 2540 /* 2541 * Here: 2542 * g1 = first TSB pointer 2543 * g2 = tag access reg 2544 * g3 = second TSB ptr IFF ISM pred. (else don't care) 2545 */ 2546 brgz,pn %g6, sfmmu_tsb_miss_tt 2547 nop 2548#ifndef UTSB_PHYS 2549 ldxa [%g0]ASI_DMMU_TSB_8K, %g3 2550#endif UTSB_PHYS 2551 /* fall through in 8K->4M probe order */ 2552#endif /* sun4v */ 2553 2554udtlb_miss_probesecond: 2555 /* 2556 * Look in the second TSB for the TTE 2557 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred. 2558 * g2 = tag access reg 2559 * g3 = 8K TSB pointer register 2560 * g6 = ism pred. 
2561 * g7 = vpg_4m 2562 */ 2563#ifdef sun4v 2564 /* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */ 2565 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5) 2566 /* %g2 is okay, no need to reload, %g3 = second tsbe ptr */ 2567#else /* sun4v */ 2568#ifdef UTSB_PHYS 2569 GET_2ND_TSBREG(%g3) 2570 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5) 2571 /* tagacc (%g2) is okay, no need to reload, %g3 = second tsbe ptr */ 2572#else /* UTSB_PHYS */ 2573 mov %g3, %g7 2574 GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb) 2575 /* %g2 clobbered, %g3 =second tsbe ptr */ 2576 mov MMU_TAG_ACCESS, %g2 2577 ldxa [%g2]ASI_DMMU, %g2 2578#endif /* UTSB_PHYS */ 2579#endif /* sun4v */ 2580 2581 srlx %g2, TAG_VALO_SHIFT, %g7 2582 PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail) 2583 /* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */ 2584 brgz,pn %g6, udtlb_miss_probefirst 2585 nop 2586 2587 /* fall through to sfmmu_tsb_miss_tt */ 2588 2589 ALTENTRY(sfmmu_tsb_miss_tt) 2590 TT_TRACE(trace_tsbmiss) 2591 /* 2592 * We get here if there is a TSB miss OR a write protect trap. 2593 * 2594 * g1 = First TSB entry pointer 2595 * g2 = tag access register 2596 * g3 = 4M TSB entry pointer; NULL if no 2nd TSB 2597 * g4 - g7 = scratch registers 2598 */ 2599 2600 ALTENTRY(sfmmu_tsb_miss) 2601 2602 /* 2603 * If trapstat is running, we need to shift the %tpc and %tnpc to 2604 * point to trapstat's TSB miss return code (note that trapstat 2605 * itself will patch the correct offset to add). 2606 */ 2607 rdpr %tl, %g7 2608 cmp %g7, 1 2609 ble,pt %xcc, 0f 2610 sethi %hi(KERNELBASE), %g6 2611 rdpr %tpc, %g7 2612 or %g6, %lo(KERNELBASE), %g6 2613 cmp %g7, %g6 2614 bgeu,pt %xcc, 0f 2615 /* delay slot safe */ 2616 2617 ALTENTRY(tsbmiss_trapstat_patch_point) 2618 add %g7, RUNTIME_PATCH, %g7 /* must match TSTAT_TSBMISS_INSTR */ 2619 wrpr %g7, %tpc 2620 add %g7, 4, %g7 2621 wrpr %g7, %tnpc 26220: 2623 CPU_TSBMISS_AREA(%g6, %g7) 2624 2625 stn %g1, [%g6 + TSBMISS_TSBPTR] /* save first tsb pointer */ 2626 stn %g3, [%g6 + TSBMISS_TSBPTR4M] /* save second tsb pointer */ 2627 2628 sllx %g2, TAGACC_CTX_LSHIFT, %g3 2629 brz,a,pn %g3, 1f /* skip ahead if kernel */ 2630 ldn [%g6 + TSBMISS_KHATID], %g7 2631 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctxnum */ 2632 ldn [%g6 + TSBMISS_UHATID], %g7 /* g7 = hatid */ 2633 2634 HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5) 2635 2636 cmp %g3, INVALID_CONTEXT 2637 be,pn %icc, tsb_tl0_noctxt /* no ctx miss exception */ 2638 stn %g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)] 2639 2640 ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism) 2641 /* 2642 * The miss wasn't in an ISM segment. 2643 * 2644 * %g1 %g3, %g4, %g5, %g7 all clobbered 2645 * %g2 = tag access (vaddr + ctx) 2646 */ 2647 2648 ba,pt %icc, 2f 2649 ldn [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7 2650 26511: 2652 HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5) 2653 /* 2654 * 8K and 64K hash. 2655 */ 26562: 2657 2658 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, 2659 MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte, 2660 sfmmu_suspend_tl, tsb_512K) 2661 /* NOT REACHED */ 2662 2663tsb_512K: 2664 ldn [%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3 2665 sllx %g3, TAGACC_CTX_LSHIFT, %g5 2666 brz,pn %g5, 3f 2667 lduh [%g6 + TSBMISS_HATFLAGS], %g4 2668 and %g4, HAT_512K_FLAG, %g5 2669 2670 /* 2671 * Note that there is a small window here where we may have 2672 * a 512k page in the hash list but have not set the HAT_512K_FLAG 2673 * flag yet, so we will skip searching the 512k hash list. 
2674 * In this case we will end up in pagefault which will find 2675 * the mapping and return. So, in this instance we will end up 2676 * spending a bit more time resolving this TSB miss, but it can 2677 * only happen once per process and even then, the chances of that 2678 * are very small, so it's not worth the extra overhead it would 2679 * take to close this window. 2680 */ 2681 brz,pn %g5, tsb_4M 2682 nop 26833: 2684 /* 2685 * 512K hash 2686 */ 2687 2688 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, 2689 MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte, 2690 sfmmu_suspend_tl, tsb_4M) 2691 /* NOT REACHED */ 2692 2693tsb_4M: 2694 ldn [%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3 2695 sllx %g3, TAGACC_CTX_LSHIFT, %g5 2696 brz,pn %g5, 4f 2697 lduh [%g6 + TSBMISS_HATFLAGS], %g4 2698 and %g4, HAT_4M_FLAG, %g5 2699 brz,pn %g5, tsb_32M 2700 nop 27014: 2702 /* 2703 * 4M hash 2704 */ 2705 2706 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, 2707 MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte, 2708 sfmmu_suspend_tl, tsb_32M) 2709 /* NOT REACHED */ 2710 2711tsb_32M: 2712#ifndef sun4v 2713 GET_CPU_IMPL(%g5) 2714 cmp %g5, OLYMPUS_C_IMPL 2715 be,pn %xcc, 0f 2716 nop 2717 cmp %g5, PANTHER_IMPL 2718 bne,pt %xcc, tsb_pagefault 2719 nop 2720#endif 2721 27220: 2723 ldn [%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3 2724 sllx %g3, TAGACC_CTX_LSHIFT, %g5 2725#ifdef sun4v 2726 brz,pn %g5, 6f 2727#else 2728 brz,pn %g5, tsb_pagefault 2729#endif 2730 lduh [%g6 + TSBMISS_HATFLAGS], %g4 2731 and %g4, HAT_32M_FLAG, %g5 2732 brz,pn %g5, tsb_256M 2733 nop 27345: 2735 /* 2736 * 32M hash 2737 */ 2738 2739 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, 2740 MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte, 2741 sfmmu_suspend_tl, tsb_256M) 2742 /* NOT REACHED */ 2743 2744tsb_256M: 2745 lduh [%g6 + TSBMISS_HATFLAGS], %g4 2746 and %g4, HAT_256M_FLAG, %g5 2747 brz,pn %g5, tsb_pagefault 2748 nop 27496: 2750 /* 2751 * 256M hash 2752 */ 2753 2754 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, 2755 MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte, 2756 sfmmu_suspend_tl, tsb_pagefault) 2757 /* NOT REACHED */ 2758 2759tsb_checktte: 2760 /* 2761 * g3 = tte 2762 * g4 = tte pa 2763 * g5 = tte va 2764 * g6 = tsbmiss area 2765 */ 2766 brgez,pn %g3, tsb_pagefault /* if tte invalid branch */ 2767 nop 2768 2769tsb_validtte: 2770 /* 2771 * Set ref/mod bits if this is a prot trap. Usually, it isn't. 2772 */ 2773 rdpr %tt, %g7 2774 cmp %g7, FAST_PROT_TT 2775 bne,pt %icc, 4f 2776 nop 2777 2778 TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod, 2779 tsb_protfault) 2780 2781 rdpr %tt, %g5 2782 GET_MMU_D_TTARGET(%g2, %g7) /* %g2 = ttarget */ 2783 ba,pt %xcc, tsb_update_tl1 2784 nop 2785 27864: 2787 /* 2788 * If ITLB miss check exec bit. 2789 * If not set treat as invalid TTE. 2790 */ 2791 cmp %g7, T_INSTR_MMU_MISS 2792 be,pn %icc, 5f 2793 andcc %g3, TTE_EXECPRM_INT, %g0 /* check execute bit is set */ 2794 cmp %g7, FAST_IMMU_MISS_TT 2795 bne,pt %icc, 3f 2796 andcc %g3, TTE_EXECPRM_INT, %g0 /* check execute bit is set */ 27975: 2798 bz,pn %icc, tsb_protfault 2799 nop 2800 28013: 2802 /* 2803 * Set reference bit if not already set 2804 */ 2805 TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref) 2806 2807 /* 2808 * Now, load into TSB/TLB. 
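	 *
	 * The tag target built below packs the context number above the
	 * VA bits, roughly (illustrative only):
	 *
	 *	ttarget = ((uint64_t)ctx << TTARGET_CTX_SHIFT) |
	 *	    (vaddr >> TTARGET_VA_SHIFT);
	 *
	 * A zero context field marks a kernel miss, which is how
	 * tsb_update_tl1 below chooses between tsb_kernel and tsb_user.
	 *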
At this point: 2809 * g3 = tte 2810 * g4 = patte 2811 * g6 = tsbmiss area 2812 */ 2813 rdpr %tt, %g5 2814#ifdef sun4v 2815 MMU_FAULT_STATUS_AREA(%g2) 2816 cmp %g5, T_INSTR_MMU_MISS 2817 be,a,pt %icc, 9f 2818 nop 2819 cmp %g5, FAST_IMMU_MISS_TT 2820 be,a,pt %icc, 9f 2821 nop 2822 add %g2, MMFSA_D_, %g2 28239: 2824 ldx [%g2 + MMFSA_CTX_], %g7 2825 sllx %g7, TTARGET_CTX_SHIFT, %g7 2826 ldx [%g2 + MMFSA_ADDR_], %g2 2827 srlx %g2, TTARGET_VA_SHIFT, %g2 2828 or %g2, %g7, %g2 2829#else 2830 cmp %g5, FAST_IMMU_MISS_TT 2831 be,a,pt %icc, tsb_update_tl1 2832 ldxa [%g0]ASI_IMMU, %g2 2833 ldxa [%g0]ASI_DMMU, %g2 2834#endif 2835tsb_update_tl1: 2836 srlx %g2, TTARGET_CTX_SHIFT, %g7 2837 brz,pn %g7, tsb_kernel 2838#ifdef sun4v 2839 and %g3, TTE_SZ_BITS, %g7 ! assumes TTE_SZ_SHFT is 0 2840#else 2841 srlx %g3, TTE_SZ_SHFT, %g7 2842#endif 2843 2844tsb_user: 2845#ifdef sun4v 2846 cmp %g7, TTE4M 2847 bge,pn %icc, tsb_user4m 2848 nop 2849#else 2850 cmp %g7, TTESZ_VALID | TTE4M 2851 be,pn %icc, tsb_user4m 2852 srlx %g3, TTE_SZ2_SHFT, %g7 2853 andcc %g7, TTE_SZ2_BITS, %g7 ! check 32/256MB 2854 bnz,a,pn %icc, tsb_user_pn_synth 2855 cmp %g5, FAST_IMMU_MISS_TT 2856#endif 2857 2858tsb_user8k: 2859 ldn [%g6 + TSBMISS_TSBPTR], %g1 ! g1 = first TSB ptr 2860 2861#ifndef UTSB_PHYS 2862 mov ASI_N, %g7 ! user TSBs accessed by VA 2863 mov %g7, %asi 2864#endif /* UTSB_PHYS */ 2865 2866 TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5) 2867 2868#ifdef sun4v 2869 cmp %g5, T_INSTR_MMU_MISS 2870 be,a,pn %xcc, 9f 2871 mov %g3, %g5 2872#endif /* sun4v */ 2873 cmp %g5, FAST_IMMU_MISS_TT 2874 be,pn %xcc, 9f 2875 mov %g3, %g5 2876 2877 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2878 ! trapstat wants TTE in %g5 2879 retry 28809: 2881 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2882 ! trapstat wants TTE in %g5 2883 retry 2884 2885tsb_user4m: 2886 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 /* g1 = tsbp */ 28874: 2888 brz,pn %g1, 5f /* Check to see if we have 2nd TSB programmed */ 2889 nop 2890 2891#ifndef UTSB_PHYS 2892 mov ASI_N, %g7 ! user TSBs accessed by VA 2893 mov %g7, %asi 2894#endif /* UTSB_PHYS */ 2895 2896 TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6) 2897 28985: 2899#ifdef sun4v 2900 cmp %g5, T_INSTR_MMU_MISS 2901 be,a,pn %xcc, 9f 2902 mov %g3, %g5 2903#endif /* sun4v */ 2904 cmp %g5, FAST_IMMU_MISS_TT 2905 be,pn %xcc, 9f 2906 mov %g3, %g5 2907 2908 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2909 ! trapstat wants TTE in %g5 2910 retry 29119: 2912 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2913 ! trapstat wants TTE in %g5 2914 retry 2915 2916#ifndef sun4v 2917 /* 2918 * Panther ITLB synthesis. 2919 * The Panther 32M and 256M ITLB code simulates these two large page 2920 * sizes with 4M pages, to provide support for programs, for example 2921 * Java, that may copy instructions into a 32M or 256M data page and 2922 * then execute them. The code below generates the 4M pfn bits and 2923 * saves them in the modified 32M/256M ttes in the TSB. If the tte is 2924 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits 2925 * are ignored by the hardware. 2926 * 2927 * Now, load into TSB/TLB. 
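	 *
	 * Conceptually (an illustrative sketch only), the synthesized TTE
	 * is derived from the large-page TTE as:
	 *
	 *	off = (vaddr & (pgsz - 1)) & ~(MMU_PAGESIZE4M - 1);
	 *	tte_pfn += (off >> MMU_PAGESHIFT);	// select the right 4M chunk
	 *	tte_size = TTE4M;			// ITLB path only
	 *
	 * where pgsz is 32M or 256M.
	 *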
At this point: 2928 * g2 = tagtarget 2929 * g3 = tte 2930 * g4 = patte 2931 * g5 = tt 2932 * g6 = tsbmiss area 2933 */ 2934tsb_user_pn_synth: 2935 be,pt %xcc, tsb_user_itlb_synth /* ITLB miss */ 2936 andcc %g3, TTE_EXECPRM_INT, %g0 /* is execprm bit set */ 2937 bz,pn %icc, 4b /* if not, been here before */ 2938 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 /* g1 = tsbp */ 2939 brz,a,pn %g1, 5f /* no 2nd tsb */ 2940 mov %g3, %g5 2941 2942 mov MMU_TAG_ACCESS, %g7 2943 ldxa [%g7]ASI_DMMU, %g6 /* get tag access va */ 2944 GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1) /* make 4M pfn offset */ 2945 2946 mov ASI_N, %g7 /* user TSBs always accessed by VA */ 2947 mov %g7, %asi 2948 TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 4) /* update TSB */ 29495: 2950 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2951 retry 2952 2953tsb_user_itlb_synth: 2954 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 /* g1 = tsbp */ 2955 2956 mov MMU_TAG_ACCESS, %g7 2957 ldxa [%g7]ASI_IMMU, %g6 /* get tag access va */ 2958 GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2) /* make 4M pfn offset */ 2959 brz,a,pn %g1, 7f /* Check to see if we have 2nd TSB programmed */ 2960 or %g5, %g3, %g5 /* add 4M bits to TTE */ 2961 2962 mov ASI_N, %g7 /* user TSBs always accessed by VA */ 2963 mov %g7, %asi 2964 TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 6) /* update TSB */ 29657: 2966 SET_TTE4M_PN(%g5, %g7) /* add TTE4M pagesize to TTE */ 2967 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) 2968 retry 2969#endif 2970 2971tsb_kernel: 2972#ifdef sun4v 2973 cmp %g7, TTE4M 2974 bge,pn %icc, 5f 2975#else 2976 cmp %g7, TTESZ_VALID | TTE4M ! no 32M or 256M support 2977 be,pn %icc, 5f 2978#endif 2979 nop 2980 ldn [%g6 + TSBMISS_TSBPTR], %g1 ! g1 = 8k tsbptr 2981 ba,pt %xcc, 6f 2982 nop 29835: 2984 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 ! g1 = 4m tsbptr 2985 brz,pn %g1, 3f /* skip programming if 4m TSB ptr is NULL */ 2986 nop 29876: 2988#ifndef sun4v 2989tsb_kernel_patch_asi: 2990 or %g0, RUNTIME_PATCH, %g6 2991 mov %g6, %asi ! XXX avoid writing to %asi !! 2992#endif 2993 TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7) 29943: 2995#ifdef sun4v 2996 cmp %g5, T_INSTR_MMU_MISS 2997 be,a,pn %icc, 1f 2998 mov %g3, %g5 ! trapstat wants TTE in %g5 2999#endif /* sun4v */ 3000 cmp %g5, FAST_IMMU_MISS_TT 3001 be,pn %icc, 1f 3002 mov %g3, %g5 ! trapstat wants TTE in %g5 3003 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) 3004 ! trapstat wants TTE in %g5 3005 retry 30061: 3007 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) 3008 ! trapstat wants TTE in %g5 3009 retry 3010 3011tsb_ism: 3012 /* 3013 * This is an ISM [i|d]tlb miss. We optimize for largest 3014 * page size down to smallest. 3015 * 3016 * g2 = vaddr + ctx aka tag access register 3017 * g3 = ismmap->ism_seg 3018 * g4 = physical address of ismmap->ism_sfmmu 3019 * g6 = tsbmiss area 3020 */ 3021 ldna [%g4]ASI_MEM, %g7 /* g7 = ism hatid */ 3022 brz,a,pn %g7, ptl1_panic /* if zero jmp ahead */ 3023 mov PTL1_BAD_ISM, %g1 3024 /* g5 = pa of imap_vb_shift */ 3025 sub %g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5 3026 lduha [%g5]ASI_MEM, %g4 /* g4 = imap_vb_shift */ 3027 srlx %g3, %g4, %g3 /* clr size field */ 3028 set TAGACC_CTX_MASK, %g1 /* mask off ctx number */ 3029 sllx %g3, %g4, %g3 /* g3 = ism vbase */ 3030 and %g2, %g1, %g4 /* g4 = ctx number */ 3031 andn %g2, %g1, %g1 /* g1 = tlb miss vaddr */ 3032 sub %g1, %g3, %g2 /* g2 = offset in ISM seg */ 3033 or %g2, %g4, %g2 /* g2 = tagacc (vaddr + ctx) */ 3034 3035 /* 3036 * ISM pages are always locked down. 3037 * If we can't find the tte then pagefault 3038 * and let the spt segment driver resovle it. 
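	 *
	 * The tag access value was rebuilt above so that the hash lookups
	 * are done relative to the ISM segment, roughly (illustrative C):
	 *
	 *	vbase  = (ism_seg >> vb_shift) << vb_shift;
	 *	vaddr  = tagacc & ~TAGACC_CTX_MASK;
	 *	ctx    = tagacc & TAGACC_CTX_MASK;
	 *	tagacc = (vaddr - vbase) | ctx;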
3039 * 3040 * g2 = ISM vaddr (offset in ISM seg) 3041 * g6 = tsb miss area 3042 * g7 = ISM hatid 3043 */ 3044 sub %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5 3045 lduha [%g5]ASI_MEM, %g4 /* g5 = pa of imap_hatflags */ 3046 and %g4, HAT_4M_FLAG, %g5 /* g4 = imap_hatflags */ 3047 brnz,pt %g5, tsb_ism_4M /* branch if 4M pages */ 3048 nop 3049 3050tsb_ism_32M: 3051 and %g4, HAT_32M_FLAG, %g5 /* check default 32M next */ 3052 brz,pn %g5, tsb_ism_256M 3053 nop 3054 3055 /* 3056 * 32M hash. 3057 */ 3058 3059 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M, 3060 TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl, 3061 tsb_ism_4M) 3062 /* NOT REACHED */ 3063 3064tsb_ism_32M_found: 3065 brlz,pt %g3, tsb_validtte 3066 nop 3067 ba,pt %xcc, tsb_ism_4M 3068 nop 3069 3070tsb_ism_256M: 3071 and %g4, HAT_256M_FLAG, %g5 /* 256M is last resort */ 3072 brz,a,pn %g5, ptl1_panic 3073 mov PTL1_BAD_ISM, %g1 3074 3075 /* 3076 * 256M hash. 3077 */ 3078 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M, 3079 TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl, 3080 tsb_ism_4M) 3081 3082tsb_ism_256M_found: 3083 brlz,pt %g3, tsb_validtte 3084 nop 3085 3086tsb_ism_4M: 3087 /* 3088 * 4M hash. 3089 */ 3090 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M, 3091 TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl, 3092 tsb_ism_8K) 3093 /* NOT REACHED */ 3094 3095tsb_ism_4M_found: 3096 brlz,pt %g3, tsb_validtte 3097 nop 3098 3099tsb_ism_8K: 3100 /* 3101 * 8K and 64K hash. 3102 */ 3103 3104 GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K, 3105 TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl, 3106 tsb_pagefault) 3107 /* NOT REACHED */ 3108 3109tsb_ism_8K_found: 3110 brlz,pt %g3, tsb_validtte 3111 nop 3112 3113tsb_pagefault: 3114 rdpr %tt, %g7 3115 cmp %g7, FAST_PROT_TT 3116 be,a,pn %icc, tsb_protfault 3117 wrpr %g0, FAST_DMMU_MISS_TT, %tt 3118 3119tsb_protfault: 3120 /* 3121 * we get here if we couldn't find a valid tte in the hash. 3122 * 3123 * If user and we are at tl>1 we go to window handling code. 3124 * 3125 * If kernel and the fault is on the same page as our stack 3126 * pointer, then we know the stack is bad and the trap handler 3127 * will fail, so we call ptl1_panic with PTL1_BAD_STACK. 3128 * 3129 * If this is a kernel trap and tl>1, panic. 3130 * 3131 * Otherwise we call pagefault. 3132 */ 3133 cmp %g7, FAST_IMMU_MISS_TT 3134#ifdef sun4v 3135 MMU_FAULT_STATUS_AREA(%g4) 3136 ldx [%g4 + MMFSA_I_CTX], %g5 3137 ldx [%g4 + MMFSA_D_CTX], %g4 3138 move %icc, %g5, %g4 3139 cmp %g7, T_INSTR_MMU_MISS 3140 move %icc, %g5, %g4 3141#else 3142 mov MMU_TAG_ACCESS, %g4 3143 ldxa [%g4]ASI_DMMU, %g2 3144 ldxa [%g4]ASI_IMMU, %g5 3145 move %icc, %g5, %g2 3146 cmp %g7, T_INSTR_MMU_MISS 3147 move %icc, %g5, %g2 3148 sllx %g2, TAGACC_CTX_LSHIFT, %g4 3149#endif 3150 brnz,pn %g4, 3f /* skip if not kernel */ 3151 rdpr %tl, %g5 3152 3153 add %sp, STACK_BIAS, %g3 3154 srlx %g3, MMU_PAGESHIFT, %g3 3155 srlx %g2, MMU_PAGESHIFT, %g4 3156 cmp %g3, %g4 3157 be,a,pn %icc, ptl1_panic /* panic if bad %sp */ 3158 mov PTL1_BAD_STACK, %g1 3159 3160 cmp %g5, 1 3161 ble,pt %icc, 2f 3162 nop 3163 TSTAT_CHECK_TL1(2f, %g1, %g2) 3164 rdpr %tt, %g2 3165 cmp %g2, FAST_PROT_TT 3166 mov PTL1_BAD_KPROT_FAULT, %g1 3167 movne %icc, PTL1_BAD_KMISS, %g1 3168 ba,pt %icc, ptl1_panic 3169 nop 3170 31712: 3172 /* 3173 * We are taking a pagefault in the kernel on a kernel address. 
If 3174 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually 3175 * want to call sfmmu_pagefault -- we will instead note that a fault 3176 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done" 3177 * (instead of a "retry"). This will step over the faulting 3178 * instruction. 3179 */ 3180 CPU_INDEX(%g1, %g2) 3181 set cpu_core, %g2 3182 sllx %g1, CPU_CORE_SHIFT, %g1 3183 add %g1, %g2, %g1 3184 lduh [%g1 + CPUC_DTRACE_FLAGS], %g2 3185 andcc %g2, CPU_DTRACE_NOFAULT, %g0 3186 bz sfmmu_pagefault 3187 or %g2, CPU_DTRACE_BADADDR, %g2 3188 stuh %g2, [%g1 + CPUC_DTRACE_FLAGS] 3189 GET_MMU_D_ADDR(%g3, %g4) 3190 stx %g3, [%g1 + CPUC_DTRACE_ILLVAL] 3191 done 3192 31933: 3194 cmp %g5, 1 3195 ble,pt %icc, 4f 3196 nop 3197 TSTAT_CHECK_TL1(4f, %g1, %g2) 3198 ba,pt %icc, sfmmu_window_trap 3199 nop 3200 32014: 3202 /* 3203 * We are taking a pagefault on a non-kernel address. If we are in 3204 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags 3205 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above. 3206 */ 3207 CPU_INDEX(%g1, %g2) 3208 set cpu_core, %g2 3209 sllx %g1, CPU_CORE_SHIFT, %g1 3210 add %g1, %g2, %g1 3211 lduh [%g1 + CPUC_DTRACE_FLAGS], %g2 3212 andcc %g2, CPU_DTRACE_NOFAULT, %g0 3213 bz sfmmu_pagefault 3214 or %g2, CPU_DTRACE_BADADDR, %g2 3215 stuh %g2, [%g1 + CPUC_DTRACE_FLAGS] 3216 GET_MMU_D_ADDR(%g3, %g4) 3217 stx %g3, [%g1 + CPUC_DTRACE_ILLVAL] 3218 3219 /* 3220 * Be sure that we're actually taking this miss from the kernel -- 3221 * otherwise we have managed to return to user-level with 3222 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags. 3223 */ 3224 rdpr %tstate, %g2 3225 btst TSTATE_PRIV, %g2 3226 bz,a ptl1_panic 3227 mov PTL1_BAD_DTRACE_FLAGS, %g1 3228 done 3229 3230 ALTENTRY(tsb_tl0_noctxt) 3231 /* 3232 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set; 3233 * if it is, indicated that we have faulted and issue a done. 3234 */ 3235 CPU_INDEX(%g5, %g6) 3236 set cpu_core, %g6 3237 sllx %g5, CPU_CORE_SHIFT, %g5 3238 add %g5, %g6, %g5 3239 lduh [%g5 + CPUC_DTRACE_FLAGS], %g6 3240 andcc %g6, CPU_DTRACE_NOFAULT, %g0 3241 bz 1f 3242 or %g6, CPU_DTRACE_BADADDR, %g6 3243 stuh %g6, [%g5 + CPUC_DTRACE_FLAGS] 3244 GET_MMU_D_ADDR(%g3, %g4) 3245 stx %g3, [%g5 + CPUC_DTRACE_ILLVAL] 3246 3247 /* 3248 * Be sure that we're actually taking this miss from the kernel -- 3249 * otherwise we have managed to return to user-level with 3250 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags. 3251 */ 3252 rdpr %tstate, %g5 3253 btst TSTATE_PRIV, %g5 3254 bz,a ptl1_panic 3255 mov PTL1_BAD_DTRACE_FLAGS, %g1 3256 done 3257 32581: 3259 rdpr %tt, %g5 3260 cmp %g5, FAST_IMMU_MISS_TT 3261#ifdef sun4v 3262 MMU_FAULT_STATUS_AREA(%g2) 3263 be,a,pt %icc, 2f 3264 ldx [%g2 + MMFSA_I_CTX], %g3 3265 cmp %g5, T_INSTR_MMU_MISS 3266 be,a,pt %icc, 2f 3267 ldx [%g2 + MMFSA_I_CTX], %g3 3268 ldx [%g2 + MMFSA_D_CTX], %g3 32692: 3270#else 3271 mov MMU_TAG_ACCESS, %g2 3272 be,a,pt %icc, 2f 3273 ldxa [%g2]ASI_IMMU, %g3 3274 ldxa [%g2]ASI_DMMU, %g3 32752: sllx %g3, TAGACC_CTX_LSHIFT, %g3 3276#endif 3277 brz,a,pn %g3, ptl1_panic ! panic if called for kernel 3278 mov PTL1_BAD_CTX_STEAL, %g1 ! 
since kernel ctx was stolen 3279 rdpr %tl, %g5 3280 cmp %g5, 1 3281 ble,pt %icc, sfmmu_mmu_trap 3282 nop 3283 TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2) 3284 ba,pt %icc, sfmmu_window_trap 3285 nop 3286 SET_SIZE(sfmmu_tsb_miss) 3287 3288#if (1<< TSBMISS_SHIFT) != TSBMISS_SIZE 3289#error - TSBMISS_SHIFT does not correspond to size of tsbmiss struct 3290#endif 3291 3292#endif /* lint */ 3293 3294#if defined (lint) 3295/* 3296 * This routine will look for a user or kernel vaddr in the hash 3297 * structure. It returns a valid pfn or PFN_INVALID. It doesn't 3298 * grab any locks. It should only be used by other sfmmu routines. 3299 */ 3300/* ARGSUSED */ 3301pfn_t 3302sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep) 3303{ 3304 return(0); 3305} 3306 3307#else /* lint */ 3308 3309 ENTRY_NP(sfmmu_vatopfn) 3310 /* 3311 * disable interrupts 3312 */ 3313 rdpr %pstate, %o3 3314#ifdef DEBUG 3315 andcc %o3, PSTATE_IE, %g0 /* if interrupts already */ 3316 bnz,pt %icc, 1f /* disabled, panic */ 3317 nop 3318 3319 sethi %hi(panicstr), %g1 3320 ldx [%g1 + %lo(panicstr)], %g1 3321 tst %g1 3322 bnz,pt %icc, 1f 3323 nop 3324 3325 save %sp, -SA(MINFRAME), %sp 3326 sethi %hi(sfmmu_panic1), %o0 3327 call panic 3328 or %o0, %lo(sfmmu_panic1), %o0 33291: 3330#endif 3331 /* 3332 * disable interrupts to protect the TSBMISS area 3333 */ 3334 andn %o3, PSTATE_IE, %o5 3335 wrpr %o5, 0, %pstate 3336 3337 /* 3338 * o0 = vaddr 3339 * o1 = sfmmup 3340 * o2 = ttep 3341 */ 3342 CPU_TSBMISS_AREA(%g1, %o5) 3343 ldn [%g1 + TSBMISS_KHATID], %o4 3344 cmp %o4, %o1 3345 bne,pn %ncc, vatopfn_nokernel 3346 mov TTE64K, %g5 /* g5 = rehash # */ 3347 mov %g1,%o5 /* o5 = tsbmiss_area */ 3348 /* 3349 * o0 = vaddr 3350 * o1 & o4 = hatid 3351 * o2 = ttep 3352 * o5 = tsbmiss area 3353 */ 3354 mov HBLK_RANGE_SHIFT, %g6 33551: 3356 3357 /* 3358 * o0 = vaddr 3359 * o1 = sfmmup 3360 * o2 = ttep 3361 * o3 = old %pstate 3362 * o4 = hatid 3363 * o5 = tsbmiss 3364 * g5 = rehash # 3365 * g6 = hmeshift 3366 * 3367 * The first arg to GET_TTE is actually tagaccess register 3368 * not just vaddr. Since this call is for kernel we need to clear 3369 * any lower vaddr bits that would be interpreted as ctx bits. 3370 */ 3371 set TAGACC_CTX_MASK, %g1 3372 andn %o0, %g1, %o0 3373 GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5, 3374 vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk) 3375 3376kvtop_hblk_found: 3377 /* 3378 * o0 = vaddr 3379 * o1 = sfmmup 3380 * o2 = ttep 3381 * g1 = tte 3382 * g2 = tte pa 3383 * g3 = tte va 3384 * o2 = tsbmiss area 3385 * o1 = hat id 3386 */ 3387 brgez,a,pn %g1, 6f /* if tte invalid goto tl0 */ 3388 mov -1, %o0 /* output = -1 (PFN_INVALID) */ 3389 stx %g1,[%o2] /* put tte into *ttep */ 3390 TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4) 3391 /* 3392 * o0 = vaddr 3393 * o1 = sfmmup 3394 * o2 = ttep 3395 * g1 = pfn 3396 */ 3397 ba,pt %xcc, 6f 3398 mov %g1, %o0 3399 3400kvtop_nohblk: 3401 /* 3402 * we get here if we couldn't find valid hblk in hash. We rehash 3403 * if neccesary. 
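	 *
	 * The rehash order below mirrors the page sizes the kernel can
	 * map: sun4u tries the 8K/64K hash, then 512K, then 4M
	 * (DEFAULT_MAX_HASHCNT, no 32M/256M kernel pages); sun4v tries
	 * 8K/64K, then 4M, then 256M (MAX_HASHCNT), skipping the 512K
	 * and 32M hashes.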
3404 */ 3405 ldn [%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0 3406#ifdef sun4v 3407 cmp %g5, MAX_HASHCNT 3408#else 3409 cmp %g5, DEFAULT_MAX_HASHCNT /* no 32/256M kernel pages */ 3410#endif 3411 be,a,pn %icc, 6f 3412 mov -1, %o0 /* output = -1 (PFN_INVALID) */ 3413 mov %o1, %o4 /* restore hatid */ 3414#ifdef sun4v 3415 add %g5, 2, %g5 3416 cmp %g5, 3 3417 move %icc, MMU_PAGESHIFT4M, %g6 3418 ba,pt %icc, 1b 3419 movne %icc, MMU_PAGESHIFT256M, %g6 3420#else 3421 inc %g5 3422 cmp %g5, 2 3423 move %icc, MMU_PAGESHIFT512K, %g6 3424 ba,pt %icc, 1b 3425 movne %icc, MMU_PAGESHIFT4M, %g6 3426#endif 34276: 3428 retl 3429 wrpr %g0, %o3, %pstate /* re-enable interrupts */ 3430 3431tsb_suspend: 3432 /* 3433 * o0 = vaddr 3434 * o1 = sfmmup 3435 * o2 = ttep 3436 * g1 = tte 3437 * g2 = tte pa 3438 * g3 = tte va 3439 * o2 = tsbmiss area use o5 instead of o2 for tsbmiss 3440 */ 3441 stx %g1,[%o2] /* put tte into *ttep */ 3442 brgez,a,pn %g1, 8f /* if tte invalid goto 8: */ 3443 sub %g0, 1, %o0 /* output = -1 (PFN_INVALID) */ 3444 TTETOPFN(%g1, %o0, vatopfn_l3, %g2, %g3, %g4) 3445 /* 3446 * o0 = PFN return value PFN_INVALID, PFN_SUSPENDED, or pfn# 3447 * o1 = sfmmup 3448 * o2 = ttep 3449 * g1 = pfn 3450 */ 3451 sub %g0, 2, %o0 /* output = PFN_SUSPENDED */ 34528: 3453 retl 3454 wrpr %g0, %o3, %pstate /* enable interrupts */ 3455 3456vatopfn_nokernel: 3457 /* 3458 * This routine does NOT support user addresses 3459 * There is a routine in C that supports this. 3460 * The only reason why we don't have the C routine 3461 * support kernel addresses as well is because 3462 * we do va_to_pa while holding the hashlock. 3463 */ 3464 wrpr %g0, %o3, %pstate /* re-enable interrupts */ 3465 save %sp, -SA(MINFRAME), %sp 3466 sethi %hi(sfmmu_panic3), %o0 3467 call panic 3468 or %o0, %lo(sfmmu_panic3), %o0 3469 3470 SET_SIZE(sfmmu_vatopfn) 3471#endif /* lint */ 3472 3473 3474 3475#if !defined(lint) 3476 3477/* 3478 * kpm lock used between trap level tsbmiss handler and kpm C level. 3479 */ 3480#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) \ 3481 mov 0xff, tmp1 ;\ 3482label1: ;\ 3483 casa [kpmlckp]asi, %g0, tmp1 ;\ 3484 brnz,pn tmp1, label1 ;\ 3485 mov 0xff, tmp1 ;\ 3486 membar #LoadLoad 3487 3488#define KPMLOCK_EXIT(kpmlckp, asi) \ 3489 membar #LoadStore|#StoreStore ;\ 3490 sta %g0, [kpmlckp]asi 3491 3492/* 3493 * Lookup a memseg for a given pfn and if found, return the physical 3494 * address of the corresponding struct memseg in mseg, otherwise 3495 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in 3496 * tsbmp, %asi is assumed to be ASI_MEM. 3497 * This lookup is done by strictly traversing only the physical memseg 3498 * linkage. The more generic approach, to check the virtual linkage 3499 * before using the physical (used e.g. with hmehash buckets), cannot 3500 * be used here. Memory DR operations can run in parallel to this 3501 * lookup w/o any locks and updates of the physical and virtual linkage 3502 * cannot be done atomically wrt. to each other. Because physical 3503 * address zero can be valid physical address, MSEG_NULLPTR_PA acts 3504 * as "physical NULL" pointer. 
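 *
 * An illustrative C sketch of the lookup (variable and field names are
 * approximate; every load really goes through %asi):
 *
 *	idx  = (pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1);
 *	mseg = msegphashpa[idx];
 *	if (mseg != MSEG_NULLPTR_PA &&
 *	    pfn >= mseg->pages_base && pfn < mseg->pages_end &&
 *	    mseg->pagespa[pfn - mseg->pages_base].p_pagenum == pfn)
 *		return (mseg);				// hash hit
 *	for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA; mseg = mseg->nextpa)
 *		if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *			return (mseg);			// brute force hit
 *	return (MSEG_NULLPTR_PA);			// not found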
3505 */ 3506#define PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \ 3507 sethi %hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */ ;\ 3508 ldx [tmp3 + %lo(mhash_per_slot)], mseg ;\ 3509 udivx pfn, mseg, mseg ;\ 3510 ldx [tsbmp + KPMTSBM_MSEGPHASHPA], tmp1 ;\ 3511 and mseg, SFMMU_N_MEM_SLOTS - 1, mseg ;\ 3512 sllx mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg ;\ 3513 add tmp1, mseg, tmp1 ;\ 3514 ldxa [tmp1]%asi, mseg ;\ 3515 cmp mseg, MSEG_NULLPTR_PA ;\ 3516 be,pn %xcc, label/**/1 /* if not found */ ;\ 3517 nop ;\ 3518 ldxa [mseg + MEMSEG_PAGES_BASE]%asi, tmp1 ;\ 3519 cmp pfn, tmp1 /* pfn - pages_base */ ;\ 3520 blu,pn %xcc, label/**/1 ;\ 3521 ldxa [mseg + MEMSEG_PAGES_END]%asi, tmp2 ;\ 3522 cmp pfn, tmp2 /* pfn - pages_end */ ;\ 3523 bgeu,pn %xcc, label/**/1 ;\ 3524 sub pfn, tmp1, tmp1 /* pfn - pages_base */ ;\ 3525 mulx tmp1, PAGE_SIZE, tmp1 ;\ 3526 ldxa [mseg + MEMSEG_PAGESPA]%asi, tmp2 /* pages */ ;\ 3527 add tmp2, tmp1, tmp1 /* pp */ ;\ 3528 lduwa [tmp1 + PAGE_PAGENUM]%asi, tmp2 ;\ 3529 cmp tmp2, pfn ;\ 3530 be,pt %xcc, label/**/_ok /* found */ ;\ 3531label/**/1: ;\ 3532 /* brute force lookup */ ;\ 3533 sethi %hi(memsegspa), tmp3 /* no tsbmp use due to DR */ ;\ 3534 ldx [tmp3 + %lo(memsegspa)], mseg ;\ 3535label/**/2: ;\ 3536 cmp mseg, MSEG_NULLPTR_PA ;\ 3537 be,pn %xcc, label/**/_ok /* if not found */ ;\ 3538 nop ;\ 3539 ldxa [mseg + MEMSEG_PAGES_BASE]%asi, tmp1 ;\ 3540 cmp pfn, tmp1 /* pfn - pages_base */ ;\ 3541 blu,a,pt %xcc, label/**/2 ;\ 3542 ldxa [mseg + MEMSEG_NEXTPA]%asi, mseg ;\ 3543 ldxa [mseg + MEMSEG_PAGES_END]%asi, tmp2 ;\ 3544 cmp pfn, tmp2 /* pfn - pages_end */ ;\ 3545 bgeu,a,pt %xcc, label/**/2 ;\ 3546 ldxa [mseg + MEMSEG_NEXTPA]%asi, mseg ;\ 3547label/**/_ok: 3548 3549 /* 3550 * kpm tsb miss handler large pages 3551 * g1 = 8K kpm TSB entry pointer 3552 * g2 = tag access register 3553 * g3 = 4M kpm TSB entry pointer 3554 */ 3555 ALTENTRY(sfmmu_kpm_dtsb_miss) 3556 TT_TRACE(trace_tsbmiss) 3557 3558 CPU_INDEX(%g7, %g6) 3559 sethi %hi(kpmtsbm_area), %g6 3560 sllx %g7, KPMTSBM_SHIFT, %g7 3561 or %g6, %lo(kpmtsbm_area), %g6 3562 add %g6, %g7, %g6 /* g6 = kpmtsbm ptr */ 3563 3564 /* check enable flag */ 3565 ldub [%g6 + KPMTSBM_FLAGS], %g4 3566 and %g4, KPMTSBM_ENABLE_FLAG, %g5 3567 brz,pn %g5, sfmmu_tsb_miss /* if kpm not enabled */ 3568 nop 3569 3570 /* VA range check */ 3571 ldx [%g6 + KPMTSBM_VBASE], %g7 3572 cmp %g2, %g7 3573 blu,pn %xcc, sfmmu_tsb_miss 3574 ldx [%g6 + KPMTSBM_VEND], %g5 3575 cmp %g2, %g5 3576 bgeu,pn %xcc, sfmmu_tsb_miss 3577 stx %g3, [%g6 + KPMTSBM_TSBPTR] 3578 3579 /* 3580 * check TL tsbmiss handling flag 3581 * bump tsbmiss counter 3582 */ 3583 lduw [%g6 + KPMTSBM_TSBMISS], %g5 3584#ifdef DEBUG 3585 and %g4, KPMTSBM_TLTSBM_FLAG, %g3 3586 inc %g5 3587 brz,pn %g3, sfmmu_kpm_exception 3588 st %g5, [%g6 + KPMTSBM_TSBMISS] 3589#else 3590 inc %g5 3591 st %g5, [%g6 + KPMTSBM_TSBMISS] 3592#endif 3593 /* 3594 * At this point: 3595 * g1 = 8K kpm TSB pointer (not used) 3596 * g2 = tag access register 3597 * g3 = clobbered 3598 * g6 = per-CPU kpm tsbmiss area 3599 * g7 = kpm_vbase 3600 */ 3601 3602 /* vaddr2pfn */ 3603 ldub [%g6 + KPMTSBM_SZSHIFT], %g3 3604 sub %g2, %g7, %g4 /* paddr = vaddr-kpm_vbase */ 3605 srax %g4, %g3, %g2 /* which alias range (r) */ 3606 brnz,pn %g2, sfmmu_kpm_exception /* if (r != 0) goto C handler */ 3607 srlx %g4, MMU_PAGESHIFT, %g2 /* %g2 = pfn */ 3608 3609 /* 3610 * Setup %asi 3611 * mseg_pa = page_numtomemseg_nolock(pfn) 3612 * if (mseg_pa == NULL) sfmmu_kpm_exception 3613 * g2=pfn 3614 */ 3615 mov ASI_MEM, %asi 
3616 PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m) 3617 cmp %g3, MSEG_NULLPTR_PA 3618 be,pn %xcc, sfmmu_kpm_exception /* if mseg not found */ 3619 nop 3620 3621 /* 3622 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase)); 3623 * g2=pfn g3=mseg_pa 3624 */ 3625 ldub [%g6 + KPMTSBM_KPMP2PSHFT], %g5 3626 ldxa [%g3 + MEMSEG_KPM_PBASE]%asi, %g7 3627 srlx %g2, %g5, %g4 3628 sllx %g4, %g5, %g4 3629 sub %g4, %g7, %g4 3630 srlx %g4, %g5, %g4 3631 3632 /* 3633 * Validate inx value 3634 * g2=pfn g3=mseg_pa g4=inx 3635 */ 3636#ifdef DEBUG 3637 ldxa [%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5 3638 cmp %g4, %g5 /* inx - nkpmpgs */ 3639 bgeu,pn %xcc, sfmmu_kpm_exception /* if out of range */ 3640 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7 3641#else 3642 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7 3643#endif 3644 /* 3645 * kp = &mseg_pa->kpm_pages[inx] 3646 */ 3647 sllx %g4, KPMPAGE_SHIFT, %g4 /* kpm_pages offset */ 3648 ldxa [%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */ 3649 add %g5, %g4, %g5 /* kp */ 3650 3651 /* 3652 * KPMP_HASH(kp) 3653 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz 3654 */ 3655 ldub [%g6 + KPMTSBM_KPMPSHIFT], %g1 /* kpmp_shift */ 3656 sub %g7, 1, %g7 /* mask */ 3657 srlx %g5, %g1, %g1 /* x = ksp >> kpmp_shift */ 3658 add %g5, %g1, %g5 /* y = ksp + x */ 3659 and %g5, %g7, %g5 /* hashinx = y & mask */ 3660 3661 /* 3662 * Calculate physical kpm_page pointer 3663 * g2=pfn g3=mseg_pa g4=offset g5=hashinx 3664 */ 3665 ldxa [%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */ 3666 add %g1, %g4, %g1 /* kp_pa */ 3667 3668 /* 3669 * Calculate physical hash lock address 3670 * g1=kp_refcntc_pa g2=pfn g5=hashinx 3671 */ 3672 ldx [%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */ 3673 sllx %g5, KPMHLK_SHIFT, %g5 3674 add %g4, %g5, %g3 3675 add %g3, KPMHLK_LOCK, %g3 /* hlck_pa */ 3676 3677 /* 3678 * Assemble tte 3679 * g1=kp_pa g2=pfn g3=hlck_pa 3680 */ 3681#ifdef sun4v 3682 sethi %hi(TTE_VALID_INT), %g5 /* upper part */ 3683 sllx %g5, 32, %g5 3684 mov (TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4 3685 or %g4, TTE4M, %g4 3686 or %g5, %g4, %g5 3687#else 3688 sethi %hi(TTE_VALID_INT), %g4 3689 mov TTE4M, %g5 3690 sllx %g5, TTE_SZ_SHFT_INT, %g5 3691 or %g5, %g4, %g5 /* upper part */ 3692 sllx %g5, 32, %g5 3693 mov (TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4 3694 or %g5, %g4, %g5 3695#endif 3696 sllx %g2, MMU_PAGESHIFT, %g4 3697 or %g5, %g4, %g5 /* tte */ 3698 ldx [%g6 + KPMTSBM_TSBPTR], %g4 3699 GET_MMU_D_TTARGET(%g2, %g7) /* %g2 = ttarget */ 3700 3701 /* 3702 * tsb dropin 3703 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area 3704 */ 3705 3706 /* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */ 3707 KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM) 3708 3709 /* use C-handler if there's no go for dropin */ 3710 ldsha [%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */ 3711 cmp %g7, -1 3712 bne,pn %xcc, 5f /* use C-handler if there's no go for dropin */ 3713 nop 3714 3715#ifdef DEBUG 3716 /* double check refcnt */ 3717 ldsha [%g1 + KPMPAGE_REFCNT]%asi, %g7 3718 brz,pn %g7, 5f /* let C-handler deal with this */ 3719 nop 3720#endif 3721 3722#ifndef sun4v 3723 ldub [%g6 + KPMTSBM_FLAGS], %g7 3724 mov ASI_N, %g1 3725 andcc %g7, KPMTSBM_TSBPHYS_FLAG, %g0 3726 movnz %icc, ASI_MEM, %g1 3727 mov %g1, %asi 3728#endif 3729 3730 /* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */ 3731 TSB_LOCK_ENTRY(%g4, %g1, %g7, 6) 3732 3733 /* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */ 3734 TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7) 3735 3736 
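	/*
	 * The 4M kpm TTE is now in the kpm TSB; drop it into the DTLB
	 * as well so the retry below completes without another miss.
	 */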
DTLB_STUFF(%g5, %g1, %g2, %g4, %g6) 3737 3738 /* KPMLOCK_EXIT(kpmlckp, asi) */ 3739 KPMLOCK_EXIT(%g3, ASI_MEM) 3740 3741 /* 3742 * If trapstat is running, we need to shift the %tpc and %tnpc to 3743 * point to trapstat's TSB miss return code (note that trapstat 3744 * itself will patch the correct offset to add). 3745 * Note: TTE is expected in %g5 (allows per pagesize reporting). 3746 */ 3747 rdpr %tl, %g7 3748 cmp %g7, 1 3749 ble %icc, 0f 3750 sethi %hi(KERNELBASE), %g6 3751 rdpr %tpc, %g7 3752 or %g6, %lo(KERNELBASE), %g6 3753 cmp %g7, %g6 3754 bgeu %xcc, 0f 3755 ALTENTRY(tsbmiss_trapstat_patch_point_kpm) 3756 add %g7, RUNTIME_PATCH, %g7 /* must match TSTAT_TSBMISS_INSTR */ 3757 wrpr %g7, %tpc 3758 add %g7, 4, %g7 3759 wrpr %g7, %tnpc 37600: 3761 retry 37625: 3763 /* g3=hlck_pa */ 3764 KPMLOCK_EXIT(%g3, ASI_MEM) 3765 ba,pt %icc, sfmmu_kpm_exception 3766 nop 3767 SET_SIZE(sfmmu_kpm_dtsb_miss) 3768 3769 /* 3770 * kpm tsbmiss handler for smallpages 3771 * g1 = 8K kpm TSB pointer 3772 * g2 = tag access register 3773 * g3 = 4M kpm TSB pointer 3774 */ 3775 ALTENTRY(sfmmu_kpm_dtsb_miss_small) 3776 TT_TRACE(trace_tsbmiss) 3777 CPU_INDEX(%g7, %g6) 3778 sethi %hi(kpmtsbm_area), %g6 3779 sllx %g7, KPMTSBM_SHIFT, %g7 3780 or %g6, %lo(kpmtsbm_area), %g6 3781 add %g6, %g7, %g6 /* g6 = kpmtsbm ptr */ 3782 3783 /* check enable flag */ 3784 ldub [%g6 + KPMTSBM_FLAGS], %g4 3785 and %g4, KPMTSBM_ENABLE_FLAG, %g5 3786 brz,pn %g5, sfmmu_tsb_miss /* if kpm not enabled */ 3787 nop 3788 3789 /* 3790 * VA range check 3791 * On fail: goto sfmmu_tsb_miss 3792 */ 3793 ldx [%g6 + KPMTSBM_VBASE], %g7 3794 cmp %g2, %g7 3795 blu,pn %xcc, sfmmu_tsb_miss 3796 ldx [%g6 + KPMTSBM_VEND], %g5 3797 cmp %g2, %g5 3798 bgeu,pn %xcc, sfmmu_tsb_miss 3799 stx %g1, [%g6 + KPMTSBM_TSBPTR] /* save 8K kpm TSB pointer */ 3800 3801 /* 3802 * check TL tsbmiss handling flag 3803 * bump tsbmiss counter 3804 */ 3805 lduw [%g6 + KPMTSBM_TSBMISS], %g5 3806#ifdef DEBUG 3807 and %g4, KPMTSBM_TLTSBM_FLAG, %g1 3808 inc %g5 3809 brz,pn %g1, sfmmu_kpm_exception 3810 st %g5, [%g6 + KPMTSBM_TSBMISS] 3811#else 3812 inc %g5 3813 st %g5, [%g6 + KPMTSBM_TSBMISS] 3814#endif 3815 /* 3816 * At this point: 3817 * g1 = clobbered 3818 * g2 = tag access register 3819 * g3 = 4M kpm TSB pointer (not used) 3820 * g6 = per-CPU kpm tsbmiss area 3821 * g7 = kpm_vbase 3822 */ 3823 3824 /* vaddr2pfn */ 3825 ldub [%g6 + KPMTSBM_SZSHIFT], %g3 3826 sub %g2, %g7, %g4 /* paddr = vaddr-kpm_vbase */ 3827 srax %g4, %g3, %g2 /* which alias range (r) */ 3828 brnz,pn %g2, sfmmu_kpm_exception /* if (r != 0) goto C handler */ 3829 srlx %g4, MMU_PAGESHIFT, %g2 /* %g2 = pfn */ 3830 3831 /* 3832 * Setup %asi 3833 * mseg_pa = page_numtomemseg_nolock_pa(pfn) 3834 * if (mseg not found) sfmmu_kpm_exception 3835 * g2=pfn 3836 */ 3837 mov ASI_MEM, %asi 3838 PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m) 3839 cmp %g3, MSEG_NULLPTR_PA 3840 be,pn %xcc, sfmmu_kpm_exception /* if mseg not found */ 3841 nop 3842 3843 /* 3844 * inx = pfn - mseg_pa->kpm_pbase 3845 * g2=pfn g3=mseg_pa 3846 */ 3847 ldxa [%g3 + MEMSEG_KPM_PBASE]%asi, %g7 3848 sub %g2, %g7, %g4 3849 3850#ifdef DEBUG 3851 /* 3852 * Validate inx value 3853 * g2=pfn g3=mseg_pa g4=inx 3854 */ 3855 ldxa [%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5 3856 cmp %g4, %g5 /* inx - nkpmpgs */ 3857 bgeu,pn %xcc, sfmmu_kpm_exception /* if out of range */ 3858 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7 3859#else 3860 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7 3861#endif 3862 /* ksp = &mseg_pa->kpm_spages[inx] */ 3863 ldxa [%g3 + MEMSEG_KPM_SPAGES]%asi, 
	/*
	 * KPMP_SHASH(kp)
	 * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
	 */
	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
	sub	%g7, 1, %g7			/* mask */
	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
	add	%g5, %g1, %g5			/* y = ksp + x */
	and	%g5, %g7, %g5			/* hashinx = y & mask */

	/*
	 * Calculate physical kpm_spage pointer
	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
	 */
	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1	/* kpm_spagespa */
	add	%g1, %g4, %g1			/* ksp_pa */

	/*
	 * Calculate physical hash lock address.
	 * Note: Changes in kpm_shlk_t must be reflected here.
	 * g1=ksp_pa g2=pfn g5=hashinx
	 */
	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4	/* kpmp_stablepa */
	sllx	%g5, KPMSHLK_SHIFT, %g5
	add	%g4, %g5, %g3			/* hlck_pa */

	/*
	 * Assemble tte
	 * g1=ksp_pa g2=pfn g3=hlck_pa
	 */
	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
	sllx	%g5, 32, %g5
	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
	or	%g5, %g4, %g5
	sllx	%g2, MMU_PAGESHIFT, %g4
	or	%g5, %g4, %g5			/* tte */
	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */

	/*
	 * tsb dropin
	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
	 */

	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)

	/* use C-handler if there's no go for dropin */
	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
	cmp	%g7, -1
	bne,pn	%xcc, 5f
	nop

#ifndef sun4v
	ldub	[%g6 + KPMTSBM_FLAGS], %g7
	mov	ASI_N, %g1
	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
	movnz	%icc, ASI_MEM, %g1
	mov	%g1, %asi
#endif

	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)

	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)

	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)

	/* KPMLOCK_EXIT(kpmlckp, asi) */
	KPMLOCK_EXIT(%g3, ASI_MEM)

	/*
	 * If trapstat is running, we need to shift the %tpc and %tnpc to
	 * point to trapstat's TSB miss return code (note that trapstat
	 * itself will patch the correct offset to add).
	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
	 */
	rdpr	%tl, %g7
	cmp	%g7, 1
	ble	%icc, 0f
	sethi	%hi(KERNELBASE), %g6
	rdpr	%tpc, %g7
	or	%g6, %lo(KERNELBASE), %g6
	cmp	%g7, %g6
	bgeu	%xcc, 0f
	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
	wrpr	%g7, %tpc
	add	%g7, 4, %g7
	wrpr	%g7, %tnpc
0:
	retry
5:
	/* g3=hlck_pa */
	KPMLOCK_EXIT(%g3, ASI_MEM)
	ba,pt	%icc, sfmmu_kpm_exception
	nop
	SET_SIZE(sfmmu_kpm_dtsb_miss_small)

#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
#error	- KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
#endif

#endif /* lint */

#ifdef lint
/*
 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
 * Called from C-level, sets/clears "go" indication for trap level handler.
 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
 * Assumes khl_mutex is held when called from C-level.
 */
/* ARGSUSED */
void
sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
{
}

/*
 * kpm_smallpages: stores val to the byte at address "mapped" within
 * low level lock brackets; the old value is returned.
 * Called from C-level.
 */
/* ARGSUSED */
int
sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
{
	return (0);
}
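
/*
 * Illustrative C-level sketch of what the assembly implementations below
 * do under the kpm spin locks.  This is a sketch only; KPMLOCK_ENTER,
 * KPMLOCK_EXIT and the DEBUG interrupt check are the real primitives, and
 * the helper names used here are for illustration:
 *
 *	sfmmu_kpm_tsbmtl(kp_refcntc, khl_lock, cmd):
 *		pstate = save_and_disable_interrupts();
 *		kpmlock_enter(khl_lock);
 *		*kp_refcntc = (cmd != 0) ? -1 : 0;	(16-bit store)
 *		kpmlock_exit(khl_lock);
 *		restore_interrupts(pstate);
 *
 *	sfmmu_kpm_stsbmtl(mapped, kshl_lock, val):
 *		pstate = save_and_disable_interrupts();
 *		kpmlock_enter(kshl_lock);
 *		old = *mapped;
 *		*mapped = val;				(8-bit store)
 *		kpmlock_exit(kshl_lock);
 *		restore_interrupts(pstate);
 *		return (old);
 */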

#else /* lint */

	.seg	".data"
sfmmu_kpm_tsbmtl_panic:
	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
	.byte	0
sfmmu_kpm_stsbmtl_panic:
	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
	.byte	0
	.align	4
	.seg	".text"

	ENTRY_NP(sfmmu_kpm_tsbmtl)
	rdpr	%pstate, %o3
	/*
	 * %o0 = &kp_refcntc
	 * %o1 = &khl_lock
	 * %o2 = 0/1 (off/on)
	 * %o3 = pstate save
	 */
#ifdef DEBUG
	andcc	%o3, PSTATE_IE, %g0	/* if interrupts already */
	bnz,pt	%icc, 1f		/* disabled, panic	 */
	nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
	call	panic
	or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
	ret
	restore
1:
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */

	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
	mov	-1, %o5
	brz,a	%o2, 2f
	mov	0, %o5
2:
	sth	%o5, [%o0]
	KPMLOCK_EXIT(%o1, ASI_N)

	retl
	wrpr	%g0, %o3, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_kpm_tsbmtl)

	ENTRY_NP(sfmmu_kpm_stsbmtl)
	rdpr	%pstate, %o3
	/*
	 * %o0 = &mapped
	 * %o1 = &kshl_lock
	 * %o2 = val
	 * %o3 = pstate save
	 */
#ifdef DEBUG
	andcc	%o3, PSTATE_IE, %g0	/* if interrupts already */
	bnz,pt	%icc, 1f		/* disabled, panic	 */
	nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
	call	panic
	or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
	ret
	restore
1:
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */

	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
	ldsb	[%o0], %o5
	stb	%o2, [%o0]
	KPMLOCK_EXIT(%o1, ASI_N)

	mov	%o5, %o0	/* return old val */
	retl
	wrpr	%g0, %o3, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_kpm_stsbmtl)

#endif /* lint */

#ifndef lint
#ifdef sun4v
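	/*
	 * Both slow-path handlers below funnel into sfmmu_tsb_miss_tt with
	 * %g2 holding a tag-access style value; roughly (an illustrative
	 * C-level sketch, not the actual macro expansion):
	 *
	 *	tagacc = (vaddr & ~MMU_PAGEOFFSET) | ctx;
	 *
	 * The data-miss entry obtains this via GET_MMU_D_TAGACC_CTX; the
	 * instruction-miss entry rebuilds it from the MMU fault status area.
	 */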
	/*
	 * User/kernel data miss w/ multiple TSBs
	 * The first probe covers 8K, 64K, and 512K page sizes,
	 * because 64K and 512K mappings are replicated off 8K
	 * pointer. Second probe covers 4M page size only.
	 *
	 * MMU fault area contains miss address and context.
	 */
	ALTENTRY(sfmmu_slow_dmmu_miss)
	GET_MMU_D_TAGACC_CTX(%g2, %g3)	! %g2 = tagacc, %g3 = ctx

slow_miss_common:
	/*
	 * %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
	 * %g3 = ctx (cannot be INVALID_CONTEXT)
	 */
	brnz,pt	%g3, 8f			! check for user context
	nop

	/*
	 * Kernel miss
	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
	 * branch to sfmmu_tsb_miss_tt to handle it.
	 */
	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
sfmmu_dslow_patch_ktsb_base:
	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
sfmmu_dslow_patch_ktsb_szcode:
	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)

	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
	! %g1 = First TSB entry pointer, as TSB miss handler expects

	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
sfmmu_dslow_patch_ktsb4m_base:
	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
sfmmu_dslow_patch_ktsb4m_szcode:
	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)

	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
	.empty

8:
	/*
	 * User miss
	 * Get first TSB pointer in %g1
	 * Get second TSB pointer (or NULL if no second TSB) in %g3
	 * Branch to sfmmu_tsb_miss_tt to handle it
	 */
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
	/* %g1 = first TSB entry ptr now, %g2 preserved */

	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
	brlz,a,pt %g3, sfmmu_tsb_miss_tt	/* done if no 2nd TSB */
	mov	%g0, %g3

	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
	/* %g3 = second TSB entry ptr now, %g2 preserved */
9:
	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
	.empty
	SET_SIZE(sfmmu_slow_dmmu_miss)


	/*
	 * User/kernel instruction miss w/ multiple TSBs
	 * The first probe covers 8K, 64K, and 512K page sizes,
	 * because 64K and 512K mappings are replicated off 8K
	 * pointer. Second probe covers 4M page size only.
	 *
	 * MMU fault area contains miss address and context.
	 */
	ALTENTRY(sfmmu_slow_immu_miss)
	MMU_FAULT_STATUS_AREA(%g2)
	ldx	[%g2 + MMFSA_I_CTX], %g3
	ldx	[%g2 + MMFSA_I_ADDR], %g2
	srlx	%g2, MMU_PAGESHIFT, %g2	! align address to page boundary
	sllx	%g2, MMU_PAGESHIFT, %g2
	ba,pt	%xcc, slow_miss_common
	or	%g2, %g3, %g2
	SET_SIZE(sfmmu_slow_immu_miss)

#endif /* sun4v */
#endif /* lint */

#ifndef lint

/*
 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
 */
	.seg	".data"
	.align	64
	.global tsbmiss_area
tsbmiss_area:
	.skip	(TSBMISS_SIZE * NCPU)

	.align	64
	.global kpmtsbm_area
kpmtsbm_area:
	.skip	(KPMTSBM_SIZE * NCPU)
#endif /* lint */