/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
31 */ 32 33#if defined(lint) 34#include <sys/types.h> 35#else /* lint */ 36#include "assym.h" 37#endif /* lint */ 38 39#include <sys/asm_linkage.h> 40#include <sys/machtrap.h> 41#include <sys/machasi.h> 42#include <sys/sun4asi.h> 43#include <sys/pte.h> 44#include <sys/mmu.h> 45#include <vm/hat_sfmmu.h> 46#include <vm/seg_spt.h> 47#include <sys/machparam.h> 48#include <sys/privregs.h> 49#include <sys/scb.h> 50#include <sys/intreg.h> 51#include <sys/machthread.h> 52#include <sys/clock.h> 53#include <sys/trapstat.h> 54 55/* 56 * sfmmu related subroutines 57 */ 58 59#if defined (lint) 60 61/* ARGSUSED */ 62void 63sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx) 64{} 65 66int 67sfmmu_getctx_pri() 68{ return(0); } 69 70int 71sfmmu_getctx_sec() 72{ return(0); } 73 74/* ARGSUSED */ 75void 76sfmmu_setctx_sec(uint_t ctx) 77{} 78 79/* ARGSUSED */ 80void 81sfmmu_load_mmustate(sfmmu_t *sfmmup) 82{ 83} 84 85#else /* lint */ 86 87/* 88 * Invalidate either the context of a specific victim or any process 89 * currently running on this CPU. 90 * 91 * %g1 = sfmmup whose ctx is being stolen (victim) 92 * when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT. 93 * Note %g1 is the only input argument used by this xcall handler. 94 */ 95 96 ENTRY(sfmmu_raise_tsb_exception) 97 ! 98 ! if (victim == INVALID_CONTEXT) { 99 ! if (sec-ctx > INVALID_CONTEXT) 100 ! write INVALID_CONTEXT to sec-ctx 101 ! if (pri-ctx > INVALID_CONTEXT) 102 ! write INVALID_CONTEXT to pri-ctx 103 ! 104 ! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) { 105 ! return 106 ! } else { 107 ! if (sec-ctx > INVALID_CONTEXT) 108 ! write INVALID_CONTEXT to sec-ctx 109 ! 110 ! if (pri-ctx > INVALID_CONTEXT) 111 ! write INVALID_CONTEXT to pri-ctx 112 ! } 113 ! 
114 115 sethi %hi(ksfmmup), %g3 116 ldx [%g3 + %lo(ksfmmup)], %g3 117 cmp %g1, %g3 118 be,a,pn %xcc, ptl1_panic /* can't invalidate kernel ctx */ 119 mov PTL1_BAD_RAISE_TSBEXCP, %g1 120 121 set INVALID_CONTEXT, %g2 122 123 cmp %g1, INVALID_CONTEXT 124 bne,pt %xcc, 1f /* called from wrap_around? */ 125 mov MMU_SCONTEXT, %g3 126 127 ldxa [%g3]ASI_MMU_CTX, %g5 /* %g5 = sec-ctx */ 128 cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx ? */ 129 ble,pn %xcc, 0f /* yes, no need to change */ 130 mov MMU_PCONTEXT, %g7 131 132 stxa %g2, [%g3]ASI_MMU_CTX /* set invalid ctx */ 133 membar #Sync 134 1350: 136 ldxa [%g7]ASI_MMU_CTX, %g5 /* %g5 = pri-ctx */ 137 cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx? */ 138 ble,pn %xcc, 6f /* yes, no need to change */ 139 nop 140 141 stxa %g2, [%g7]ASI_MMU_CTX /* set pri-ctx to invalid */ 142 membar #Sync 143 1446: /* flushall tlb */ 145 mov %o0, %g3 146 mov %o1, %g4 147 mov %o2, %g6 148 mov %o5, %g7 149 150 mov %g0, %o0 ! XXX no cpu list yet 151 mov %g0, %o1 ! XXX no cpu list yet 152 mov MAP_ITLB | MAP_DTLB, %o2 153 mov MMU_DEMAP_ALL, %o5 154 ta FAST_TRAP 155 brz,pt %o0, 5f 156 nop 157 ba ptl1_panic /* bad HV call */ 158 mov PTL1_BAD_RAISE_TSBEXCP, %g1 1595: 160 mov %g3, %o0 161 mov %g4, %o1 162 mov %g6, %o2 163 mov %g7, %o5 164 165 ba 3f 166 nop 1671: 168 /* 169 * %g1 = sfmmup 170 * %g2 = INVALID_CONTEXT 171 * %g3 = MMU_SCONTEXT 172 */ 173 CPU_TSBMISS_AREA(%g5, %g6) /* load cpu tsbmiss area */ 174 ldx [%g5 + TSBMISS_UHATID], %g5 /* load usfmmup */ 175 176 cmp %g5, %g1 /* is it the victim? */ 177 bne,pt %xcc, 2f /* is our sec-ctx a victim? */ 178 nop 179 180 ldxa [%g3]ASI_MMU_CTX, %g5 /* %g5 = sec-ctx */ 181 cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx ? */ 182 ble,pn %xcc, 0f /* yes, no need to change */ 183 mov MMU_PCONTEXT, %g7 184 185 stxa %g2, [%g3]ASI_MMU_CTX /* set sec-ctx to invalid */ 186 membar #Sync 187 1880: 189 ldxa [%g7]ASI_MMU_CTX, %g4 /* %g4 = pri-ctx */ 190 cmp %g4, INVALID_CONTEXT /* is pri-ctx the victim? 
*/ 191 ble %icc, 3f /* no need to change pri-ctx */ 192 nop 193 stxa %g2, [%g7]ASI_MMU_CTX /* set pri-ctx to invalid */ 194 membar #Sync 195 1963: 197 /* TSB program must be cleared - walkers do not check a context. */ 198 mov %o0, %g3 199 mov %o1, %g4 200 mov %o5, %g7 201 clr %o0 202 clr %o1 203 mov MMU_TSB_CTXNON0, %o5 204 ta FAST_TRAP 205 brnz,a,pn %o0, ptl1_panic 206 mov PTL1_BAD_HCALL, %g1 207 mov %g3, %o0 208 mov %g4, %o1 209 mov %g7, %o5 2102: 211 retry 212 SET_SIZE(sfmmu_raise_tsb_exception) 213 214 ENTRY_NP(sfmmu_getctx_pri) 215 set MMU_PCONTEXT, %o0 216 retl 217 ldxa [%o0]ASI_MMU_CTX, %o0 218 SET_SIZE(sfmmu_getctx_pri) 219 220 ENTRY_NP(sfmmu_getctx_sec) 221 set MMU_SCONTEXT, %o0 222 retl 223 ldxa [%o0]ASI_MMU_CTX, %o0 224 SET_SIZE(sfmmu_getctx_sec) 225 226 /* 227 * Set the secondary context register for this process. 228 * %o0 = context number 229 */ 230 ENTRY_NP(sfmmu_setctx_sec) 231 /* 232 * From resume we call sfmmu_setctx_sec with interrupts disabled. 233 * But we can also get called from C with interrupts enabled. So, 234 * we need to check first. 235 */ 236 237 /* If interrupts are not disabled, then disable them */ 238 rdpr %pstate, %g1 239 btst PSTATE_IE, %g1 240 bnz,a,pt %icc, 1f 241 wrpr %g1, PSTATE_IE, %pstate /* disable interrupts */ 2421: 243 mov MMU_SCONTEXT, %o1 244 sethi %hi(FLUSH_ADDR), %o4 245 stxa %o0, [%o1]ASI_MMU_CTX /* set 2nd context reg. */ 246 flush %o4 247 248 /* 249 * if the routine is entered with intr enabled, then enable intr now. 250 * otherwise, keep intr disabled, return without enabing intr. 251 * %g1 - old intr state 252 */ 253 btst PSTATE_IE, %g1 254 bnz,a,pt %icc, 2f 255 wrpr %g0, %g1, %pstate /* enable interrupts */ 2562: retl 257 nop 258 SET_SIZE(sfmmu_setctx_sec) 259 260 /* 261 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS. 262 * returns the detection value in %o0. 
263 */ 264 ENTRY_NP(sfmmu_setup_4lp) 265 set ktsb_phys, %o2 266 mov 1, %o1 267 st %o1, [%o2] 268 retl 269 mov %o1, %o0 270 SET_SIZE(sfmmu_setup_4lp) 271 272 /* 273 * Called to load MMU registers and tsbmiss area 274 * for the active process. This function should 275 * only be called from TL=0. 276 * 277 * %o0 - hat pointer 278 */ 279 ENTRY_NP(sfmmu_load_mmustate) 280 281#ifdef DEBUG 282 PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1) 283#endif /* DEBUG */ 284 285 sethi %hi(ksfmmup), %o3 286 ldx [%o3 + %lo(ksfmmup)], %o3 287 cmp %o3, %o0 288 be,pn %xcc, 7f ! if kernel as, do nothing 289 nop 290 291 set MMU_SCONTEXT, %o3 292 ldxa [%o3]ASI_MMU_CTX, %o5 293 294 cmp %o5, INVALID_CONTEXT ! ctx is invalid? 295 bne,pt %icc, 1f 296 nop 297 298 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area 299 stx %o0, [%o2 + TSBMISS_UHATID] 300 stx %g0, [%o2 + TSBMISS_SHARED_UHATID] 301#ifdef DEBUG 302 /* check if hypervisor/hardware should handle user TSB */ 303 sethi %hi(hv_use_non0_tsb), %o2 304 ld [%o2 + %lo(hv_use_non0_tsb)], %o2 305 brz,pn %o2, 0f 306 nop 307#endif /* DEBUG */ 308 clr %o0 ! ntsb = 0 for invalid ctx 309 clr %o1 ! HV_TSB_INFO_PA = 0 if inv ctx 310 mov MMU_TSB_CTXNON0, %o5 311 ta FAST_TRAP ! set TSB info for user process 312 brnz,a,pn %o0, panic_bad_hcall 313 mov MMU_TSB_CTXNON0, %o1 3140: 315 retl 316 nop 3171: 318 /* 319 * We need to set up the TSB base register, tsbmiss 320 * area, and pass the TSB information into the hypervisor 321 */ 322 ldx [%o0 + SFMMU_TSB], %o1 ! %o1 = first tsbinfo 323 ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second tsbinfo 324 325 /* create/set first UTSBREG */ 326 MAKE_UTSBREG(%o1, %o2, %o3) ! %o2 = user tsbreg 327 SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3) 328 329 brz,pt %g2, 2f 330 mov -1, %o2 ! use -1 if no second TSB 331 332 /* make 2nd UTSBREG */ 333 MAKE_UTSBREG(%g2, %o2, %o3) ! %o2 = user tsbreg 3342: 335 SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3) 336 337 /* make 3rd and 4th TSB */ 338 CPU_TSBMISS_AREA(%o4, %o3) ! 
%o4 = tsbmiss area 339 340 ldx [%o0 + SFMMU_SCDP], %g2 ! %g2 = sfmmu_scd 341 brz,pt %g2, 3f 342 mov -1, %o2 ! use -1 if no third TSB 343 344 ldx [%g2 + SCD_SFMMUP], %g3 ! %g3 = scdp->scd_sfmmup 345 ldx [%g3 + SFMMU_TSB], %o1 ! %o1 = first scd tsbinfo 346 brz,pn %o1, 9f 347 nop ! panic if no third TSB 348 349 /* make 3rd UTSBREG */ 350 MAKE_UTSBREG(%o1, %o2, %o3) ! %o2 = user tsbreg 3513: 352 SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR, %o2) 353 354 brz,pt %g2, 4f 355 mov -1, %o2 ! use -1 if no 3rd or 4th TSB 356 357 brz,pt %o1, 4f 358 mov -1, %o2 ! use -1 if no 3rd or 4th TSB 359 ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second scd tsbinfo 360 brz,pt %g2, 4f 361 mov -1, %o2 ! use -1 if no 4th TSB 362 363 /* make 4th UTSBREG */ 364 MAKE_UTSBREG(%g2, %o2, %o3) ! %o2 = user tsbreg 3654: 366 SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR4M, %o2) 367 368#ifdef DEBUG 369 /* check if hypervisor/hardware should handle user TSB */ 370 sethi %hi(hv_use_non0_tsb), %o2 371 ld [%o2 + %lo(hv_use_non0_tsb)], %o2 372 brz,pn %o2, 6f 373 nop 374#endif /* DEBUG */ 375 CPU_ADDR(%o2, %o4) ! load CPU struct addr to %o2 using %o4 376 ldub [%o2 + CPU_TSTAT_FLAGS], %o1 ! load cpu_tstat_flag to %o1 377 378 mov %o0, %o3 ! preserve %o0 379 btst TSTAT_TLB_STATS, %o1 380 bnz,a,pn %icc, 5f ! ntsb = 0 if TLB stats enabled 381 clr %o0 382 383 ldx [%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0 3845: 385 ldx [%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1 386 mov MMU_TSB_CTXNON0, %o5 387 ta FAST_TRAP ! set TSB info for user process 388 brnz,a,pn %o0, panic_bad_hcall 389 mov MMU_TSB_CTXNON0, %o1 390 mov %o3, %o0 ! restore %o0 3916: 392 ldx [%o0 + SFMMU_ISMBLKPA], %o1 ! copy members of sfmmu 393 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area 394 stx %o1, [%o2 + TSBMISS_ISMBLKPA] ! sfmmu_tsb_miss into the 395 ldub [%o0 + SFMMU_TTEFLAGS], %o3 ! per-CPU tsbmiss area. 
396 ldub [%o0 + SFMMU_RTTEFLAGS], %o4 397 ldx [%o0 + SFMMU_SRDP], %o1 398 stx %o0, [%o2 + TSBMISS_UHATID] 399 stub %o3, [%o2 + TSBMISS_UTTEFLAGS] 400 stub %o4, [%o2 + TSBMISS_URTTEFLAGS] 401 stx %o1, [%o2 + TSBMISS_SHARED_UHATID] 402 brz,pn %o1, 7f ! check for sfmmu_srdp 403 add %o0, SFMMU_HMERMAP, %o1 404 add %o2, TSBMISS_SHMERMAP, %o2 405 mov SFMMU_HMERGNMAP_WORDS, %o3 406 ! set tsbmiss shmermap 407 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate) 408 409 ldx [%o0 + SFMMU_SCDP], %o4 ! %o4 = sfmmu_scd 410 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area 411 mov SFMMU_HMERGNMAP_WORDS, %o3 412 brnz,pt %o4, 8f ! check for sfmmu_scdp else 413 add %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap 414 ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate) 4157: 416 retl 417 nop 4188: ! set tsbmiss scd_shmermap 419 add %o4, SCD_HMERMAP, %o1 420 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate) 421 retl 422 nop 4239: 424 sethi %hi(panicstr), %g1 ! panic if no 3rd TSB 425 ldx [%g1 + %lo(panicstr)], %g1 426 tst %g1 427 428 bnz,pn %xcc, 7b 429 nop 430 431 sethi %hi(sfmmu_panic10), %o0 432 call panic 433 or %o0, %lo(sfmmu_panic10), %o0 434 435 SET_SIZE(sfmmu_load_mmustate) 436 437#endif /* lint */ 438 439#if defined(lint) 440 441/* Prefetch "struct tsbe" while walking TSBs */ 442/*ARGSUSED*/ 443void 444prefetch_tsbe_read(struct tsbe *tsbep) 445{} 446 447/* Prefetch the tsbe that we are about to write */ 448/*ARGSUSED*/ 449void 450prefetch_tsbe_write(struct tsbe *tsbep) 451{} 452 453#else /* lint */ 454 455 ENTRY(prefetch_tsbe_read) 456 retl 457 nop 458 SET_SIZE(prefetch_tsbe_read) 459 460 ENTRY(prefetch_tsbe_write) 461 retl 462 nop 463 SET_SIZE(prefetch_tsbe_write) 464#endif /* lint */ 465