1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27#pragma ident "%Z%%M% %I% %E% SMI" 28 29#if !defined(lint) 30#include "assym.h" 31#endif /* !lint */ 32#include <sys/asm_linkage.h> 33#include <sys/privregs.h> 34#include <sys/sun4asi.h> 35#include <sys/machasi.h> 36#include <sys/hypervisor_api.h> 37#include <sys/machtrap.h> 38#include <sys/machthread.h> 39#include <sys/pcb.h> 40#include <sys/pte.h> 41#include <sys/mmu.h> 42#include <sys/machpcb.h> 43#include <sys/async.h> 44#include <sys/intreg.h> 45#include <sys/scb.h> 46#include <sys/psr_compat.h> 47#include <sys/syscall.h> 48#include <sys/machparam.h> 49#include <sys/traptrace.h> 50#include <vm/hat_sfmmu.h> 51#include <sys/archsystm.h> 52#include <sys/utrap.h> 53#include <sys/clock.h> 54#include <sys/intr.h> 55#include <sys/fpu/fpu_simulator.h> 56#include <vm/seg_spt.h> 57 58/* 59 * WARNING: If you add a fast trap handler which can be invoked by a 60 * non-privileged user, you may have to use the FAST_TRAP_DONE macro 61 * instead of "done" instruction to return back to the user mode. 
 * See comments for the "fast_trap_done" entry point for more information.
 *
 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
 * cases where you always want to process any pending interrupts before
 * returning back to the user mode.
 */
#define	FAST_TRAP_DONE	\
	ba,a	fast_trap_done

#define	FAST_TRAP_DONE_CHK_INTR	\
	ba,a	fast_trap_done_chk_intr

/*
 * SPARC V9 Trap Table
 *
 * Most of the trap handlers are made from common building
 * blocks, and some are instantiated multiple times within
 * the trap table.  So, I build a bunch of macros, then
 * populate the table using only the macros.
 *
 * Many macros branch to sys_trap.  Its calling convention is:
 *	%g1		kernel trap handler
 *	%g2, %g3	args for above
 *	%g4		desired %pil
 */

#ifdef	TRAPTRACE

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#define	TT_TRACE_INS	2

#define	TT_TRACE_L(label)	\
	ba	label		;\
	rd	%pc, %l4	;\
	clr	%l4
#define	TT_TRACE_L_INS	3

#else

#define	TT_TRACE(label)
#define	TT_TRACE_INS	0

#define	TT_TRACE_L(label)
#define	TT_TRACE_L_INS	0

#endif

/*
 * This macro is used to update per cpu mmu stats in perf critical
 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER
 * is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_PERCPU_DBSTAT(stat)			\
	mov	stat, %g1			;\
	ba	stat_mmu			;\
	rd	%pc, %g7
#else
#define	HAT_PERCPU_DBSTAT(stat)
#endif /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This first set are funneled to trap() with %tt as the type.
 * Trap will then either panic or send the user a signal.
 */
/*
 * NOT is used for traps that just shouldn't happen.
 * It comes in both single and quadruple flavors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	NOT			\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32
#define	NOT4	NOT; NOT; NOT; NOT

/*
 * NOTP panics (via ptl1_panic) rather than delivering a signal.
 */
#define	NOTP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, ptl1_panic	;\
	mov	PTL1_BAD_TRAP, %g1	;\
	.align	32
#define	NOTP4	NOTP; NOTP; NOTP; NOTP


/*
 * BAD is used for trap vectors we don't have a kernel
 * handler for.
 * It also comes in single and quadruple versions.
 */
#define	BAD	NOT
#define	BAD4	NOT4

/* DONE retires the trap with the "done" instruction (resume at %tnpc). */
#define	DONE			\
	done;			\
	.align	32

/*
 * TRAP vectors to the trap() function.
 * Its main use is for user errors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	TRAP(arg)		\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	mov	arg, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

/*
 * SYSCALL is used for system calls on both ILP32 and LP64 kernels
 * depending on the "which" parameter (should be syscall_trap,
 * syscall_trap32, or nosys for unused system call traps).
 */
#define	SYSCALL(which)			\
	TT_TRACE(trace_gen)		;\
	set	(which), %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

/*
 * GOTO just jumps to a label.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO(label)		\
	.global	label		;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * GOTO_TT just jumps to a label.
 * correctable ECC error traps at level 0 and 1 will use this macro.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO_TT(label, ttlabel)		\
	.global	label			;\
	TT_TRACE(ttlabel)		;\
	ba,a	label			;\
	.empty				;\
	.align	32

/*
 * Privileged traps
 * Takes breakpoint if privileged, calls trap() if not.
 */
#define	PRIV(label)			\
	rdpr	%tstate, %g1		;\
	btst	TSTATE_PRIV, %g1	;\
	bnz	label			;\
	rdpr	%tt, %g3		;\
	set	trap, %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32


/*
 * DTrace traps.
 */
#define	DTRACE_PID			\
	.global	dtrace_pid_probe	;\
	set	dtrace_pid_probe, %g1	;\
	ba,pt	%xcc, user_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#define	DTRACE_RETURN				\
	.global	dtrace_return_probe		;\
	set	dtrace_return_probe, %g1	;\
	ba,pt	%xcc, user_trap			;\
	sub	%g0, 1, %g4			;\
	.align	32

/*
 * REGISTER WINDOW MANAGEMENT MACROS
 */

/*
 * various convenient units of padding
 */
#define	SKIP(n)	.skip 4*(n)

/*
 * CLEAN_WINDOW is the simple handler for cleaning a register window.
 */
#define	CLEAN_WINDOW						\
	TT_TRACE_L(trace_win)					;\
	rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	;\
	clr %l0; clr %l1; clr %l2; clr %l3			;\
	clr %l4; clr %l5; clr %l6; clr %l7			;\
	clr %o0; clr %o1; clr %o2; clr %o3			;\
	clr %o4; clr %o5; clr %o6; clr %o7			;\
	retry; .align 128

#if !defined(lint)

/*
 * If we get an unresolved tlb miss while in a window handler, the fault
 * handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel.  Spill handlers
 * use this to spill windows into the wbuf.
 *
 * The mixed handler works by checking %sp, and branching to the correct
 * handler.  This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames; which implies the handler order is: 32b,
 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
 */

/*
 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.
 * The stack pointer is required to be eight-byte aligned even
 * though this code only needs it to be four-byte aligned.
 */
#define	SPILL_32bit(tail)					\
	srl	%sp, 0, %sp					;\
1:	st	%l0, [%sp + 0]					;\
	st	%l1, [%sp + 4]					;\
	st	%l2, [%sp + 8]					;\
	st	%l3, [%sp + 12]					;\
	st	%l4, [%sp + 16]					;\
	st	%l5, [%sp + 20]					;\
	st	%l6, [%sp + 24]					;\
	st	%l7, [%sp + 28]					;\
	st	%i0, [%sp + 32]					;\
	st	%i1, [%sp + 36]					;\
	st	%i2, [%sp + 40]					;\
	st	%i3, [%sp + 44]					;\
	st	%i4, [%sp + 48]					;\
	st	%i5, [%sp + 52]					;\
	st	%i6, [%sp + 56]					;\
	st	%i7, [%sp + 60]					;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	/* pad so the fault branch sits at the vector's last slot */ ;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	SPILL_32bit_asi(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
1:	sta	%l0, [%sp + %g0]asi_num				;\
	mov	4, %g1						;\
	sta	%l1, [%sp + %g1]asi_num				;\
	mov	8, %g2						;\
	sta	%l2, [%sp + %g2]asi_num				;\
	mov	12, %g3						;\
	sta	%l3, [%sp + %g3]asi_num				;\
	add	%sp, 16, %g4					;\
	sta	%l4, [%g4 + %g0]asi_num				;\
	sta	%l5, [%g4 + %g1]asi_num				;\
	sta	%l6, [%g4 + %g2]asi_num				;\
	sta	%l7, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i0, [%g4 + %g0]asi_num				;\
	sta	%i1, [%g4 + %g1]asi_num				;\
	sta	%i2, [%g4 + %g2]asi_num				;\
	sta	%i3, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i4, [%g4 + %g0]asi_num				;\
	sta	%i5, [%g4 + %g1]asi_num				;\
	sta	%i6, [%g4 + %g2]asi_num				;\
	sta	%i7, [%g4 + %g3]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/* _tt1 variant: no inline spill, branch straight to the fault handler. */
#define	SPILL_32bit_tt1(asi_num, tail)				\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty							;\
	.align	128


/*
 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned even though this
 * code only needs it to be four-byte aligned.
 */
#define	FILL_32bit(tail)					\
	srl	%sp, 0, %sp					;\
1:	TT_TRACE_L(trace_win)					;\
	ld	[%sp + 0], %l0					;\
	ld	[%sp + 4], %l1					;\
	ld	[%sp + 8], %l2					;\
	ld	[%sp + 12], %l3					;\
	ld	[%sp + 16], %l4					;\
	ld	[%sp + 20], %l5					;\
	ld	[%sp + 24], %l6					;\
	ld	[%sp + 28], %l7					;\
	ld	[%sp + 32], %i0					;\
	ld	[%sp + 36], %i1					;\
	ld	[%sp + 40], %i2					;\
	ld	[%sp + 44], %i3					;\
	ld	[%sp + 48], %i4					;\
	ld	[%sp + 52], %i5					;\
	ld	[%sp + 56], %i6					;\
	ld	[%sp + 60], %i7					;\
	restored						;\
	retry							;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	FILL_32bit_asi(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
1:	TT_TRACE_L(trace_win)					;\
	mov	4, %g1						;\
	lda	[%sp + %g0]asi_num, %l0				;\
	mov	8, %g2						;\
	lda	[%sp + %g1]asi_num, %l1				;\
	mov	12, %g3						;\
	lda	[%sp + %g2]asi_num, %l2				;\
	lda	[%sp + %g3]asi_num, %l3				;\
	add	%sp, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %l4				;\
	lda	[%g4 + %g1]asi_num, %l5				;\
	lda	[%g4 + %g2]asi_num, %l6				;\
	lda	[%g4 + %g3]asi_num, %l7				;\
	add	%g4, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %i0				;\
	lda	[%g4 + %g1]asi_num, %i1				;\
	lda	[%g4 + %g2]asi_num, %i2				;\
	lda	[%g4 + %g3]asi_num, %i3				;\
	add	%g4, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %i4				;\
	lda	[%g4 + %g1]asi_num, %i5				;\
	lda	[%g4 + %g2]asi_num, %i6				;\
	lda	[%g4 + %g3]asi_num, %i7				;\
	restored						;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty


/*
 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.
 * The stack pointer is required to be eight-byte aligned.
 */
#define	SPILL_64bit(tail)					\
2:	stx	%l0, [%sp + V9BIAS64 + 0]			;\
	stx	%l1, [%sp + V9BIAS64 + 8]			;\
	stx	%l2, [%sp + V9BIAS64 + 16]			;\
	stx	%l3, [%sp + V9BIAS64 + 24]			;\
	stx	%l4, [%sp + V9BIAS64 + 32]			;\
	stx	%l5, [%sp + V9BIAS64 + 40]			;\
	stx	%l6, [%sp + V9BIAS64 + 48]			;\
	stx	%l7, [%sp + V9BIAS64 + 56]			;\
	stx	%i0, [%sp + V9BIAS64 + 64]			;\
	stx	%i1, [%sp + V9BIAS64 + 72]			;\
	stx	%i2, [%sp + V9BIAS64 + 80]			;\
	stx	%i3, [%sp + V9BIAS64 + 88]			;\
	stx	%i4, [%sp + V9BIAS64 + 96]			;\
	stx	%i5, [%sp + V9BIAS64 + 104]			;\
	stx	%i6, [%sp + V9BIAS64 + 112]			;\
	stx	%i7, [%sp + V9BIAS64 + 120]			;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-18-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/* _ktt1 variant: no inline spill, branch straight to the fault handler. */
#define	SPILL_64bit_ktt1(tail)					\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty							;\
	.align	128

/* Mixed-frame _ktt1: pick the 32b or 64b fault handler by %sp's low bit. */
#define	SPILL_mixed_ktt1(tail)					\
	btst	1, %sp						;\
	bz,a,pt	%xcc, fault_32bit_/**/tail			;\
	srl	%sp, 0, %sp					;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty							;\
	.align	128

/*
 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	SPILL_64bit_asi(asi_num, tail)				\
	mov	0 + V9BIAS64, %g1				;\
2:	stxa	%l0, [%sp + %g1]asi_num				;\
	mov	8 + V9BIAS64, %g2				;\
	stxa	%l1, [%sp + %g2]asi_num				;\
	mov	16 + V9BIAS64, %g3				;\
	stxa	%l2, [%sp + %g3]asi_num				;\
	mov	24 + V9BIAS64, %g4				;\
	stxa	%l3, [%sp + %g4]asi_num				;\
	add	%sp, 32, %g5					;\
	stxa	%l4, [%g5 + %g1]asi_num				;\
	stxa	%l5, [%g5 + %g2]asi_num				;\
	stxa	%l6, [%g5 + %g3]asi_num				;\
	stxa	%l7, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i0, [%g5 + %g1]asi_num				;\
	stxa	%i1, [%g5 + %g2]asi_num				;\
	stxa	%i2, [%g5 + %g3]asi_num				;\
	stxa	%i3, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i4, [%g5 + %g1]asi_num				;\
	stxa	%i5, [%g5 + %g2]asi_num				;\
	stxa	%i6, [%g5 + %g3]asi_num				;\
	stxa	%i7, [%g5 + %g4]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/* _tt1 variant: no inline spill, branch straight to the fault handler. */
#define	SPILL_64bit_tt1(asi_num, tail)				\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty							;\
	.align	128

/*
 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit(tail)					\
2:	TT_TRACE_L(trace_win)					;\
	ldx	[%sp + V9BIAS64 + 0], %l0			;\
	ldx	[%sp + V9BIAS64 + 8], %l1			;\
	ldx	[%sp + V9BIAS64 + 16], %l2			;\
	ldx	[%sp + V9BIAS64 + 24], %l3			;\
	ldx	[%sp + V9BIAS64 + 32], %l4			;\
	ldx	[%sp + V9BIAS64 + 40], %l5			;\
	ldx	[%sp + V9BIAS64 + 48], %l6			;\
	ldx	[%sp + V9BIAS64 + 56], %l7			;\
	ldx	[%sp + V9BIAS64 + 64], %i0			;\
	ldx	[%sp + V9BIAS64 + 72], %i1			;\
	ldx	[%sp + V9BIAS64 + 80], %i2			;\
	ldx	[%sp + V9BIAS64 + 88], %i3			;\
	ldx	[%sp + V9BIAS64 + 96], %i4			;\
	ldx	[%sp + V9BIAS64 + 104], %i5			;\
	ldx	[%sp + V9BIAS64 + 112], %i6			;\
	ldx	[%sp + V9BIAS64 + 120], %i7			;\
	restored						;\
	retry							;\
	SKIP(31-18-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/*
 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	FILL_64bit_asi(asi_num, tail)				\
	mov	V9BIAS64 + 0, %g1				;\
2:	TT_TRACE_L(trace_win)					;\
	ldxa	[%sp + %g1]asi_num, %l0				;\
	mov	V9BIAS64 + 8, %g2				;\
	ldxa	[%sp + %g2]asi_num, %l1				;\
	mov	V9BIAS64 + 16, %g3				;\
	ldxa	[%sp + %g3]asi_num, %l2				;\
	mov	V9BIAS64 + 24, %g4				;\
	ldxa	[%sp + %g4]asi_num, %l3				;\
	add	%sp, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %l4				;\
	ldxa	[%g5 + %g2]asi_num, %l5				;\
	ldxa	[%g5 + %g3]asi_num, %l6				;\
	ldxa	[%g5 + %g4]asi_num, %l7				;\
	add	%g5, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %i0				;\
	ldxa	[%g5 + %g2]asi_num, %i1				;\
	ldxa	[%g5 + %g3]asi_num, %i2				;\
	ldxa	[%g5 + %g4]asi_num, %i3				;\
	add	%g5, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %i4				;\
	ldxa	[%g5 + %g2]asi_num, %i5				;\
	ldxa	[%g5 + %g3]asi_num, %i6				;\
	ldxa	[%g5 + %g4]asi_num, %i7				;\
	restored						;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty


#endif /* !lint */

/*
 * SPILL_mixed spills either size window, depending on
 * whether %sp is even or odd, to a 32-bit address space.
 * This may only be used in conjunction with SPILL_32bit/
 * SPILL_64bit (it branches back to their 1: and 2: labels).
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	SPILL_mixed						\
	btst	1, %sp						;\
	bz,a,pt	%xcc, 1b					;\
	srl	%sp, 0, %sp					;\
	ba,pt	%xcc, 2b					;\
	nop							;\
	.align	128

/*
 * FILL_mixed(ASI) fills either size window, depending on
 * whether %sp is even or odd, from a 32-bit address space.
 * This may only be used in conjunction with FILL_32bit/
 * FILL_64bit.  New versions of FILL_mixed_{tt1,asi} would be
 * needed for use with FILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of fill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	FILL_mixed						\
	btst	1, %sp						;\
	bz,a,pt	%xcc, 1b					;\
	srl	%sp, 0, %sp					;\
	ba,pt	%xcc, 2b					;\
	nop							;\
	.align	128


/*
 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
 * respectively, into the address space via the designated asi.  The
 * unbiased stack pointer is required to be eight-byte aligned (even for
 * the 32-bit case even though this code does not require such strict
 * alignment).
 *
 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
 * window may contain kernel data so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap.  These handlers then
 * spill the appropriate window but also back up a window and clean the
 * window that didn't get a cleanwin trap.
 */
#define	SPILL_32clean(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
	sta	%l0, [%sp + %g0]asi_num				;\
	mov	4, %g1						;\
	sta	%l1, [%sp + %g1]asi_num				;\
	mov	8, %g2						;\
	sta	%l2, [%sp + %g2]asi_num				;\
	mov	12, %g3						;\
	sta	%l3, [%sp + %g3]asi_num				;\
	add	%sp, 16, %g4					;\
	sta	%l4, [%g4 + %g0]asi_num				;\
	sta	%l5, [%g4 + %g1]asi_num				;\
	sta	%l6, [%g4 + %g2]asi_num				;\
	sta	%l7, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i0, [%g4 + %g0]asi_num				;\
	sta	%i1, [%g4 + %g1]asi_num				;\
	sta	%i2, [%g4 + %g2]asi_num				;\
	sta	%i3, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i4, [%g4 + %g0]asi_num				;\
	sta	%i5, [%g4 + %g1]asi_num				;\
	sta	%i6, [%g4 + %g2]asi_num				;\
	sta	%i7, [%g4 + %g3]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	b	.spill_clean					;\
	mov	WSTATE_USER32, %g7				;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

#define	SPILL_64clean(asi_num, tail)				\
	mov	0 + V9BIAS64, %g1				;\
	stxa	%l0, [%sp + %g1]asi_num				;\
	mov	8 + V9BIAS64, %g2				;\
	stxa	%l1, [%sp + %g2]asi_num				;\
	mov	16 + V9BIAS64, %g3				;\
	stxa	%l2, [%sp + %g3]asi_num				;\
	mov	24 + V9BIAS64, %g4				;\
	stxa	%l3, [%sp + %g4]asi_num				;\
	add	%sp, 32, %g5					;\
	stxa	%l4, [%g5 + %g1]asi_num				;\
	stxa	%l5, [%g5 + %g2]asi_num				;\
	stxa	%l6, [%g5 + %g3]asi_num				;\
	stxa	%l7, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i0, [%g5 + %g1]asi_num				;\
	stxa	%i1, [%g5 + %g2]asi_num				;\
	stxa	%i2, [%g5 + %g3]asi_num				;\
	stxa	%i3, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i4, [%g5 + %g1]asi_num				;\
	stxa	%i5, [%g5 + %g2]asi_num				;\
	stxa	%i6, [%g5 + %g3]asi_num				;\
	stxa	%i7, [%g5 + %g4]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	b	.spill_clean					;\
	mov	WSTATE_USER64, %g7				;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty


/*
 * Floating point disabled.
 */
#define	FP_DISABLED_TRAP		\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc,.fp_disabled	;\
	nop				;\
	.align	32

/*
 * Floating point exceptions.
 */
#define	FP_IEEE_TRAP			\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc,.fp_ieee_exception	;\
	nop				;\
	.align	32

#define	FP_TRAP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc,.fp_exception	;\
	nop				;\
	.align	32

#if !defined(lint)

/*
 * ECACHE_ECC error traps at level 0 and level 1
 */
#define	ECACHE_ECC(table_name)		\
	.global	table_name		;\
table_name:				;\
	membar	#Sync			;\
	set	trap, %g1		;\
	rdpr	%tt, %g3		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#endif /* !lint */

/*
 * illegal instruction trap
 */
#define	ILLTRAP_INSTR			\
	membar	#Sync			;\
	TT_TRACE(trace_gen)		;\
	or	%g0, P_UTRAP4, %g2	;\
	or	%g0, T_UNIMP_INSTR, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32

/*
 * tag overflow trap
 */
#define	TAG_OVERFLOW			\
	TT_TRACE(trace_gen)		;\
	or	%g0, P_UTRAP10, %g2	;\
	or	%g0, T_TAG_OVERFLOW, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32

/*
 * divide by zero trap
 */
#define	DIV_BY_ZERO			\
	TT_TRACE(trace_gen)		;\
	or	%g0, P_UTRAP11, %g2	;\
	or	%g0, T_IDIV0, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32

/*
 * trap instruction for V9 user trap handlers
 */
#define	TRAP_INSTR			\
	TT_TRACE(trace_gen)		;\
	or	%g0, T_SOFTWARE_TRAP, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32
#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR

/*
 * LEVEL_INTERRUPT is for level N interrupts.
 * VECTOR_INTERRUPT is for the vector trap.
 */
#define	LEVEL_INTERRUPT(level)		\
	.global	tt_pil/**/level		;\
tt_pil/**/level:			;\
	ba,pt	%xcc, pil_interrupt	;\
	mov	level, %g4		;\
	.align	32

#define	LEVEL14_INTERRUPT		\
	ba	pil14_interrupt		;\
	mov	PIL_14, %g4		;\
	.align	32

#define	CPU_MONDO			\
	ba,a,pt	%xcc, cpu_mondo		;\
	.align	32

#define	DEV_MONDO			\
	ba,a,pt	%xcc, dev_mondo		;\
	.align	32

/*
 * We take over the rtba after we set our trap table and
 * fault status area.  The watchdog reset trap is now handled by the OS.
 */
#define	WATCHDOG_RESET			\
	mov	PTL1_BAD_WATCHDOG, %g1	;\
	ba,a,pt	%xcc, .watchdog_trap	;\
	.align	32

/*
 * RED is for traps that use the red mode handler.
 * We should never see these either.
 */
#define	RED				\
	mov	PTL1_BAD_RED, %g1	;\
	ba,a,pt	%xcc, .watchdog_trap	;\
	.align	32


/*
 * MMU Trap Handlers.
 */

/*
 * synthesize for trap(): SFSR in %g3
 */
#define	IMMU_EXCEPTION				\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	rdpr	%tpc, %g2			;\
	ldx	[%g3 + MMFSA_I_TYPE], %g1	;\
	ldx	[%g3 + MMFSA_I_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	or	%g3, %g1, %g3			;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_INSTR_EXCEPTION, %g1		;\
	.align	32

/*
 * synthesize for trap(): TAG_ACCESS in %g2, SFSR in %g3
 */
#define	DMMU_EXCEPTION				\
	ba,a,pt	%xcc, .dmmu_exception		;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_PRIV			\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2	;\
	/* Fault type not available in MMU fault status area */ ;\
	mov	MMFSA_F_PRVACT, %g1		;\
	ldx	[%g3 + MMFSA_D_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	ba,pt	%xcc, .mmu_priv_exception	;\
	or	%g3, %g1, %g3			;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_NOT_ALIGNED			\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2	;\
	/* Fault type not available in MMU fault status area */ ;\
	mov	MMFSA_F_UNALIGN, %g1		;\
	ldx	[%g3 + MMFSA_D_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	ba,pt	%xcc, .mmu_exception_not_aligned	;\
	or	%g3, %g1, %g3	/* SFSR */	;\
	.align	32
/*
 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
 */

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_LDDF_NOT_ALIGNED		\
	ba,a,pt	%xcc, .dmmu_exc_lddf_not_aligned	;\
	.align	32
/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_STDF_NOT_ALIGNED		\
	ba,a,pt	%xcc, .dmmu_exc_stdf_not_aligned	;\
	.align	32

#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 */
	.global	tt0_dtlbmiss
tt0_dtlbmiss:
	.global	tt1_dtlbmiss
tt1_dtlbmiss:
	nop
#endif

/*
 * Data miss handler (must be exactly 32 instructions)
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss
 * handler.
 *
 * User TLB miss handling depends upon whether a user process has one or
 * two TSBs. User TSB information (physical base and size code) is kept
 * in two dedicated scratchpad registers. Absence of a user TSB (primarily
 * second TSB) is indicated by a negative value (-1) in that register.
 */

/*
 * synthesize for miss handler: pseudo-tag access in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS)	/* 3 instr ifdef DEBUG */ ;\
	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG2, %g1				;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
	brgez,pn %g1, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
	ba,pt	%xcc, sfmmu_udtlb_fastpath	/* no 4M TSB, miss */	;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	.align	128


#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of ITLB_MISS().
 */
	.global	tt0_itlbmiss
tt0_itlbmiss:
	.global	tt1_itlbmiss
tt1_itlbmiss:
	nop
#endif

/*
 * Instruction miss handler.
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * ldda instructions will have their ASI patched
 * by sfmmu_patch_ktsb at runtime.
 * MUST be EXACTLY 32 instructions or we'll break.
 */

/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	ITLB_MISS(table_name)						\
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS)	/* 3 instr ifdef DEBUG */ ;\
	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG2, %g1				;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
	brgez,pn %g1, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
	ba,pt	%xcc, sfmmu_uitlb_fastpath	/* no 4M TSB, miss */	;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	.align	128

#define	DTSB_MISS \
	GOTO_TT(sfmmu_slow_dmmu_miss,trace_dmmu)

#define	ITSB_MISS \
	GOTO_TT(sfmmu_slow_immu_miss,trace_immu)

/*
 * This macro is the first level handler for fast protection faults.
 * It first demaps the tlb entry which generated the fault and then
 * attempts to set the modify bit on the hash.  It needs to be
 * exactly 32 instructions.
 */
/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	DTLB_PROT							\
	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */		;\
	/*								;\
	 * g2 = pseudo-tag access register (ctx type rather than ctx ID) ;\
	 * g3 = ctx type (0, 1, or 2)					;\
	 */								;\
	TT_TRACE(trace_dataprot)	/* 2 instr ifdef TRAPTRACE */	;\
	/* clobbers g1 and g6 XXXQ? */					;\
	brnz,pt	%g3, sfmmu_uprot_trap		/* user trap */		;\
	nop								;\
	ba,a,pt	%xcc, sfmmu_kprot_trap		/* kernel trap */	;\
	.align	128

/* TL>0 data MMU exception: funnel to the TL1 MMU trap handler. */
#define	DMMU_EXCEPTION_TL1					;\
	ba,a,pt	%xcc, mmu_trap_tl1				;\
	.align	32

/* TL>0 misaligned address: funnel to the TL1 MMU trap handler. */
#define	MISALIGN_ADDR_TL1					;\
	ba,a,pt	%xcc, mmu_trap_tl1				;\
	.align	32

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 * ttextra = value to OR in to trap type (%tt) (in)
 */
#ifdef TRAPTRACE
#define	TRACE_TSBHIT(ttextra)					\
	membar	#Sync						;\
	sethi	%hi(FLUSH_ADDR), %g6				;\
	flush	%g6						;\
	TRACE_PTR(%g3, %g6)					;\
	GET_TRACE_TICK(%g6)					;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi			;\
	stna	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */ ;\
	stna	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */	;\
	rdpr	%tnpc, %g6					;\
	stna	%g6, [%g3 + TRAP_ENT_F2]%asi			;\
	stna	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */ ;\
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi			;\
	rdpr	%tpc, %g6					;\
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi			;\
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)				;\
	rdpr	%tt, %g6					;\
	or	%g6, (ttextra), %g1				;\
	stha	%g1, [%g3 + TRAP_ENT_TT]%asi			;\
	MMU_FAULT_STATUS_AREA(%g4)				;\
	mov	MMFSA_D_ADDR, %g1				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	move	%xcc, MMFSA_I_ADDR, %g1				;\
	cmp	%g6, T_INSTR_MMU_MISS				;\
	move	%xcc, MMFSA_I_ADDR, %g1				;\
	ldx	[%g4 + %g1], %g1				;\
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */ ;\
	mov	MMFSA_D_CTX, %g1				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	move	%xcc, MMFSA_I_CTX, %g1				;\
	cmp	%g6, T_INSTR_MMU_MISS				;\
	move	%xcc, MMFSA_I_CTX, %g1				;\
	ldx	[%g4 + %g1], %g1				;\
	stna	%g1, [%g3 + TRAP_ENT_TR]%asi			;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
#define	TRACE_TSBHIT(ttextra)
#endif


#if
defined(lint) 1082 1083struct scb trap_table; 1084struct scb scb; /* trap_table/scb are the same object */ 1085 1086#else /* lint */ 1087 1088/* 1089 * ======================================================================= 1090 * SPARC V9 TRAP TABLE 1091 * 1092 * The trap table is divided into two halves: the first half is used when 1093 * taking traps when TL=0; the second half is used when taking traps from 1094 * TL>0. Note that handlers in the second half of the table might not be able 1095 * to make the same assumptions as handlers in the first half of the table. 1096 * 1097 * Worst case trap nesting so far: 1098 * 1099 * at TL=0 client issues software trap requesting service 1100 * at TL=1 nucleus wants a register window 1101 * at TL=2 register window clean/spill/fill takes a TLB miss 1102 * at TL=3 processing TLB miss 1103 * at TL=4 handle asynchronous error 1104 * 1105 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode". 1106 * 1107 * ======================================================================= 1108 */ 1109 .section ".text" 1110 .align 4 1111 .global trap_table, scb, trap_table0, trap_table1, etrap_table 1112 .type trap_table, #object 1113 .type trap_table0, #object 1114 .type trap_table1, #object 1115 .type scb, #object 1116trap_table: 1117scb: 1118trap_table0: 1119 /* hardware traps */ 1120 NOT; /* 000 reserved */ 1121 RED; /* 001 power on reset */ 1122 WATCHDOG_RESET; /* 002 watchdog reset */ 1123 RED; /* 003 externally initiated reset */ 1124 RED; /* 004 software initiated reset */ 1125 RED; /* 005 red mode exception */ 1126 NOT; NOT; /* 006 - 007 reserved */ 1127 IMMU_EXCEPTION; /* 008 instruction access exception */ 1128 ITSB_MISS; /* 009 instruction access MMU miss */ 1129 NOT; /* 00A reserved */ 1130 NOT; NOT4; /* 00B - 00F reserved */ 1131 ILLTRAP_INSTR; /* 010 illegal instruction */ 1132 TRAP(T_PRIV_INSTR); /* 011 privileged opcode */ 1133 TRAP(T_UNIMP_LDD); /* 012 unimplemented LDD */ 1134 TRAP(T_UNIMP_STD); /* 013 
unimplemented STD */ 1135 NOT4; NOT4; NOT4; /* 014 - 01F reserved */ 1136 FP_DISABLED_TRAP; /* 020 fp disabled */ 1137 FP_IEEE_TRAP; /* 021 fp exception ieee 754 */ 1138 FP_TRAP; /* 022 fp exception other */ 1139 TAG_OVERFLOW; /* 023 tag overflow */ 1140 CLEAN_WINDOW; /* 024 - 027 clean window */ 1141 DIV_BY_ZERO; /* 028 division by zero */ 1142 NOT; /* 029 internal processor error */ 1143 NOT; NOT; NOT4; /* 02A - 02F reserved */ 1144 DMMU_EXCEPTION; /* 030 data access exception */ 1145 DTSB_MISS; /* 031 data access MMU miss */ 1146 NOT; /* 032 reserved */ 1147 NOT; /* 033 data access protection */ 1148 DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */ 1149 DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */ 1150 DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */ 1151 DMMU_EXC_AG_PRIV; /* 037 privileged action */ 1152 NOT; /* 038 LDQF mem address not aligned */ 1153 NOT; /* 039 STQF mem address not aligned */ 1154 NOT; NOT; NOT4; /* 03A - 03F reserved */ 1155 NOT; /* 040 async data error */ 1156 LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */ 1157 LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */ 1158 LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */ 1159 LEVEL_INTERRUPT(4); /* 044 interrupt level 4 */ 1160 LEVEL_INTERRUPT(5); /* 045 interrupt level 5 */ 1161 LEVEL_INTERRUPT(6); /* 046 interrupt level 6 */ 1162 LEVEL_INTERRUPT(7); /* 047 interrupt level 7 */ 1163 LEVEL_INTERRUPT(8); /* 048 interrupt level 8 */ 1164 LEVEL_INTERRUPT(9); /* 049 interrupt level 9 */ 1165 LEVEL_INTERRUPT(10); /* 04A interrupt level 10 */ 1166 LEVEL_INTERRUPT(11); /* 04B interrupt level 11 */ 1167 LEVEL_INTERRUPT(12); /* 04C interrupt level 12 */ 1168 LEVEL_INTERRUPT(13); /* 04D interrupt level 13 */ 1169 LEVEL14_INTERRUPT; /* 04E interrupt level 14 */ 1170 LEVEL_INTERRUPT(15); /* 04F interrupt level 15 */ 1171 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F reserved */ 1172 NOT; /* 060 interrupt vector */ 1173 GOTO(kmdb_trap); /* 061 PA watchpoint */ 1174 
GOTO(kmdb_trap); /* 062 VA watchpoint */ 1175 NOT; /* 063 reserved */ 1176 ITLB_MISS(tt0); /* 064 instruction access MMU miss */ 1177 DTLB_MISS(tt0); /* 068 data access MMU miss */ 1178 DTLB_PROT; /* 06C data access protection */ 1179 NOT; /* 070 reserved */ 1180 NOT; /* 071 reserved */ 1181 NOT; /* 072 reserved */ 1182 NOT; /* 073 reserved */ 1183 NOT4; NOT4 /* 074 - 07B reserved */ 1184 CPU_MONDO; /* 07C cpu_mondo */ 1185 DEV_MONDO; /* 07D dev_mondo */ 1186 GOTO_TT(resumable_error, trace_gen); /* 07E resumable error */ 1187 GOTO_TT(nonresumable_error, trace_gen); /* 07F non-reasumable error */ 1188 NOT4; /* 080 spill 0 normal */ 1189 SPILL_32bit_asi(ASI_AIUP,sn0); /* 084 spill 1 normal */ 1190 SPILL_64bit_asi(ASI_AIUP,sn0); /* 088 spill 2 normal */ 1191 SPILL_32clean(ASI_AIUP,sn0); /* 08C spill 3 normal */ 1192 SPILL_64clean(ASI_AIUP,sn0); /* 090 spill 4 normal */ 1193 SPILL_32bit(not); /* 094 spill 5 normal */ 1194 SPILL_64bit(not); /* 098 spill 6 normal */ 1195 SPILL_mixed; /* 09C spill 7 normal */ 1196 NOT4; /* 0A0 spill 0 other */ 1197 SPILL_32bit_asi(ASI_AIUS,so0); /* 0A4 spill 1 other */ 1198 SPILL_64bit_asi(ASI_AIUS,so0); /* 0A8 spill 2 other */ 1199 SPILL_32bit_asi(ASI_AIUS,so0); /* 0AC spill 3 other */ 1200 SPILL_64bit_asi(ASI_AIUS,so0); /* 0B0 spill 4 other */ 1201 NOT4; /* 0B4 spill 5 other */ 1202 NOT4; /* 0B8 spill 6 other */ 1203 NOT4; /* 0BC spill 7 other */ 1204 NOT4; /* 0C0 fill 0 normal */ 1205 FILL_32bit_asi(ASI_AIUP,fn0); /* 0C4 fill 1 normal */ 1206 FILL_64bit_asi(ASI_AIUP,fn0); /* 0C8 fill 2 normal */ 1207 FILL_32bit_asi(ASI_AIUP,fn0); /* 0CC fill 3 normal */ 1208 FILL_64bit_asi(ASI_AIUP,fn0); /* 0D0 fill 4 normal */ 1209 FILL_32bit(not); /* 0D4 fill 5 normal */ 1210 FILL_64bit(not); /* 0D8 fill 6 normal */ 1211 FILL_mixed; /* 0DC fill 7 normal */ 1212 NOT4; /* 0E0 fill 0 other */ 1213 NOT4; /* 0E4 fill 1 other */ 1214 NOT4; /* 0E8 fill 2 other */ 1215 NOT4; /* 0EC fill 3 other */ 1216 NOT4; /* 0F0 fill 4 other */ 1217 NOT4; /* 0F4 fill 5 
other */ 1218 NOT4; /* 0F8 fill 6 other */ 1219 NOT4; /* 0FC fill 7 other */ 1220 /* user traps */ 1221 GOTO(syscall_trap_4x); /* 100 old system call */ 1222 TRAP(T_BREAKPOINT); /* 101 user breakpoint */ 1223 TRAP(T_DIV0); /* 102 user divide by zero */ 1224 GOTO(.flushw); /* 103 flush windows */ 1225 GOTO(.clean_windows); /* 104 clean windows */ 1226 BAD; /* 105 range check ?? */ 1227 GOTO(.fix_alignment); /* 106 do unaligned references */ 1228 BAD; /* 107 unused */ 1229 SYSCALL(syscall_trap32); /* 108 ILP32 system call on LP64 */ 1230 GOTO(set_trap0_addr); /* 109 set trap0 address */ 1231 BAD; BAD; BAD4; /* 10A - 10F unused */ 1232 TRP4; TRP4; TRP4; TRP4; /* 110 - 11F V9 user trap handlers */ 1233 GOTO(.getcc); /* 120 get condition codes */ 1234 GOTO(.setcc); /* 121 set condition codes */ 1235 GOTO(.getpsr); /* 122 get psr */ 1236 GOTO(.setpsr); /* 123 set psr (some fields) */ 1237 GOTO(get_timestamp); /* 124 get timestamp */ 1238 GOTO(get_virtime); /* 125 get lwp virtual time */ 1239 PRIV(self_xcall); /* 126 self xcall */ 1240 GOTO(get_hrestime); /* 127 get hrestime */ 1241 BAD; /* 128 ST_SETV9STACK */ 1242 GOTO(.getlgrp); /* 129 get lgrpid */ 1243 BAD; BAD; BAD4; /* 12A - 12F unused */ 1244 BAD4; BAD4; /* 130 - 137 unused */ 1245 DTRACE_PID; /* 138 dtrace pid tracing provider */ 1246 BAD; /* 139 unused */ 1247 DTRACE_RETURN; /* 13A dtrace pid return probe */ 1248 BAD; BAD4; /* 13B - 13F unused */ 1249 SYSCALL(syscall_trap) /* 140 LP64 system call */ 1250 SYSCALL(nosys); /* 141 unused system call trap */ 1251#ifdef DEBUG_USER_TRAPTRACECTL 1252 GOTO(.traptrace_freeze); /* 142 freeze traptrace */ 1253 GOTO(.traptrace_unfreeze); /* 143 unfreeze traptrace */ 1254#else 1255 SYSCALL(nosys); /* 142 unused system call trap */ 1256 SYSCALL(nosys); /* 143 unused system call trap */ 1257#endif 1258 BAD4; BAD4; BAD4; /* 144 - 14F unused */ 1259 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */ 1260 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */ 1261 BAD; /* 170 - unused */ 1262 
BAD; /* 171 - unused */ 1263 BAD; BAD; /* 172 - 173 unused */ 1264 BAD4; BAD4; /* 174 - 17B unused */ 1265#ifdef PTL1_PANIC_DEBUG 1266 mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic); 1267 /* 17C test ptl1_panic */ 1268#else 1269 BAD; /* 17C unused */ 1270#endif /* PTL1_PANIC_DEBUG */ 1271 PRIV(kmdb_trap); /* 17D kmdb enter (L1-A) */ 1272 PRIV(kmdb_trap); /* 17E kmdb breakpoint */ 1273 PRIV(obp_bpt); /* 17F obp breakpoint */ 1274 /* reserved */ 1275 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */ 1276 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */ 1277 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */ 1278 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */ 1279 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */ 1280 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */ 1281 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */ 1282 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */ 1283 .size trap_table0, (.-trap_table0) 1284trap_table1: 1285 NOT4; NOT4; /* 000 - 007 unused */ 1286 NOT; /* 008 instruction access exception */ 1287 ITSB_MISS; /* 009 instruction access MMU miss */ 1288 NOT; /* 00A reserved */ 1289 NOT; NOT4; /* 00B - 00F unused */ 1290 NOT4; NOT4; NOT4; NOT4; /* 010 - 01F unused */ 1291 NOT4; /* 020 - 023 unused */ 1292 CLEAN_WINDOW; /* 024 - 027 clean window */ 1293 NOT4; NOT4; /* 028 - 02F unused */ 1294 DMMU_EXCEPTION_TL1; /* 030 data access exception */ 1295 DTSB_MISS; /* 031 data access MMU miss */ 1296 NOT; /* 032 reserved */ 1297 NOT; /* 033 unused */ 1298 MISALIGN_ADDR_TL1; /* 034 mem address not aligned */ 1299 NOT; NOT; NOT; NOT4; NOT4 /* 035 - 03F unused */ 1300 NOT4; NOT4; NOT4; NOT4; /* 040 - 04F unused */ 1301 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F unused */ 1302 NOT; /* 060 unused */ 1303 GOTO(kmdb_trap_tl1); /* 061 PA watchpoint */ 1304 GOTO(kmdb_trap_tl1); /* 062 VA watchpoint */ 1305 NOT; /* 063 reserved */ 1306 ITLB_MISS(tt1); /* 064 instruction access MMU miss */ 1307 DTLB_MISS(tt1); /* 068 data access MMU miss */ 1308 DTLB_PROT; /* 06C data access 
protection */ 1309 NOT; /* 070 reserved */ 1310 NOT; /* 071 reserved */ 1311 NOT; /* 072 reserved */ 1312 NOT; /* 073 reserved */ 1313 NOT4; NOT4; /* 074 - 07B reserved */ 1314 NOT; /* 07C reserved */ 1315 NOT; /* 07D reserved */ 1316 NOT; /* 07E resumable error */ 1317 GOTO_TT(nonresumable_error, trace_gen); /* 07F nonresumable error */ 1318 NOTP4; /* 080 spill 0 normal */ 1319 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 084 spill 1 normal */ 1320 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 088 spill 2 normal */ 1321 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 08C spill 3 normal */ 1322 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 090 spill 4 normal */ 1323 NOTP4; /* 094 spill 5 normal */ 1324 SPILL_64bit_ktt1(sk); /* 098 spill 6 normal */ 1325 SPILL_mixed_ktt1(sk); /* 09C spill 7 normal */ 1326 NOTP4; /* 0A0 spill 0 other */ 1327 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0A4 spill 1 other */ 1328 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0A8 spill 2 other */ 1329 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0AC spill 3 other */ 1330 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0B0 spill 4 other */ 1331 NOTP4; /* 0B4 spill 5 other */ 1332 NOTP4; /* 0B8 spill 6 other */ 1333 NOTP4; /* 0BC spill 7 other */ 1334 NOT4; /* 0C0 fill 0 normal */ 1335 NOT4; /* 0C4 fill 1 normal */ 1336 NOT4; /* 0C8 fill 2 normal */ 1337 NOT4; /* 0CC fill 3 normal */ 1338 NOT4; /* 0D0 fill 4 normal */ 1339 NOT4; /* 0D4 fill 5 normal */ 1340 NOT4; /* 0D8 fill 6 normal */ 1341 NOT4; /* 0DC fill 7 normal */ 1342 NOT4; NOT4; NOT4; NOT4; /* 0E0 - 0EF unused */ 1343 NOT4; NOT4; NOT4; NOT4; /* 0F0 - 0FF unused */ 1344/* 1345 * Code running at TL>0 does not use soft traps, so 1346 * we can truncate the table here. 1347 * However: 1348 * sun4v uses (hypervisor) ta instructions at TL > 0, so 1349 * provide a safety net for now. 
1350 */ 1351 /* soft traps */ 1352 BAD4; BAD4; BAD4; BAD4; /* 100 - 10F unused */ 1353 BAD4; BAD4; BAD4; BAD4; /* 110 - 11F unused */ 1354 BAD4; BAD4; BAD4; BAD4; /* 120 - 12F unused */ 1355 BAD4; BAD4; BAD4; BAD4; /* 130 - 13F unused */ 1356 BAD4; BAD4; BAD4; BAD4; /* 140 - 14F unused */ 1357 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */ 1358 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */ 1359 BAD4; BAD4; BAD4; BAD4; /* 170 - 17F unused */ 1360 /* reserved */ 1361 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */ 1362 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */ 1363 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */ 1364 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */ 1365 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */ 1366 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */ 1367 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */ 1368 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */ 1369etrap_table: 1370 .size trap_table1, (.-trap_table1) 1371 .size trap_table, (.-trap_table) 1372 .size scb, (.-scb) 1373 1374/* 1375 * We get to exec_fault in the case of an instruction miss and tte 1376 * has no execute bit set. We go to tl0 to handle it. 1377 * 1378 * g1 = tsbe pointer (in/clobbered) 1379 * g2 = tag access register (in) 1380 * g3 - g4 = scratch (clobbered) 1381 * g5 = tsbe data (in) 1382 * g6 = scratch (clobbered) 1383 * g7 = pc we jumped here from (in) 1384 */ 1385/* 1386 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type" 1387 * (0=kernel, 1=invalid, or 2=user) rather than context ID) 1388 */ 1389 ALTENTRY(exec_fault) 1390 TRACE_TSBHIT(TT_MMU_EXEC) 1391 MMU_FAULT_STATUS_AREA(%g4) 1392 ldx [%g4 + MMFSA_I_ADDR], %g2 /* g2 = address */ 1393 ldx [%g4 + MMFSA_I_CTX], %g3 /* g3 = ctx */ 1394 srlx %g2, MMU_PAGESHIFT, %g2 ! align address to page boundry 1395 cmp %g3, USER_CONTEXT_TYPE 1396 sllx %g2, MMU_PAGESHIFT, %g2 1397 movgu %icc, USER_CONTEXT_TYPE, %g3 1398 or %g2, %g3, %g2 /* TAG_ACCESS */ 1399 mov T_INSTR_MMU_MISS, %g3 ! 
 arg2 = traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

/*
 * Memory-address-not-aligned trap taken from TL=0.
 * In: %g2 = sfar (fault address), %g3 = sfsr (fault status).
 * If the trap came from user mode and the process has installed the
 * unaligned-access utrap handler (p_utraps slot P_UTRAP15), dispatch to
 * it: .setup_utrap for 32-bit programs, .setup_v9utrap for 64-bit
 * (distinguished by the stack-bias bit in %sp).  Otherwise fall through
 * to .mmu_exception_end with T_ALIGNMENT as the trap code.
 */
.mmu_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f			! privileged: no utrap dispatch
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5		! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp				! stack bias => 64-bit ABI
	bz,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7			! %g7 = misaligned address
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1

/*
 * Privileged-action/opcode trap.  User processes with the P_UTRAP16
 * handler installed are vectored to it; everyone else goes through
 * .mmu_exception_end with T_PRIV_INSTR.
 */
.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1

/*
 * Common exit for the two handlers above.
 * In: %g1 = trap type, %g2 = sfar, %g3 = sfsr.
 * If the CPU's DTrace NOFAULT flag is set, just flag BADADDR and
 * return with "done" (DTrace is probing and handles the fault itself);
 * otherwise pack sfsr<<32 | traptype into %g3 and enter trap() via
 * sys_trap at PIL -1.
 */
.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4			! %g4 = &cpu_core[cpuid]
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	1f
	or	%g5, CPU_DTRACE_BADADDR, %g5	! delay: harmless if taken
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done

1:
	sllx	%g3, 32, %g3
	or	%g3, %g1, %g3			! %g3 = sfsr<<32 | traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * FP-disabled trap (tt 0x20).  A privileged fp-disabled trap is fatal
 * (ptl1_panic with PTL1_BAD_FPTRAP); user traps may be redirected to a
 * registered fp_disabled utrap handler, else go to fp_disabled() in C.
 */
.fp_disabled:
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1

	ldn	[%g1 + T_PROCP], %g1		!
 load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 2f
	nop
	ldn	[%g5 + P_UTRAP7], %g5		! fp_disabled utrap?
	brz,a,pn %g5, 2f
	nop
	btst	1, %sp				! stack bias => 64-bit ABI
	bz,a,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,a,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g0, %g0, %g7			! no misaligned addr for fp trap
2:
	set	fp_disabled, %g1		! C handler
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * IEEE 754 FP exception (tt 0x21).  Privileged occurrence is fatal
 * (ptl1_panic).  %fsr is staged through CPU_TMP1 into %g2 for the
 * eventual handler.  A registered P_UTRAP8 handler gets the trap
 * directly; otherwise dispatch to _fp_ieee_exception via sys_trap.
 */
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]		! %fsr can only be stored,
	ldx	[%g1 + CPU_TMP1], %g2		! so bounce it into %g2
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 1f
	nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * Transfer control to a 32-bit (V8-style) user trap handler.
 *
 * Register Inputs:
 *	%g5	user trap handler
 *	%g7	misaligned addr - for alignment traps only
 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			!
arg0 == tpc 1539 1540 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1541 ldn [%g1 + T_DTRACE_NPC], %l2 ! arg1 = t->t_dtrace_npc (step) 1542 brz,pt %g2, 1f 1543 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1544 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 15451: 1546 mov %g7, %l3 ! arg2 == misaligned address 1547 1548 rdpr %tstate, %g1 ! cwp for trap handler 1549 rdpr %cwp, %g4 1550 bclr TSTATE_CWP_MASK, %g1 1551 wrpr %g1, %g4, %tstate 1552 wrpr %g0, %g5, %tnpc ! trap handler address 1553 FAST_TRAP_DONE 1554 /* NOTREACHED */ 1555 1556.check_v9utrap: 1557 rdpr %tstate, %g1 1558 btst TSTATE_PRIV, %g1 1559 bnz,a,pn %icc, 3f 1560 nop 1561 CPU_ADDR(%g4, %g1) ! load CPU struct addr 1562 ldn [%g4 + CPU_THREAD], %g5 ! load thread pointer 1563 ldn [%g5 + T_PROCP], %g5 ! load proc pointer 1564 ldn [%g5 + P_UTRAPS], %g5 ! are there utraps? 1565 1566 cmp %g3, T_SOFTWARE_TRAP 1567 bne,a,pt %icc, 1f 1568 nop 1569 1570 brz,pt %g5, 3f ! if p_utraps == NULL goto trap() 1571 rdpr %tt, %g3 ! delay - get actual hw trap type 1572 1573 sub %g3, 254, %g1 ! UT_TRAP_INSTRUCTION_16 = p_utraps[18] 1574 ba,pt %icc, 2f 1575 smul %g1, CPTRSIZE, %g2 15761: 1577 brz,a,pt %g5, 3f ! if p_utraps == NULL goto trap() 1578 nop 1579 1580 cmp %g3, T_UNIMP_INSTR 1581 bne,a,pt %icc, 2f 1582 nop 1583 1584 mov 1, %g1 1585 st %g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR 1586 rdpr %tpc, %g1 ! ld trapping instruction using 1587 lduwa [%g1]ASI_AIUP, %g1 ! "AS IF USER" ASI which could fault 1588 st %g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR 1589 1590 sethi %hi(0xc1c00000), %g4 ! setup mask for illtrap instruction 1591 andcc %g1, %g4, %g4 ! and instruction with mask 1592 bnz,a,pt %icc, 3f ! if %g4 == zero, %g1 is an ILLTRAP 1593 nop ! 
fall thru to setup 15942: 1595 ldn [%g5 + %g2], %g5 1596 brnz,a,pt %g5, .setup_v9utrap 1597 nop 15983: 1599 set trap, %g1 1600 ba,pt %xcc, sys_trap 1601 sub %g0, 1, %g4 1602 /* NOTREACHED */ 1603 1604/* 1605 * Register Inputs: 1606 * %g5 user trap handler 1607 */ 1608.setup_v9utrap: 1609 set trap, %g1 ! setup in case we go 1610 mov T_FLUSH_PCB, %g3 ! through sys_trap on 1611 sub %g0, 1, %g4 ! the save instruction below 1612 1613 /* 1614 * If the DTrace pid provider is single stepping a copied-out 1615 * instruction, t->t_dtrace_step will be set. In that case we need 1616 * to abort the single-stepping (since execution of the instruction 1617 * was interrupted) and use the value of t->t_dtrace_npc as the %npc. 1618 */ 1619 save %sp, -SA(MINFRAME64), %sp ! window for trap handler 1620 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1621 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1622 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step 1623 rdpr %tnpc, %l7 ! arg1 == tnpc 1624 brz,pt %g2, 1f 1625 rdpr %tpc, %l6 ! arg0 == tpc 1626 1627 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1628 ldn [%g1 + T_DTRACE_NPC], %l7 ! arg1 == t->t_dtrace_npc (step) 1629 brz,pt %g2, 1f 1630 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1631 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 16321: 1633 rdpr %tstate, %g2 ! cwp for trap handler 1634 rdpr %cwp, %g4 1635 bclr TSTATE_CWP_MASK, %g2 1636 wrpr %g2, %g4, %tstate 1637 1638 ldn [%g1 + T_PROCP], %g4 ! load proc pointer 1639 ldn [%g4 + P_AS], %g4 ! load as pointer 1640 ldn [%g4 + A_USERLIMIT], %g4 ! load as userlimit 1641 cmp %l7, %g4 ! check for single-step set 1642 bne,pt %xcc, 4f 1643 nop 1644 ldn [%g1 + T_LWP], %g1 ! load klwp pointer 1645 ld [%g1 + PCB_STEP], %g4 ! load single-step flag 1646 cmp %g4, STEP_ACTIVE ! step flags set in pcb? 1647 bne,pt %icc, 4f 1648 nop 1649 stn %g5, [%g1 + PCB_TRACEPC] ! save trap handler addr in pcb 1650 mov %l7, %g4 ! 
on entry to precise user trap 1651 add %l6, 4, %l7 ! handler, %l6 == pc, %l7 == npc 1652 ! at time of trap 1653 wrpr %g0, %g4, %tnpc ! generate FLTBOUNDS, 1654 ! %g4 == userlimit 1655 FAST_TRAP_DONE 1656 /* NOTREACHED */ 16574: 1658 wrpr %g0, %g5, %tnpc ! trap handler address 1659 FAST_TRAP_DONE_CHK_INTR 1660 /* NOTREACHED */ 1661 1662.fp_exception: 1663 CPU_ADDR(%g1, %g4) 1664 stx %fsr, [%g1 + CPU_TMP1] 1665 ldx [%g1 + CPU_TMP1], %g2 1666 1667 /* 1668 * Cheetah takes unfinished_FPop trap for certain range of operands 1669 * to the "fitos" instruction. Instead of going through the slow 1670 * software emulation path, we try to simulate the "fitos" instruction 1671 * via "fitod" and "fdtos" provided the following conditions are met: 1672 * 1673 * fpu_exists is set (if DEBUG) 1674 * not in privileged mode 1675 * ftt is unfinished_FPop 1676 * NXM IEEE trap is not enabled 1677 * instruction at %tpc is "fitos" 1678 * 1679 * Usage: 1680 * %g1 per cpu address 1681 * %g2 %fsr 1682 * %g6 user instruction 1683 * 1684 * Note that we can take a memory access related trap while trying 1685 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR 1686 * flag to catch those traps and let the SFMMU code deal with page 1687 * fault and data access exception. 1688 */ 1689#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 1690 sethi %hi(fpu_exists), %g7 1691 ld [%g7 + %lo(fpu_exists)], %g7 1692 brz,pn %g7, .fp_exception_cont 1693 nop 1694#endif 1695 rdpr %tstate, %g7 ! branch if in privileged mode 1696 btst TSTATE_PRIV, %g7 1697 bnz,pn %xcc, .fp_exception_cont 1698 srl %g2, FSR_FTT_SHIFT, %g7 ! extract ftt from %fsr 1699 and %g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7 1700 cmp %g7, FTT_UNFIN 1701 set FSR_TEM_NX, %g5 1702 bne,pn %xcc, .fp_exception_cont ! branch if NOT unfinished_FPop 1703 andcc %g2, %g5, %g0 1704 bne,pn %xcc, .fp_exception_cont ! branch if FSR_TEM_NX enabled 1705 rdpr %tpc, %g5 ! get faulting PC 1706 1707 or %g0, 1, %g7 1708 st %g7, [%g1 + CPU_TL1_HDLR] ! 
set tl1_hdlr flag 1709 lda [%g5]ASI_USER, %g6 ! get user's instruction 1710 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 1711 1712 set FITOS_INSTR_MASK, %g7 1713 and %g6, %g7, %g7 1714 set FITOS_INSTR, %g5 1715 cmp %g7, %g5 1716 bne,pn %xcc, .fp_exception_cont ! branch if not FITOS_INSTR 1717 nop 1718 1719 /* 1720 * This is unfinished FPops trap for "fitos" instruction. We 1721 * need to simulate "fitos" via "fitod" and "fdtos" instruction 1722 * sequence. 1723 * 1724 * We need a temporary FP register to do the conversion. Since 1725 * both source and destination operands for the "fitos" instruction 1726 * have to be within %f0-%f31, we use an FP register from the upper 1727 * half to guarantee that it won't collide with the source or the 1728 * dest operand. However, we do have to save and restore its value. 1729 * 1730 * We use %d62 as a temporary FP register for the conversion and 1731 * branch to appropriate instruction within the conversion tables 1732 * based upon the rs2 and rd values. 1733 */ 1734 1735 std %d62, [%g1 + CPU_TMP1] ! 
save original value 1736 1737 srl %g6, FITOS_RS2_SHIFT, %g7 1738 and %g7, FITOS_REG_MASK, %g7 1739 set _fitos_fitod_table, %g4 1740 sllx %g7, 2, %g7 1741 jmp %g4 + %g7 1742 ba,pt %xcc, _fitos_fitod_done 1743 .empty 1744 1745_fitos_fitod_table: 1746 fitod %f0, %d62 1747 fitod %f1, %d62 1748 fitod %f2, %d62 1749 fitod %f3, %d62 1750 fitod %f4, %d62 1751 fitod %f5, %d62 1752 fitod %f6, %d62 1753 fitod %f7, %d62 1754 fitod %f8, %d62 1755 fitod %f9, %d62 1756 fitod %f10, %d62 1757 fitod %f11, %d62 1758 fitod %f12, %d62 1759 fitod %f13, %d62 1760 fitod %f14, %d62 1761 fitod %f15, %d62 1762 fitod %f16, %d62 1763 fitod %f17, %d62 1764 fitod %f18, %d62 1765 fitod %f19, %d62 1766 fitod %f20, %d62 1767 fitod %f21, %d62 1768 fitod %f22, %d62 1769 fitod %f23, %d62 1770 fitod %f24, %d62 1771 fitod %f25, %d62 1772 fitod %f26, %d62 1773 fitod %f27, %d62 1774 fitod %f28, %d62 1775 fitod %f29, %d62 1776 fitod %f30, %d62 1777 fitod %f31, %d62 1778_fitos_fitod_done: 1779 1780 /* 1781 * Now convert data back into single precision 1782 */ 1783 srl %g6, FITOS_RD_SHIFT, %g7 1784 and %g7, FITOS_REG_MASK, %g7 1785 set _fitos_fdtos_table, %g4 1786 sllx %g7, 2, %g7 1787 jmp %g4 + %g7 1788 ba,pt %xcc, _fitos_fdtos_done 1789 .empty 1790 1791_fitos_fdtos_table: 1792 fdtos %d62, %f0 1793 fdtos %d62, %f1 1794 fdtos %d62, %f2 1795 fdtos %d62, %f3 1796 fdtos %d62, %f4 1797 fdtos %d62, %f5 1798 fdtos %d62, %f6 1799 fdtos %d62, %f7 1800 fdtos %d62, %f8 1801 fdtos %d62, %f9 1802 fdtos %d62, %f10 1803 fdtos %d62, %f11 1804 fdtos %d62, %f12 1805 fdtos %d62, %f13 1806 fdtos %d62, %f14 1807 fdtos %d62, %f15 1808 fdtos %d62, %f16 1809 fdtos %d62, %f17 1810 fdtos %d62, %f18 1811 fdtos %d62, %f19 1812 fdtos %d62, %f20 1813 fdtos %d62, %f21 1814 fdtos %d62, %f22 1815 fdtos %d62, %f23 1816 fdtos %d62, %f24 1817 fdtos %d62, %f25 1818 fdtos %d62, %f26 1819 fdtos %d62, %f27 1820 fdtos %d62, %f28 1821 fdtos %d62, %f29 1822 fdtos %d62, %f30 1823 fdtos %d62, %f31 1824_fitos_fdtos_done: 1825 1826 ldd [%g1 + CPU_TMP1], 
 %d62				! restore %d62

/*
 * NOTE(review): "#if DEBUG" requires DEBUG to be defined *to a value*;
 * elsewhere this file tests "#if defined(DEBUG) ..." -- confirm intent.
 */
#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat (lock-free casxa increment).
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6		! retry until cas wins
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat (same lock-free increment).
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE

.fp_exception_cont:
	/*
	 * Let _fp_exception deal with simulating FPop instruction.
	 * Note that we need to pass %fsr in %g2 (already read above).
	 */

	set	_fp_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4


/*
 * Register windows: ta 0x3 (flush windows) and ta 0x4 (clean windows)
 * both re-execute as a T_FLUSH_PCB trap() call; %tpc/%tnpc are advanced
 * past the trapping instruction first.
 */
.flushw:
.clean_windows:
	rdpr	%tnpc, %g1
	wrpr	%g1, %tpc
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * .spill_clean: clean the previous window, restore the wstate, and
 * "done".
 *
 * Entry: %g7 contains new wstate
 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
	wrpr	%g6, %cwp			! step back to prior window
	TT_TRACE_L(trace_win)
	clr	%l0				! scrub all locals so no
	clr	%l1				! stale kernel data leaks
	clr	%l2				! into the cleaned window
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate
	saved
	retry					! restores correct %cwp

/*
 * ta 0x6: request kernel fixup of unaligned references -- just sets
 * p_fixalignment on the current proc and returns.
 */
.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	!
load thread pointer 1912 ldn [%g1 + T_PROCP], %g1 1913 mov 1, %g2 1914 stb %g2, [%g1 + P_FIXALIGNMENT] 1915 FAST_TRAP_DONE 1916 1917#define STDF_REG(REG, ADDR, TMP) \ 1918 sll REG, 3, REG ;\ 1919mark1: set start1, TMP ;\ 1920 jmp REG + TMP ;\ 1921 nop ;\ 1922start1: ba,pt %xcc, done1 ;\ 1923 std %f0, [ADDR + CPU_TMP1] ;\ 1924 ba,pt %xcc, done1 ;\ 1925 std %f32, [ADDR + CPU_TMP1] ;\ 1926 ba,pt %xcc, done1 ;\ 1927 std %f2, [ADDR + CPU_TMP1] ;\ 1928 ba,pt %xcc, done1 ;\ 1929 std %f34, [ADDR + CPU_TMP1] ;\ 1930 ba,pt %xcc, done1 ;\ 1931 std %f4, [ADDR + CPU_TMP1] ;\ 1932 ba,pt %xcc, done1 ;\ 1933 std %f36, [ADDR + CPU_TMP1] ;\ 1934 ba,pt %xcc, done1 ;\ 1935 std %f6, [ADDR + CPU_TMP1] ;\ 1936 ba,pt %xcc, done1 ;\ 1937 std %f38, [ADDR + CPU_TMP1] ;\ 1938 ba,pt %xcc, done1 ;\ 1939 std %f8, [ADDR + CPU_TMP1] ;\ 1940 ba,pt %xcc, done1 ;\ 1941 std %f40, [ADDR + CPU_TMP1] ;\ 1942 ba,pt %xcc, done1 ;\ 1943 std %f10, [ADDR + CPU_TMP1] ;\ 1944 ba,pt %xcc, done1 ;\ 1945 std %f42, [ADDR + CPU_TMP1] ;\ 1946 ba,pt %xcc, done1 ;\ 1947 std %f12, [ADDR + CPU_TMP1] ;\ 1948 ba,pt %xcc, done1 ;\ 1949 std %f44, [ADDR + CPU_TMP1] ;\ 1950 ba,pt %xcc, done1 ;\ 1951 std %f14, [ADDR + CPU_TMP1] ;\ 1952 ba,pt %xcc, done1 ;\ 1953 std %f46, [ADDR + CPU_TMP1] ;\ 1954 ba,pt %xcc, done1 ;\ 1955 std %f16, [ADDR + CPU_TMP1] ;\ 1956 ba,pt %xcc, done1 ;\ 1957 std %f48, [ADDR + CPU_TMP1] ;\ 1958 ba,pt %xcc, done1 ;\ 1959 std %f18, [ADDR + CPU_TMP1] ;\ 1960 ba,pt %xcc, done1 ;\ 1961 std %f50, [ADDR + CPU_TMP1] ;\ 1962 ba,pt %xcc, done1 ;\ 1963 std %f20, [ADDR + CPU_TMP1] ;\ 1964 ba,pt %xcc, done1 ;\ 1965 std %f52, [ADDR + CPU_TMP1] ;\ 1966 ba,pt %xcc, done1 ;\ 1967 std %f22, [ADDR + CPU_TMP1] ;\ 1968 ba,pt %xcc, done1 ;\ 1969 std %f54, [ADDR + CPU_TMP1] ;\ 1970 ba,pt %xcc, done1 ;\ 1971 std %f24, [ADDR + CPU_TMP1] ;\ 1972 ba,pt %xcc, done1 ;\ 1973 std %f56, [ADDR + CPU_TMP1] ;\ 1974 ba,pt %xcc, done1 ;\ 1975 std %f26, [ADDR + CPU_TMP1] ;\ 1976 ba,pt %xcc, done1 ;\ 1977 std %f58, [ADDR + CPU_TMP1] ;\ 1978 
	ba,pt	%xcc, done1			;\
	std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f62, [ADDR + CPU_TMP1]		;\
done1:

/*
 * LDDF_REG: load the 8-byte value staged at [ADDR + CPU_TMP1] into
 * double FP register REG (0..31 encoding of rd). REG is scaled by 8
 * and used to index a jump table of ba/ldd pairs (one 8-byte pair per
 * possible rd), so exactly one ldd executes in the branch delay slot.
 * TMP is a scratch register; REG is clobbered.
 */
#define LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start2:	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f62		;\
done2:

/*
 * Emulate a user lddf/ldda to a misaligned address: fetch the two
 * 4-byte halves from user space, recombine them in per-cpu scratch
 * (CPU_TMP1) and load the result into the destination FP register.
 * Little-endian and no-fault ASIs are punted to C (fpu_trap).
 * On entry: %g2 = sfar (fault address), %g3 = sfsr.
 */
.lddf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5		! stash sfar
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2	! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f		! no fpu - punt to trap()
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
	srl	%g6, 23, %g1		! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for ldda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f		! i=1: asi comes from %tstate.asi
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_PNF		! primary no fault address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_SNF		! secondary no fault address space
	bne,a,pn %icc, 3f		! anything else: let C handle it
	nop
2:
	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
	add	%g5, 4, %g5		! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5		! combine data
	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1

	srl	%g6, 25, %g3		! %g6 has the instruction
	and	%g3, 0x1F, %g3		! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4

/*
 * Emulate a user stdf/stda to a misaligned address: extract the source
 * FP register into per-cpu scratch (CPU_TMP1) via STDF_REG, then store
 * the two 4-byte halves to user space. Little-endian asi's go to C.
 * On entry: %g2 = sfar (fault address), %g3 = sfsr.
 */
.stdf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7		! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f			! no fpu - punt to trap()
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction

	srl	%g6, 23, %g1		! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for stda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f		! i=1: asi comes from %tstate.asi
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xff, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	bne,a,pn %icc, 3f		! anything else: let C handle it
	nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6		! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER	! first half
	add	%g5, 4, %g5		! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER	! second half

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! nofault little asi's
	sub	%g0, 1, %g4

#ifdef DEBUG_USER_TRAPTRACECTL

/*
 * Debug-only fast traps to freeze/unfreeze the trap trace buffer by
 * setting/clearing the trap_freeze flag. Locals are shuffled into
 * globals around TT_TRACE_L because trace_win clobbers %l0-%l2, %l4.
 */
.traptrace_freeze:
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]		! trap_freeze = 1
	FAST_TRAP_DONE

.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]		! trap_freeze = 0
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	FAST_TRAP_DONE

#endif /* DEBUG_USER_TRAPTRACECTL */

/*
 * .getcc: fast trap to read the V8 icc condition codes out of
 * %tstate.ccr and return them, right-justified, in the user's
 * normal-globals %g1 (written while %gl == 0).
 */
.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	wrpr	%g0, 0, %gl
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

/*
 * .setcc: fast trap to take a V8 psr.icc value from the user's
 * normal-globals %g1 (read while %gl == 0) and merge it into the
 * icc field of %tstate.ccr.
 */
.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	wrpr	%g0, 0, %gl
	mov	%g1, %o0			! user's %g1 = new icc value
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	sll	%o0, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

/*
 * getpsr(void)
 * Note that the xcc part of the ccr is not provided.
 * The V8 code shows why the V9 trap is not faster:
 * #define GETPSR_TRAP() \
 * mov %psr, %i0; jmp %l2; rett %l2+4; nop;
 */

	.type .getpsr, #function
.getpsr:
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)

/*
 * setpsr(newpsr)
 * Note that there is no support for ccr.xcc in the V9 code.
 */

	.type .setpsr, #function
.setpsr:
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or	%g0, CCR_ICC, %g3
	sllx	%g3, TSTATE_CCR_SHIFT, %g2

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)

/*
 * getlgrp
 * get home lgrpid on which the calling thread is currently executing.
 * Returns cpu_id in %o0 and the lpl_lgrpid in %o1.
 */
	.type	.getlgrp, #function
.getlgrp:
	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1		! sign-extend to 64 bits
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)

/*
 * Entry for old 4.x trap (trap 0).
 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f		! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%tnpc, %l1	! save old tnpc
	wrpr	%g0, %g2, %tnpc	! setup tnpc

	mov	%g1, %l0	! save CPU struct addr
	wrpr	%g0, 0, %gl
	mov	%l1, %g6	! pass tnpc to user code in %g6
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	!
	! check for old syscall mmap which is the only different one which
	! must be the same.  Others are handled in the compatibility library.
	!
	mov	%g1, %l0	! save CPU struct addr
	wrpr	%g0, 0, %gl
	cmp	%g1, OSYS_mmap	! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP1], %l0
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)

/*
 * Handler for software trap 9.
 * Set trap0 emulation address for old 4.x system call trap.
 * XXX - this should be a system call.
 */
	ENTRY_NP(set_trap0_addr)
	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	mov	%g1, %l0	! preserve CPU addr
	wrpr	%g0, 0, %gl
	mov	%g1, %l1	! user's %g1 = new trap0 address
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)

/*
 * mmu_trap_tl1
 * trap handler for unexpected mmu traps.
 * simply checks if the trap was a user lddf/stdf alignment trap, in which
 * case we go to fpu_trap or a user trap from the window handler, in which
 * case we go save the state on the pcb.  Otherwise, we go to ptl1_panic.
 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef	TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! MMU fault address
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_TYPE], %g7	! XXXQ should be a MMFSA_F_ constant?
	ldx	[%g6 + MMFSA_D_CTX], %g6
	sllx	%g6, SFSR_CTX_SHIFT, %g6
	or	%g6, %g7, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! MMU context/type
	set	0xdeadbeef, %g6			! marker for unused F4 slot
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	! If the tl1_hdlr flag is set, a fast-trap handler (lddf/stdf
	! emulation above) faulted at TL1; clear the flag and let
	! sfmmu_mmu_trap resolve the fault.
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f
	nop
	sta	%g0, [%g7]ASI_MEM
	! XXXQ need to setup registers for sfmmu_mmu_trap?
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tpc, %g7
	/* in user_rtt? */
	set	rtt_fill_start, %g6
	cmp	%g7, %g6
	blu,pn	%xcc, 6f
	.empty
	set	rtt_fill_end, %g6
	cmp	%g7, %g6
	bgeu,pn	%xcc, 6f
	nop
	set	fault_rtt_fn1, %g7
	ba,a	7f
6:
	! check to see if the trap pc is in a window spill/fill handling
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bgeu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	! pc is inside the trap table, convert to trap type
	srl	%g7, 5, %g6		! XXXQ need #define
	and	%g6, 0x1ff, %g6		! XXXQ need #define
	! and check for a window trap type
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7

7:
	! Arguments are passed in the global set active after the
	! 'done' instruction. Before switching sets, must save
	! the calculated next pc
	wrpr	%g0, %g7, %tnpc
	wrpr	%g0, 1, %gl
	rdpr	%tt, %g5
	MMU_FAULT_STATUS_AREA(%g7)
	cmp	%g5, T_ALIGNMENT
	be,pn	%xcc, 1f
	ldx	[%g7 + MMFSA_D_ADDR], %g6
	ldx	[%g7 + MMFSA_D_CTX], %g7
	srlx	%g6, MMU_PAGESHIFT, %g6	/* align address */
	cmp	%g7, USER_CONTEXT_TYPE
	sllx	%g6, MMU_PAGESHIFT, %g6
	movgu	%icc, USER_CONTEXT_TYPE, %g7
	or	%g6, %g7, %g6		/* TAG_ACCESS */
1:
	done
	SET_SIZE(mmu_trap_tl1)

/*
 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
 * traps are valid only when kmdb is loaded.  When the debugger is active,
 * the code below is rewritten to transfer control to the appropriate
 * debugger entry points.
 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0	! patched to "jmp %g1 + 0" when kmdb is active
	jmp	%g1 + 0
	nop

	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0	! patched to "jmp %g1 + 0" when kmdb is active
	jmp	%g1 + 0
	nop

/*
 * This entry is copied from OBP's trap table during boot.
 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT



#ifdef	TRAPTRACE
/*
 * TRAPTRACE support.
 * labels here are branched to with "rd %pc, %g7" in the delay slot.
 * Return is done by "jmp %g7 + 4".
 */

/* Record a data-MMU trap trace entry (fault addr/ctx/type from MMFSA). */
trace_dmmu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_D_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_D_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/* Record an instruction-MMU trap trace entry (from the MMFSA I-fields). */
trace_immu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_I_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_I_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_I_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/* Record a generic trap trace entry (tt/tstate/sp/tpc only). */
trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/* Record a register-window trap trace entry; returns via %l4 + 4. */
trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	clr	%l0

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */

	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)
	jmp	%g7 + 4
	nop

/*
 * Trace a TSB miss
 *
 * g1 = tsb8k pointer (in)
 * g2 = tag access register (in)
 * g3 = tsb4m pointer (in)
 * g4 = tsbe tag (in/clobbered)
 * g5 - g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	.global	trace_tsbmiss
trace_tsbmiss:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	stna	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
	stna	%g4, [%g5 + TRAP_ENT_F1]%asi		! XXX? tsb tag
	rdpr	%tnpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	! pick the I- or D-side MMFSA address offset based on trap type
	mov	MMFSA_D_ADDR, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_ADDR, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_ADDR, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
	cmp	%g4, MMFSA_D_ADDR
	move	%xcc, MMFSA_D_CTX, %g4
	movne	%xcc, MMFSA_I_CTX, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_F4]%asi		! context ID
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop

/*
 * g2 = tag access register (in)
 * g3 = ctx type (0, 1 or 2) (in) (not used)
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%g2, [%g1 + TRAP_ENT_SP]%asi		! tag access reg
	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	! pick the I- or D-side MMFSA context offset based on trap type
	mov	MMFSA_D_CTX, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_CTX, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_CTX, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g1 + TRAP_ENT_TR]%asi		! context ID
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop

#endif /* TRAPTRACE */

/*
 * Handle watchdog reset trap. Enable the MMU using the MMU_ENABLE
 * HV service, which requires the return target to be specified as a VA
 * since we are enabling the MMU. We set the target to ptl1_panic.
 */

	.type	.watchdog_trap, #function
.watchdog_trap:
	mov	1, %o0
	setx	ptl1_panic, %g2, %o1
	mov	MMU_ENABLE, %o5
	ta	FAST_TRAP
	done
	SET_SIZE(.watchdog_trap)
/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_lddf_not_aligned, #function
.dmmu_exc_lddf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp			! 64-bit process if stack is biased
	bnz,pt	%xcc, .lddf_exception_not_aligned
	or	%g3, %g1, %g3		/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_lddf_not_aligned)

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_stdf_not_aligned, #function
.dmmu_exc_stdf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp			! 64-bit process if stack is biased
	bnz,pt	%xcc, .stdf_exception_not_aligned
	or	%g3, %g1, %g3		/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_stdf_not_aligned)

/*
 * Synthesize SFAR/SFSR/TAG_ACCESS from the MMU fault status area and
 * funnel the data-MMU exception to trap() as T_DATA_EXCEPTION.
 */
	.type	.dmmu_exception, #function
.dmmu_exception:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	ldx	[%g3 + MMFSA_D_TYPE], %g1
	ldx	[%g3 + MMFSA_D_CTX], %g4
	srlx	%g2, MMU_PAGESHIFT, %g2	/* align address */
	sllx	%g2, MMU_PAGESHIFT, %g2
	sllx	%g4, SFSR_CTX_SHIFT, %g3
	or	%g3, %g1, %g3		/* SFSR */
	cmp	%g4, USER_CONTEXT_TYPE
	movgeu	%icc, USER_CONTEXT_TYPE, %g4
	or	%g2, %g4, %g2		/* TAG_ACCESS */
	ba,pt	%xcc, .mmu_exception_end
	mov	T_DATA_EXCEPTION, %g1
	SET_SIZE(.dmmu_exception)
/*
 * expects offset into tsbmiss area in %g1 and return pc in %g7
 */
stat_mmu:
	CPU_INDEX(%g5, %g6)
	sethi	%hi(tsbmiss_area), %g6
	sllx	%g5, TSBMISS_SHIFT, %g5
	or	%g6, %lo(tsbmiss_area), %g6
	add	%g6, %g5, %g6		/* g6 = tsbmiss area */
	ld	[%g6 + %g1], %g5
	add	%g5, 1, %g5		! increment the selected counter
	jmp	%g7 + 4
	st	%g5, [%g6 + %g1]


/*
 * fast_trap_done, fast_trap_done_chk_intr:
 *
 * Due to the design of UltraSPARC pipeline, pending interrupts are not
 * taken immediately after a RETRY or DONE instruction which causes IE to
 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
 * to execute first before taking any interrupts. If that instruction
 * results in other traps, and if the corresponding trap handler runs
 * entirely at TL=1 with interrupts disabled, then pending interrupts
 * won't be taken until after yet another instruction following the %tpc
 * or %tnpc.
 *
 * A malicious user program can use this feature to block out interrupts
 * for extended durations, which can result in send_mondo_timeout kernel
 * panic.
 *
 * This problem is addressed by servicing any pending interrupts via
 * sys_trap before returning back to the user mode from a fast trap
 * handler. The "done" instruction within a fast trap handler, which
 * runs entirely at TL=1 with interrupts disabled, is replaced with the
 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
 * entry point.
 *
 * We check for any pending interrupts here and force a sys_trap to
 * service those interrupts, if any. To minimize overhead, pending
 * interrupts are checked if the %tpc happens to be at 16K boundary,
 * which allows a malicious program to execute at most 4K consecutive
 * instructions before we service any pending interrupts. If a worst
 * case fast trap handler takes about 2 usec, then interrupts will be
 * blocked for at most 8 msec, less than a clock tick.
 *
 * For the cases where we don't know if the %tpc will cross a 16K
 * boundary, we can't use the above optimization and always process
 * any pending interrupts via fast_trap_done_chk_intr entry point.
 *
 * Entry Conditions:
 * %pstate		am:0 priv:1 ie:0
 * globals are AG (not normal globals)
 */

	.global	fast_trap_done, fast_trap_done_chk_intr
fast_trap_done:
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	nop
	done

fast_trap_done_chk_intr:
1:	rd	SOFTINT, %g6
	brnz,pn	%g6, 2f			! branch if any pending intr
	nop
	done

2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

fast_trap_dummy_call:
	retl
	nop

#endif	/* lint */