/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif /* !lint */
#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/sun4asi.h>
#include <sys/machasi.h>
#include <sys/hypervisor_api.h>
#include <sys/machtrap.h>
#include <sys/machthread.h>
#include <sys/pcb.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/machpcb.h>
#include <sys/async.h>
#include <sys/intreg.h>
#include <sys/scb.h>
#include <sys/psr_compat.h>
#include <sys/syscall.h>
#include <sys/machparam.h>
#include <sys/traptrace.h>
#include <vm/hat_sfmmu.h>
#include <sys/archsystm.h>
#include <sys/utrap.h>
#include <sys/clock.h>
#include <sys/intr.h>
#include <sys/fpu/fpu_simulator.h>
#include <vm/seg_spt.h>

/*
 * WARNING: If you add a fast trap handler which can be invoked by a
 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
 * instead of the "done" instruction to return to user mode. See the
 * comments for the "fast_trap_done" entry point for more information.
 *
 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
 * cases where you always want to process any pending interrupts before
 * returning to user mode.
 */
#define	FAST_TRAP_DONE		\
	ba,a	fast_trap_done

#define	FAST_TRAP_DONE_CHK_INTR		\
	ba,a	fast_trap_done_chk_intr

/*
 * SPARC V9 Trap Table
 *
 * Most of the trap handlers are made from common building
 * blocks, and some are instantiated multiple times within
 * the trap table. So, I build a bunch of macros, then
 * populate the table using only the macros.
 *
 * Many macros branch to sys_trap. Its calling convention is:
 *	%g1		kernel trap handler
 *	%g2, %g3	args for above
 *	%g4		desired %pil
 */

#ifdef	TRAPTRACE

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#define	TT_TRACE_INS	2

#define	TT_TRACE_L(label)	\
	ba	label		;\
	rd	%pc, %l4	;\
	clr	%l4
#define	TT_TRACE_L_INS	3

#else

#define	TT_TRACE(label)
#define	TT_TRACE_INS	0

#define	TT_TRACE_L(label)
#define	TT_TRACE_L_INS	0

#endif

/*
 * This macro is used to update per cpu mmu stats in perf critical
 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER
 * is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_PERCPU_DBSTAT(stat)		\
	mov	stat, %g1		;\
	ba	stat_mmu		;\
	rd	%pc, %g7
#else
#define	HAT_PERCPU_DBSTAT(stat)
#endif /* DEBUG || SFMMU_STAT_GATHER */
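/*
 * For illustration only: with TRAPTRACE off, a vector built from the
 * NOT macro below expands to the canonical sys_trap sequence,
 *
 *	set	trap, %g1		! %g1 = kernel trap handler
 *	rdpr	%tt, %g3		! %g3 = handler arg (trap type)
 *	ba,pt	%xcc, sys_trap
 *	sub	%g0, 1, %g4		! %g4 = -1
 *
 * matching the calling convention documented above. The %g4 = -1
 * idiom recurs throughout this file; it tells sys_trap not to raise
 * %pil to a specific level.
 */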
/*
 * This first set are funneled to trap() with %tt as the type.
 * Trap will then either panic or send the user a signal.
 */
/*
 * NOT is used for traps that just shouldn't happen.
 * It comes in both single and quadruple flavors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	NOT			\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32
#define	NOT4	NOT; NOT; NOT; NOT

#define	NOTP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, ptl1_panic	;\
	mov	PTL1_BAD_TRAP, %g1	;\
	.align	32
#define	NOTP4	NOTP; NOTP; NOTP; NOTP


/*
 * BAD is used for trap vectors we don't have a kernel
 * handler for.
 * It also comes in single and quadruple versions.
 */
#define	BAD	NOT
#define	BAD4	NOT4

#define	DONE		\
	done;		\
	.align	32

/*
 * TRAP vectors to the trap() function.
 * Its main use is for user errors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	TRAP(arg)		\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	mov	arg, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

/*
 * SYSCALL is used for system calls on both ILP32 and LP64 kernels
 * depending on the "which" parameter (should be syscall_trap,
 * syscall_trap32, or nosys for unused system call traps).
 */
#define	SYSCALL(which)		\
	TT_TRACE(trace_gen)	;\
	set	(which), %g1	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

/*
 * GOTO just jumps to a label.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO(label)	\
	.global	label	;\
	ba,a	label	;\
	.empty		;\
	.align	32

/*
 * GOTO_TT just jumps to a label.
 * Correctable ECC error traps at levels 0 and 1 will use this macro.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO_TT(label, ttlabel)	\
	.global	label		;\
	TT_TRACE(ttlabel)	;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * Privileged traps
 * Takes breakpoint if privileged, calls trap() if not.
 */
#define	PRIV(label)			\
	rdpr	%tstate, %g1		;\
	btst	TSTATE_PRIV, %g1	;\
	bnz	label			;\
	rdpr	%tt, %g3		;\
	set	trap, %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32
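/*
 * A worked example of PRIV: trap type 17D in the table below is
 * PRIV(kmdb_trap). Taken from privileged code, the vector branches
 * straight to kmdb_trap with %tt in %g3; taken from user code, it
 * falls through into the sys_trap/trap() path so the offending
 * process can be signalled rather than entering the debugger.
 */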
/*
 * DTrace traps.
 */
#define	DTRACE_FASTTRAP					\
	.global	dtrace_fasttrap_probe			;\
	.global	dtrace_fasttrap_probe_ptr		;\
	sethi	%hi(dtrace_fasttrap_probe_ptr), %g4	;\
	ldn	[%g4 + %lo(dtrace_fasttrap_probe_ptr)], %g4 ;\
	set	dtrace_fasttrap_probe, %g1		;\
	brnz,pn	%g4, user_trap				;\
	sub	%g0, 1, %g4				;\
	FAST_TRAP_DONE					;\
	.align	32

#define	DTRACE_PID			\
	.global	dtrace_pid_probe	;\
	set	dtrace_pid_probe, %g1	;\
	ba,pt	%xcc, user_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#define	DTRACE_RETURN				\
	.global	dtrace_return_probe		;\
	set	dtrace_return_probe, %g1	;\
	ba,pt	%xcc, user_trap			;\
	sub	%g0, 1, %g4			;\
	.align	32

/*
 * REGISTER WINDOW MANAGEMENT MACROS
 */

/*
 * various convenient units of padding
 */
#define	SKIP(n)	.skip 4*(n)

/*
 * CLEAN_WINDOW is the simple handler for cleaning a register window.
 */
#define	CLEAN_WINDOW						\
	TT_TRACE_L(trace_win)					;\
	rdpr	%cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	;\
	clr	%l0; clr %l1; clr %l2; clr %l3			;\
	clr	%l4; clr %l5; clr %l6; clr %l7			;\
	clr	%o0; clr %o1; clr %o2; clr %o3			;\
	clr	%o4; clr %o5; clr %o6; clr %o7			;\
	retry; .align 128

#if !defined(lint)

/*
 * If we get an unresolved tlb miss while in a window handler, the fault
 * handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel. Spill handlers
 * use this to spill windows into the wbuf.
 *
 * The mixed handler works by checking %sp, and branching to the correct
 * handler. This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames; which implies the handler order is: 32b,
 * 64b, mixed. The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
 */

/*
 * SPILL_32bit spills a 32-bit-wide kernel register window. It
 * assumes that the kernel context and the nucleus context are the
 * same. The stack pointer is required to be eight-byte aligned even
 * though this code only needs it to be four-byte aligned.
 */
#define	SPILL_32bit(tail)			\
	srl	%sp, 0, %sp			;\
1:	st	%l0, [%sp + 0]			;\
	st	%l1, [%sp + 4]			;\
	st	%l2, [%sp + 8]			;\
	st	%l3, [%sp + 12]			;\
	st	%l4, [%sp + 16]			;\
	st	%l5, [%sp + 20]			;\
	st	%l6, [%sp + 24]			;\
	st	%l7, [%sp + 28]			;\
	st	%i0, [%sp + 32]			;\
	st	%i1, [%sp + 36]			;\
	st	%i2, [%sp + 40]			;\
	st	%i3, [%sp + 44]			;\
	st	%i4, [%sp + 48]			;\
	st	%i5, [%sp + 52]			;\
	st	%i6, [%sp + 56]			;\
	st	%i7, [%sp + 60]			;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty
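/*
 * A worked instruction count for the SKIP() padding above, assuming
 * TRAPTRACE is off (TT_TRACE_L_INS == 0): SPILL_32bit emits
 * 1 (srl) + 16 (st) + 1 (saved) + 1 (retry) = 19 instructions, so
 * SKIP(31 - 19 - TT_TRACE_L_INS) pads to instruction 31 and the
 * trailing ba,a to the fault_32bit_ tail handler lands as the 32nd
 * and last word of the 128-byte spill slot. The other SKIP()
 * expressions in this file follow the same arithmetic.
 */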
/*
 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi. It is used to spill
 * non-kernel windows. The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	SPILL_32bit_asi(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
1:	sta	%l0, [%sp + %g0]asi_num		;\
	mov	4, %g1				;\
	sta	%l1, [%sp + %g1]asi_num		;\
	mov	8, %g2				;\
	sta	%l2, [%sp + %g2]asi_num		;\
	mov	12, %g3				;\
	sta	%l3, [%sp + %g3]asi_num		;\
	add	%sp, 16, %g4			;\
	sta	%l4, [%g4 + %g0]asi_num		;\
	sta	%l5, [%g4 + %g1]asi_num		;\
	sta	%l6, [%g4 + %g2]asi_num		;\
	sta	%l7, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i0, [%g4 + %g0]asi_num		;\
	sta	%i1, [%g4 + %g1]asi_num		;\
	sta	%i2, [%g4 + %g2]asi_num		;\
	sta	%i3, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i4, [%g4 + %g0]asi_num		;\
	sta	%i5, [%g4 + %g1]asi_num		;\
	sta	%i6, [%g4 + %g2]asi_num		;\
	sta	%i7, [%g4 + %g3]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

#define	SPILL_32bit_tt1(asi_num, tail)		\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty					;\
	.align	128


/*
 * FILL_32bit fills a 32-bit-wide kernel register window. It assumes
 * that the kernel context and the nucleus context are the same. The
 * stack pointer is required to be eight-byte aligned even though this
 * code only needs it to be four-byte aligned.
 */
#define	FILL_32bit(tail)			\
	srl	%sp, 0, %sp			;\
1:	TT_TRACE_L(trace_win)			;\
	ld	[%sp + 0], %l0			;\
	ld	[%sp + 4], %l1			;\
	ld	[%sp + 8], %l2			;\
	ld	[%sp + 12], %l3			;\
	ld	[%sp + 16], %l4			;\
	ld	[%sp + 20], %l5			;\
	ld	[%sp + 24], %l6			;\
	ld	[%sp + 28], %l7			;\
	ld	[%sp + 32], %i0			;\
	ld	[%sp + 36], %i1			;\
	ld	[%sp + 40], %i2			;\
	ld	[%sp + 44], %i3			;\
	ld	[%sp + 48], %i4			;\
	ld	[%sp + 52], %i5			;\
	ld	[%sp + 56], %i6			;\
	ld	[%sp + 60], %i7			;\
	restored				;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

/*
 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi. It is used to fill
 * non-kernel windows. The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	FILL_32bit_asi(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
1:	TT_TRACE_L(trace_win)			;\
	mov	4, %g1				;\
	lda	[%sp + %g0]asi_num, %l0		;\
	mov	8, %g2				;\
	lda	[%sp + %g1]asi_num, %l1		;\
	mov	12, %g3				;\
	lda	[%sp + %g2]asi_num, %l2		;\
	lda	[%sp + %g3]asi_num, %l3		;\
	add	%sp, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %l4		;\
	lda	[%g4 + %g1]asi_num, %l5		;\
	lda	[%g4 + %g2]asi_num, %l6		;\
	lda	[%g4 + %g3]asi_num, %l7		;\
	add	%g4, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %i0		;\
	lda	[%g4 + %g1]asi_num, %i1		;\
	lda	[%g4 + %g2]asi_num, %i2		;\
	lda	[%g4 + %g3]asi_num, %i3		;\
	add	%g4, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %i4		;\
	lda	[%g4 + %g1]asi_num, %i5		;\
	lda	[%g4 + %g2]asi_num, %i6		;\
	lda	[%g4 + %g3]asi_num, %i7		;\
	restored				;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty
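/*
 * For reference, a worked example of the V9BIAS64 stack bias used by
 * the 64-bit handlers below: a 64-bit frame is addressed at
 * %sp + V9BIAS64 (2047), so for a true stack address of 0x2000 the
 * biased %sp is 0x2000 - 2047 = 0x1801, and [%sp + V9BIAS64 + 0]
 * again resolves to 0x2000, the %l0 save slot.
 */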
/*
 * SPILL_64bit spills a 64-bit-wide kernel register window. It
 * assumes that the kernel context and the nucleus context are the
 * same. The stack pointer is required to be eight-byte aligned.
 */
#define	SPILL_64bit(tail)			\
2:	stx	%l0, [%sp + V9BIAS64 + 0]	;\
	stx	%l1, [%sp + V9BIAS64 + 8]	;\
	stx	%l2, [%sp + V9BIAS64 + 16]	;\
	stx	%l3, [%sp + V9BIAS64 + 24]	;\
	stx	%l4, [%sp + V9BIAS64 + 32]	;\
	stx	%l5, [%sp + V9BIAS64 + 40]	;\
	stx	%l6, [%sp + V9BIAS64 + 48]	;\
	stx	%l7, [%sp + V9BIAS64 + 56]	;\
	stx	%i0, [%sp + V9BIAS64 + 64]	;\
	stx	%i1, [%sp + V9BIAS64 + 72]	;\
	stx	%i2, [%sp + V9BIAS64 + 80]	;\
	stx	%i3, [%sp + V9BIAS64 + 88]	;\
	stx	%i4, [%sp + V9BIAS64 + 96]	;\
	stx	%i5, [%sp + V9BIAS64 + 104]	;\
	stx	%i6, [%sp + V9BIAS64 + 112]	;\
	stx	%i7, [%sp + V9BIAS64 + 120]	;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-18-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

#define	SPILL_64bit_ktt1(tail)			\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty					;\
	.align	128

#define	SPILL_mixed_ktt1(tail)			\
	btst	1, %sp				;\
	bz,a,pt	%xcc, fault_32bit_/**/tail	;\
	srl	%sp, 0, %sp			;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty					;\
	.align	128

/*
 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi. It is used to spill
 * non-kernel windows. The stack pointer is required to be eight-byte
 * aligned.
 */
#define	SPILL_64bit_asi(asi_num, tail)		\
	mov	0 + V9BIAS64, %g1		;\
2:	stxa	%l0, [%sp + %g1]asi_num		;\
	mov	8 + V9BIAS64, %g2		;\
	stxa	%l1, [%sp + %g2]asi_num		;\
	mov	16 + V9BIAS64, %g3		;\
	stxa	%l2, [%sp + %g3]asi_num		;\
	mov	24 + V9BIAS64, %g4		;\
	stxa	%l3, [%sp + %g4]asi_num		;\
	add	%sp, 32, %g5			;\
	stxa	%l4, [%g5 + %g1]asi_num		;\
	stxa	%l5, [%g5 + %g2]asi_num		;\
	stxa	%l6, [%g5 + %g3]asi_num		;\
	stxa	%l7, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i0, [%g5 + %g1]asi_num		;\
	stxa	%i1, [%g5 + %g2]asi_num		;\
	stxa	%i2, [%g5 + %g3]asi_num		;\
	stxa	%i3, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i4, [%g5 + %g1]asi_num		;\
	stxa	%i5, [%g5 + %g2]asi_num		;\
	stxa	%i6, [%g5 + %g3]asi_num		;\
	stxa	%i7, [%g5 + %g4]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

#define	SPILL_64bit_tt1(asi_num, tail)		\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty					;\
	.align	128

/*
 * FILL_64bit fills a 64-bit-wide kernel register window. It assumes
 * that the kernel context and the nucleus context are the same. The
 * stack pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit(tail)			\
2:	TT_TRACE_L(trace_win)			;\
	ldx	[%sp + V9BIAS64 + 0], %l0	;\
	ldx	[%sp + V9BIAS64 + 8], %l1	;\
	ldx	[%sp + V9BIAS64 + 16], %l2	;\
	ldx	[%sp + V9BIAS64 + 24], %l3	;\
	ldx	[%sp + V9BIAS64 + 32], %l4	;\
	ldx	[%sp + V9BIAS64 + 40], %l5	;\
	ldx	[%sp + V9BIAS64 + 48], %l6	;\
	ldx	[%sp + V9BIAS64 + 56], %l7	;\
	ldx	[%sp + V9BIAS64 + 64], %i0	;\
	ldx	[%sp + V9BIAS64 + 72], %i1	;\
	ldx	[%sp + V9BIAS64 + 80], %i2	;\
	ldx	[%sp + V9BIAS64 + 88], %i3	;\
	ldx	[%sp + V9BIAS64 + 96], %i4	;\
	ldx	[%sp + V9BIAS64 + 104], %i5	;\
	ldx	[%sp + V9BIAS64 + 112], %i6	;\
	ldx	[%sp + V9BIAS64 + 120], %i7	;\
	restored				;\
	retry					;\
	SKIP(31-18-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty
/*
 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi. It is used to fill
 * non-kernel windows. The stack pointer is required to be eight-byte
 * aligned.
 */
#define	FILL_64bit_asi(asi_num, tail)		\
	mov	V9BIAS64 + 0, %g1		;\
2:	TT_TRACE_L(trace_win)			;\
	ldxa	[%sp + %g1]asi_num, %l0		;\
	mov	V9BIAS64 + 8, %g2		;\
	ldxa	[%sp + %g2]asi_num, %l1		;\
	mov	V9BIAS64 + 16, %g3		;\
	ldxa	[%sp + %g3]asi_num, %l2		;\
	mov	V9BIAS64 + 24, %g4		;\
	ldxa	[%sp + %g4]asi_num, %l3		;\
	add	%sp, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %l4		;\
	ldxa	[%g5 + %g2]asi_num, %l5		;\
	ldxa	[%g5 + %g3]asi_num, %l6		;\
	ldxa	[%g5 + %g4]asi_num, %l7		;\
	add	%g5, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %i0		;\
	ldxa	[%g5 + %g2]asi_num, %i1		;\
	ldxa	[%g5 + %g3]asi_num, %i2		;\
	ldxa	[%g5 + %g4]asi_num, %i3		;\
	add	%g5, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %i4		;\
	ldxa	[%g5 + %g2]asi_num, %i5		;\
	ldxa	[%g5 + %g3]asi_num, %i6		;\
	ldxa	[%g5 + %g4]asi_num, %i7		;\
	restored				;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty


#endif /* !lint */

/*
 * SPILL_mixed spills either size window, depending on
 * whether %sp is even or odd, to a 32-bit address space.
 * This may only be used in conjunction with SPILL_32bit/
 * SPILL_64bit.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	SPILL_mixed		\
	btst	1, %sp		;\
	bz,a,pt	%xcc, 1b	;\
	srl	%sp, 0, %sp	;\
	ba,pt	%xcc, 2b	;\
	nop			;\
	.align	128

/*
 * FILL_mixed(ASI) fills either size window, depending on
 * whether %sp is even or odd, from a 32-bit address space.
 * This may only be used in conjunction with FILL_32bit/
 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of fill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	FILL_mixed		\
	btst	1, %sp		;\
	bz,a,pt	%xcc, 1b	;\
	srl	%sp, 0, %sp	;\
	ba,pt	%xcc, 2b	;\
	nop			;\
	.align	128
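/*
 * A worked example of the parity dispatch in SPILL_mixed/FILL_mixed
 * above: a 32-bit frame keeps a word-aligned (even) %sp, while a
 * 64-bit frame carries the 2047-byte V9BIAS64 bias, leaving %sp odd.
 * btst 1, %sp therefore routes one parity back to the 32-bit handler
 * at 1b and the other to the 64-bit handler at 2b, with the
 * srl %sp, 0, %sp in the delay slot truncating %sp for the 32-bit
 * address space these handlers target, per the comments above.
 */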
/*
 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
 * respectively, into the address space via the designated asi. The
 * unbiased stack pointer is required to be eight-byte aligned (even for
 * the 32-bit case even though this code does not require such strict
 * alignment).
 *
 * With SPARC v9 the spill trap takes precedence over the cleanwin trap,
 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
 * will cause cwp + 2 to be spilled but will not clean cwp + 1. That
 * window may contain kernel data, so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap. These handlers then
 * spill the appropriate window but also back up a window and clean the
 * window that didn't get a cleanwin trap.
 */
#define	SPILL_32clean(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
	sta	%l0, [%sp + %g0]asi_num		;\
	mov	4, %g1				;\
	sta	%l1, [%sp + %g1]asi_num		;\
	mov	8, %g2				;\
	sta	%l2, [%sp + %g2]asi_num		;\
	mov	12, %g3				;\
	sta	%l3, [%sp + %g3]asi_num		;\
	add	%sp, 16, %g4			;\
	sta	%l4, [%g4 + %g0]asi_num		;\
	sta	%l5, [%g4 + %g1]asi_num		;\
	sta	%l6, [%g4 + %g2]asi_num		;\
	sta	%l7, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i0, [%g4 + %g0]asi_num		;\
	sta	%i1, [%g4 + %g1]asi_num		;\
	sta	%i2, [%g4 + %g2]asi_num		;\
	sta	%i3, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i4, [%g4 + %g0]asi_num		;\
	sta	%i5, [%g4 + %g1]asi_num		;\
	sta	%i6, [%g4 + %g2]asi_num		;\
	sta	%i7, [%g4 + %g3]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	b	.spill_clean			;\
	mov	WSTATE_USER32, %g7		;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

#define	SPILL_64clean(asi_num, tail)		\
	mov	0 + V9BIAS64, %g1		;\
	stxa	%l0, [%sp + %g1]asi_num		;\
	mov	8 + V9BIAS64, %g2		;\
	stxa	%l1, [%sp + %g2]asi_num		;\
	mov	16 + V9BIAS64, %g3		;\
	stxa	%l2, [%sp + %g3]asi_num		;\
	mov	24 + V9BIAS64, %g4		;\
	stxa	%l3, [%sp + %g4]asi_num		;\
	add	%sp, 32, %g5			;\
	stxa	%l4, [%g5 + %g1]asi_num		;\
	stxa	%l5, [%g5 + %g2]asi_num		;\
	stxa	%l6, [%g5 + %g3]asi_num		;\
	stxa	%l7, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i0, [%g5 + %g1]asi_num		;\
	stxa	%i1, [%g5 + %g2]asi_num		;\
	stxa	%i2, [%g5 + %g3]asi_num		;\
	stxa	%i3, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i4, [%g5 + %g1]asi_num		;\
	stxa	%i5, [%g5 + %g2]asi_num		;\
	stxa	%i6, [%g5 + %g3]asi_num		;\
	stxa	%i7, [%g5 + %g4]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	b	.spill_clean			;\
	mov	WSTATE_USER64, %g7		;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty


/*
 * Floating point disabled.
 */
#define	FP_DISABLED_TRAP		\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_disabled	;\
	nop				;\
	.align	32
/*
 * Floating point exceptions.
 */
#define	FP_IEEE_TRAP				\
	TT_TRACE(trace_gen)			;\
	ba,pt	%xcc, .fp_ieee_exception	;\
	nop					;\
	.align	32

#define	FP_TRAP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_exception	;\
	nop				;\
	.align	32

#if !defined(lint)

/*
 * ECACHE_ECC error traps at level 0 and level 1
 */
#define	ECACHE_ECC(table_name)	\
	.global	table_name	;\
table_name:			;\
	membar	#Sync		;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

#endif /* !lint */

/*
 * illegal instruction trap
 */
#define	ILLTRAP_INSTR			  \
	membar	#Sync			  ;\
	TT_TRACE(trace_gen)		  ;\
	or	%g0, P_UTRAP4, %g2	  ;\
	or	%g0, T_UNIMP_INSTR, %g3	  ;\
	sethi	%hi(.check_v9utrap), %g4  ;\
	jmp	%g4 + %lo(.check_v9utrap) ;\
	nop				  ;\
	.align	32

/*
 * tag overflow trap
 */
#define	TAG_OVERFLOW			  \
	TT_TRACE(trace_gen)		  ;\
	or	%g0, P_UTRAP10, %g2	  ;\
	or	%g0, T_TAG_OVERFLOW, %g3  ;\
	sethi	%hi(.check_v9utrap), %g4  ;\
	jmp	%g4 + %lo(.check_v9utrap) ;\
	nop				  ;\
	.align	32

/*
 * divide by zero trap
 */
#define	DIV_BY_ZERO			  \
	TT_TRACE(trace_gen)		  ;\
	or	%g0, P_UTRAP11, %g2	  ;\
	or	%g0, T_IDIV0, %g3	  ;\
	sethi	%hi(.check_v9utrap), %g4  ;\
	jmp	%g4 + %lo(.check_v9utrap) ;\
	nop				  ;\
	.align	32

/*
 * trap instruction for V9 user trap handlers
 */
#define	TRAP_INSTR			  \
	TT_TRACE(trace_gen)		  ;\
	or	%g0, T_SOFTWARE_TRAP, %g3 ;\
	sethi	%hi(.check_v9utrap), %g4  ;\
	jmp	%g4 + %lo(.check_v9utrap) ;\
	nop				  ;\
	.align	32
#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR

/*
 * LEVEL_INTERRUPT is for level N interrupts.
 * VECTOR_INTERRUPT is for the vector trap.
 */
#define	LEVEL_INTERRUPT(level)		\
	.global	tt_pil/**/level		;\
tt_pil/**/level:			;\
	ba,pt	%xcc, pil_interrupt	;\
	mov	level, %g4		;\
	.align	32

#define	LEVEL14_INTERRUPT		\
	ba	pil14_interrupt		;\
	mov	PIL_14, %g4		;\
	.align	32

#define	CPU_MONDO			\
	ba,a,pt	%xcc, cpu_mondo		;\
	.align	32

#define	DEV_MONDO			\
	ba,a,pt	%xcc, dev_mondo		;\
	.align	32

/*
 * We take over the rtba after we set our trap table and
 * fault status area. The watchdog reset trap is now handled by the OS.
 */
#define	WATCHDOG_RESET			\
	mov	PTL1_BAD_WATCHDOG, %g1	;\
	ba,a,pt	%xcc, .watchdog_trap	;\
	.align	32

/*
 * RED is for traps that use the red mode handler.
 * We should never see these either.
 */
#define	RED				\
	mov	PTL1_BAD_RED, %g1	;\
	ba,a,pt	%xcc, .watchdog_trap	;\
	.align	32
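/*
 * ILLTRAP_INSTR, TAG_OVERFLOW, DIV_BY_ZERO, and TRAP_INSTR above all
 * share one dispatch convention, illustrated with DIV_BY_ZERO: %g2
 * (where set) carries the utrap table offset (P_UTRAP11), %g3 the
 * trap type (T_IDIV0), and the jump to .check_v9utrap (below) either
 * vectors to a registered V9 user trap handler or falls back to
 * sys_trap/trap().
 */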
/*
 * MMU Trap Handlers.
 */

/*
 * synthesize for trap(): SFSR in %g3
 */
#define	IMMU_EXCEPTION				\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	rdpr	%tpc, %g2			;\
	ldx	[%g3 + MMFSA_I_TYPE], %g1	;\
	ldx	[%g3 + MMFSA_I_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	or	%g3, %g1, %g3			;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_INSTR_EXCEPTION, %g1		;\
	.align	32

/*
 * synthesize for trap(): TAG_ACCESS in %g2, SFSR in %g3
 */
#define	DMMU_EXCEPTION			\
	ba,a,pt	%xcc, .dmmu_exception	;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_PRIV			\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2	;\
	/* Fault type not available in MMU fault status area */ ;\
	mov	MMFSA_F_PRVACT, %g1		;\
	ldx	[%g3 + MMFSA_D_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	ba,pt	%xcc, .mmu_priv_exception	;\
	or	%g3, %g1, %g3			;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_NOT_ALIGNED			\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2	;\
	/* Fault type not available in MMU fault status area */ ;\
	mov	MMFSA_F_UNALIGN, %g1		;\
	ldx	[%g3 + MMFSA_D_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	ba,pt	%xcc, .mmu_exception_not_aligned ;\
	or	%g3, %g1, %g3	/* SFSR */	;\
	.align	32
/*
 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
 */

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_LDDF_NOT_ALIGNED		\
	ba,a,pt	%xcc, .dmmu_exc_lddf_not_aligned ;\
	.align	32
/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_STDF_NOT_ALIGNED		\
	ba,a,pt	%xcc, .dmmu_exc_stdf_not_aligned ;\
	.align	32

#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
#endif

#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 */
	.global	tt0_dtlbmiss
tt0_dtlbmiss:
	.global	tt1_dtlbmiss
tt1_dtlbmiss:
	nop
#endif

/*
 * Data miss handler (must be exactly 32 instructions)
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss
 * handler.
 *
 * User TLB miss handling depends upon whether a user process has one or
 * two TSBs. User TSB information (physical base and size code) is kept
 * in two dedicated scratchpad registers. Absence of a user TSB (primarily
 * the second TSB) is indicated by a negative value (-1) in that register.
 */
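/*
 * A sketch of the TAG_ACCESS value synthesized below: the fault
 * address and context number from the MMU fault status area are
 * or'ed into one register, mirroring the sun4u tag access register
 * layout (va in the upper bits, context in the low 13 bits). For
 * example, a miss at va 0x2a000 in context 5 would yield
 * 0x2a000 | 5 = 0x2a005.
 */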
/*
 * synthesize for miss handler: TAG_ACCESS in %g2
 */
#define	DTLB_MISS(table_name)					;\
	.global	table_name/**/_dtlbmiss				;\
table_name/**/_dtlbmiss:					;\
	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */ ;\
	MMU_FAULT_STATUS_AREA(%g7)				;\
	ldx	[%g7 + MMFSA_D_ADDR], %g2	/* address */	;\
	ldx	[%g7 + MMFSA_D_CTX], %g3	/* g3 = ctx */	;\
	or	%g2, %g3, %g2			/* TAG_ACCESS */ ;\
	cmp	%g3, INVALID_CONTEXT				;\
	ble,pn	%xcc, sfmmu_kdtlb_miss				;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	mov	SCRATCHPAD_UTSBREG2, %g1			;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1  /* get 2nd tsbreg */	;\
	brgez,pn %g1, sfmmu_udtlb_slowpath /* branch if 2 TSBs */ ;\
	nop							;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */	;\
	ba,pt	%xcc, sfmmu_udtlb_fastpath /* no 4M TSB, miss */ ;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	.align	128


#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of ITLB_MISS().
 */
	.global	tt0_itlbmiss
tt0_itlbmiss:
	.global	tt1_itlbmiss
tt1_itlbmiss:
	nop
#endif

/*
 * Instruction miss handler.
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * ldda instructions will have their ASI patched
 * by sfmmu_patch_ktsb at runtime.
 * MUST be EXACTLY 32 instructions or we'll break.
 */

/*
 * synthesize for miss handler: TAG_ACCESS in %g2
 */
#define	ITLB_MISS(table_name)					\
	.global	table_name/**/_itlbmiss				;\
table_name/**/_itlbmiss:					;\
	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */ ;\
	MMU_FAULT_STATUS_AREA(%g7)				;\
	ldx	[%g7 + MMFSA_I_ADDR], %g2	/* g2 = address */ ;\
	ldx	[%g7 + MMFSA_I_CTX], %g3	/* g3 = ctx */	;\
	or	%g2, %g3, %g2			/* TAG_ACCESS */ ;\
	cmp	%g3, INVALID_CONTEXT				;\
	ble,pn	%xcc, sfmmu_kitlb_miss				;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	mov	SCRATCHPAD_UTSBREG2, %g1			;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1  /* get 2nd tsbreg */	;\
	brgez,pn %g1, sfmmu_uitlb_slowpath /* branch if 2 TSBs */ ;\
	nop							;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */	;\
	ba,pt	%xcc, sfmmu_uitlb_fastpath /* no 4M TSB, miss */ ;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	.align	128

#define	DTSB_MISS \
	GOTO_TT(sfmmu_slow_dmmu_miss,trace_dmmu)

#define	ITSB_MISS \
	GOTO_TT(sfmmu_slow_immu_miss,trace_immu)
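/*
 * On the "exactly 32 instructions" constraint noted above: each
 * spill/fill or TLB-miss vector owns a 32-instruction slot, and
 * 32 instructions * 4 bytes = 128 bytes, which is what the trailing
 * .align 128 in DTLB_MISS/ITLB_MISS enforces. The instruction-count
 * comments inside the macros (3 for HAT_PERCPU_DBSTAT, 11 for
 * GET_1ST_TSBE_PTR) exist so the budget can be re-checked by hand.
 */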
/*
 * This macro is the first level handler for fast protection faults.
 * It first demaps the tlb entry which generated the fault and then
 * attempts to set the modify bit on the hash. It needs to be
 * exactly 32 instructions.
 */
/*
 * synthesize for miss handler: TAG_ACCESS in %g2
 */
#define	DTLB_PROT						\
	MMU_FAULT_STATUS_AREA(%g7)				;\
	ldx	[%g7 + MMFSA_D_ADDR], %g2	/* address */	;\
	ldx	[%g7 + MMFSA_D_CTX], %g3	/* %g3 = ctx */	;\
	or	%g2, %g3, %g2			/* TAG_ACCESS */ ;\
	/*							;\
	 * g2 = tag access register				;\
	 * g3 = ctx number					;\
	 */							;\
	TT_TRACE(trace_dataprot)  /* 2 instr ifdef TRAPTRACE */	;\
	/* clobbers g1 and g6 XXXQ? */				;\
	brnz,pt	%g3, sfmmu_uprot_trap	/* user trap */		;\
	nop							;\
	ba,a,pt	%xcc, sfmmu_kprot_trap	/* kernel trap */	;\
	.align	128

#define	DMMU_EXCEPTION_TL1		;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	.align	32

#define	MISALIGN_ADDR_TL1		;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	.align	32

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 * ttextra = value to OR in to trap type (%tt) (in)
 */
#ifdef TRAPTRACE
#define	TRACE_TSBHIT(ttextra)					\
	membar	#Sync						;\
	sethi	%hi(FLUSH_ADDR), %g6				;\
	flush	%g6						;\
	TRACE_PTR(%g3, %g6)					;\
	GET_TRACE_TICK(%g6)					;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi			;\
	stna	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */ ;\
	stna	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */	;\
	rdpr	%tnpc, %g6					;\
	stna	%g6, [%g3 + TRAP_ENT_F2]%asi			;\
	stna	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */ ;\
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi			;\
	rdpr	%tpc, %g6					;\
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi			;\
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)				;\
	rdpr	%tt, %g6					;\
	or	%g6, (ttextra), %g1				;\
	stha	%g1, [%g3 + TRAP_ENT_TT]%asi			;\
	MMU_FAULT_STATUS_AREA(%g4)				;\
	mov	MMFSA_D_ADDR, %g1				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	move	%xcc, MMFSA_I_ADDR, %g1				;\
	cmp	%g6, T_INSTR_MMU_MISS				;\
	move	%xcc, MMFSA_I_ADDR, %g1				;\
	ldx	[%g4 + %g1], %g1				;\
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */ ;\
	mov	MMFSA_D_CTX, %g1				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	move	%xcc, MMFSA_I_CTX, %g1				;\
	cmp	%g6, T_INSTR_MMU_MISS				;\
	move	%xcc, MMFSA_I_CTX, %g1				;\
	ldx	[%g4 + %g1], %g1				;\
	stna	%g1, [%g3 + TRAP_ENT_TR]%asi			;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
#define	TRACE_TSBHIT(ttextra)
#endif


#if defined(lint)

struct scb	trap_table;
struct scb	scb;		/* trap_table/scb are the same object */

#else /* lint */

/*
 * =======================================================================
 * SPARC V9 TRAP TABLE
 *
 * The trap table is divided into two halves: the first half is used when
 * taking traps when TL=0; the second half is used when taking traps from
 * TL>0. Note that handlers in the second half of the table might not be able
 * to make the same assumptions as handlers in the first half of the table.
 *
 * Worst case trap nesting so far:
 *
 *	at TL=0 client issues software trap requesting service
 *	at TL=1 nucleus wants a register window
 *	at TL=2 register window clean/spill/fill takes a TLB miss
 *	at TL=3 processing TLB miss
 *	at TL=4 handle asynchronous error
 *
 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
 *
 * =======================================================================
 */
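/*
 * On the layout that follows: an ordinary TL=0 vector owns 8
 * instructions (32 bytes) while spill/fill vectors own 32 (128
 * bytes), so the .align 32 and .align 128 directives in the macros
 * above keep every entry on its slot boundary; each half of the
 * table covers trap types 0x000 - 0x1FF.
 */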
	.section ".text"
	.align	4
	.global	trap_table, scb, trap_table0, trap_table1, etrap_table
	.type	trap_table, #object
	.type	trap_table0, #object
	.type	trap_table1, #object
	.type	scb, #object
trap_table:
scb:
trap_table0:
	/* hardware traps */
	NOT;				/* 000	reserved */
	RED;				/* 001	power on reset */
	WATCHDOG_RESET;			/* 002	watchdog reset */
	RED;				/* 003	externally initiated reset */
	RED;				/* 004	software initiated reset */
	RED;				/* 005	red mode exception */
	NOT; NOT;			/* 006 - 007 reserved */
	IMMU_EXCEPTION;			/* 008	instruction access exception */
	ITSB_MISS;			/* 009	instruction access MMU miss */
	NOT;				/* 00A	reserved */
	NOT; NOT4;			/* 00B - 00F reserved */
	ILLTRAP_INSTR;			/* 010	illegal instruction */
	TRAP(T_PRIV_INSTR);		/* 011	privileged opcode */
	TRAP(T_UNIMP_LDD);		/* 012	unimplemented LDD */
	TRAP(T_UNIMP_STD);		/* 013	unimplemented STD */
	NOT4; NOT4; NOT4;		/* 014 - 01F reserved */
	FP_DISABLED_TRAP;		/* 020	fp disabled */
	FP_IEEE_TRAP;			/* 021	fp exception ieee 754 */
	FP_TRAP;			/* 022	fp exception other */
	TAG_OVERFLOW;			/* 023	tag overflow */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	DIV_BY_ZERO;			/* 028	division by zero */
	NOT;				/* 029	internal processor error */
	NOT; NOT; NOT4;			/* 02A - 02F reserved */
	DMMU_EXCEPTION;			/* 030	data access exception */
	DTSB_MISS;			/* 031	data access MMU miss */
	NOT;				/* 032	reserved */
	NOT;				/* 033	data access protection */
	DMMU_EXC_AG_NOT_ALIGNED;	/* 034	mem address not aligned */
	DMMU_EXC_LDDF_NOT_ALIGNED;	/* 035	LDDF mem address not aligned */
	DMMU_EXC_STDF_NOT_ALIGNED;	/* 036	STDF mem address not aligned */
	DMMU_EXC_AG_PRIV;		/* 037	privileged action */
	NOT;				/* 038	LDQF mem address not aligned */
	NOT;				/* 039	STQF mem address not aligned */
	NOT; NOT; NOT4;			/* 03A - 03F reserved */
	NOT;				/* 040	async data error */
	LEVEL_INTERRUPT(1);		/* 041	interrupt level 1 */
	LEVEL_INTERRUPT(2);		/* 042	interrupt level 2 */
	LEVEL_INTERRUPT(3);		/* 043	interrupt level 3 */
	LEVEL_INTERRUPT(4);		/* 044	interrupt level 4 */
	LEVEL_INTERRUPT(5);		/* 045	interrupt level 5 */
	LEVEL_INTERRUPT(6);		/* 046	interrupt level 6 */
	LEVEL_INTERRUPT(7);		/* 047	interrupt level 7 */
	LEVEL_INTERRUPT(8);		/* 048	interrupt level 8 */
	LEVEL_INTERRUPT(9);		/* 049	interrupt level 9 */
	LEVEL_INTERRUPT(10);		/* 04A	interrupt level 10 */
	LEVEL_INTERRUPT(11);		/* 04B	interrupt level 11 */
	LEVEL_INTERRUPT(12);		/* 04C	interrupt level 12 */
	LEVEL_INTERRUPT(13);		/* 04D	interrupt level 13 */
	LEVEL14_INTERRUPT;		/* 04E	interrupt level 14 */
	LEVEL_INTERRUPT(15);		/* 04F	interrupt level 15 */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F reserved */
	NOT;				/* 060	interrupt vector */
	GOTO(kmdb_trap);		/* 061	PA watchpoint */
	GOTO(kmdb_trap);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt0);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070	reserved */
	NOT;				/* 071	reserved */
	NOT;				/* 072	reserved */
	NOT;				/* 073	reserved */
	NOT4; NOT4			/* 074 - 07B reserved */
	CPU_MONDO;			/* 07C	cpu_mondo */
	DEV_MONDO;			/* 07D	dev_mondo */
	GOTO_TT(resumable_error, trace_gen);	/* 07E	resumable error */
	GOTO_TT(nonresumable_error, trace_gen);	/* 07F	nonresumable error */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_asi(ASI_AIUP,sn0);	/* 084	spill 1 normal */
	SPILL_64bit_asi(ASI_AIUP,sn0);	/* 088	spill 2 normal */
	SPILL_32clean(ASI_AIUP,sn0);	/* 08C	spill 3 normal */
	SPILL_64clean(ASI_AIUP,sn0);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0A4	spill 1 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0A8	spill 2 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0AC	spill 3 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0C4	fill 1 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0C8	fill 2 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0CC	fill 3 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4;				/* 0E0	fill 0 other */
	NOT4;				/* 0E4	fill 1 other */
	NOT4;				/* 0E8	fill 2 other */
	NOT4;				/* 0EC	fill 3 other */
	NOT4;				/* 0F0	fill 4 other */
	NOT4;				/* 0F4	fill 5 other */
	NOT4;				/* 0F8	fill 6 other */
	NOT4;				/* 0FC	fill 7 other */
	/* user traps */
	GOTO(syscall_trap_4x);		/* 100	old system call */
	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
	TRAP(T_DIV0);			/* 102	user divide by zero */
	GOTO(.flushw);			/* 103	flush windows */
	GOTO(.clean_windows);		/* 104	clean windows */
	BAD;				/* 105	range check ?? */
	GOTO(.fix_alignment);		/* 106	do unaligned references */
	BAD;				/* 107	unused */
	SYSCALL(syscall_trap32);	/* 108	ILP32 system call on LP64 */
	GOTO(set_trap0_addr);		/* 109	set trap0 address */
	BAD; BAD; BAD4;			/* 10A - 10F unused */
	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
	GOTO(.getcc);			/* 120	get condition codes */
	GOTO(.setcc);			/* 121	set condition codes */
	GOTO(.getpsr);			/* 122	get psr */
	GOTO(.setpsr);			/* 123	set psr (some fields) */
	GOTO(get_timestamp);		/* 124	get timestamp */
	GOTO(get_virtime);		/* 125	get lwp virtual time */
	PRIV(self_xcall);		/* 126	self xcall */
	GOTO(get_hrestime);		/* 127	get hrestime */
	BAD;				/* 128	ST_SETV9STACK */
	GOTO(.getlgrp);			/* 129	get lgrpid */
	BAD; BAD; BAD4;			/* 12A - 12F unused */
	BAD4; BAD4;			/* 130 - 137 unused */
	DTRACE_PID;			/* 138	dtrace pid tracing provider */
	DTRACE_FASTTRAP;		/* 139	dtrace fasttrap provider */
	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
	BAD; BAD4;			/* 13B - 13F unused */
	SYSCALL(syscall_trap)		/* 140	LP64 system call */
	SYSCALL(nosys);			/* 141	unused system call trap */
#ifdef DEBUG_USER_TRAPTRACECTL
	GOTO(.traptrace_freeze);	/* 142	freeze traptrace */
	GOTO(.traptrace_unfreeze);	/* 143	unfreeze traptrace */
#else
	SYSCALL(nosys);			/* 142	unused system call trap */
	SYSCALL(nosys);			/* 143	unused system call trap */
#endif
	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD;				/* 170 - unused */
	BAD;				/* 171 - unused */
	BAD; BAD;			/* 172 - 173 unused */
	BAD4; BAD4;			/* 174 - 17B unused */
#ifdef	PTL1_PANIC_DEBUG
	mov	PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
					/* 17C	test ptl1_panic */
#else
	BAD;				/* 17C	unused */
#endif	/* PTL1_PANIC_DEBUG */
	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
	PRIV(obp_bpt);			/* 17F	obp breakpoint */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
	.size	trap_table0, (.-trap_table0)
trap_table1:
	NOT4; NOT4;			/* 000 - 007 unused */
	NOT;				/* 008	instruction access exception */
	ITSB_MISS;			/* 009	instruction access MMU miss */
	NOT;				/* 00A	reserved */
	NOT; NOT4;			/* 00B - 00F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
	NOT4;				/* 020 - 023 unused */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	NOT4; NOT4;			/* 028 - 02F unused */
	DMMU_EXCEPTION_TL1;		/* 030	data access exception */
	DTSB_MISS;			/* 031	data access MMU miss */
	NOT;				/* 032	reserved */
	NOT;				/* 033	unused */
	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 040 - 04F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
	NOT;				/* 060	unused */
	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt1);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070	reserved */
	NOT;				/* 071	reserved */
	NOT;				/* 072	reserved */
	NOT;				/* 073	reserved */
	NOT4; NOT4;			/* 074 - 07B reserved */
	NOT;				/* 07C	reserved */
	NOT;				/* 07D	reserved */
	NOT;				/* 07E	resumable error */
	GOTO_TT(nonresumable_error, trace_gen);	/* 07F	nonresumable error */
	NOTP4;				/* 080	spill 0 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 084	spill 1 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 088	spill 2 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 08C	spill 3 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 090	spill 4 normal */
	NOTP4;				/* 094	spill 5 normal */
	SPILL_64bit_ktt1(sk);		/* 098	spill 6 normal */
	SPILL_mixed_ktt1(sk);		/* 09C	spill 7 normal */
	NOTP4;				/* 0A0	spill 0 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0A4	spill 1 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0A8	spill 2 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0AC	spill 3 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0B0	spill 4 other */
	NOTP4;				/* 0B4	spill 5 other */
	NOTP4;				/* 0B8	spill 6 other */
	NOTP4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	NOT4;				/* 0C4	fill 1 normal */
	NOT4;				/* 0C8	fill 2 normal */
	NOT4;				/* 0CC	fill 3 normal */
	NOT4;				/* 0D0	fill 4 normal */
	NOT4;				/* 0D4	fill 5 normal */
	NOT4;				/* 0D8	fill 6 normal */
	NOT4;				/* 0DC	fill 7 normal */
	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
/*
 * Code running at TL>0 does not use soft traps, so
 * we can truncate the table here.
 * However:
 * sun4v uses (hypervisor) ta instructions at TL > 0, so
 * provide a safety net for now.
 */
	/* soft traps */
	BAD4; BAD4; BAD4; BAD4;		/* 100 - 10F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 110 - 11F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 120 - 12F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 130 - 13F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 140 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 170 - 17F unused */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
etrap_table:
	.size	trap_table1, (.-trap_table1)
	.size	trap_table, (.-trap_table)
	.size	scb, (.-scb)

/*
 * We get to exec_fault in the case of an instruction miss where the tte
 * has no execute bit set. We go to tl0 to handle it.
 *
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
/*
 * synthesize for trap(): TAG_ACCESS in %g2
 */
	ALTENTRY(exec_fault)
	TRACE_TSBHIT(TT_MMU_EXEC)
	MMU_FAULT_STATUS_AREA(%g4)
	ldx	[%g4 + MMFSA_I_ADDR], %g2	/* g2 = address */
	ldx	[%g4 + MMFSA_I_CTX], %g3	/* g3 = ctx */
	srlx	%g2, MMU_PAGESHIFT, %g2		! align address to page boundary
	sllx	%g2, MMU_PAGESHIFT, %g2
	or	%g2, %g3, %g2			/* TAG_ACCESS */
	mov	T_INSTR_MMU_MISS, %g3		! arg2 = traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

.mmu_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5		! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp
	bz,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1

.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1

.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	1f
	or	%g5, CPU_DTRACE_BADADDR, %g5
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done

1:
	sllx	%g3, 32, %g3
	or	%g3, %g1, %g3
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

.fp_disabled:
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1

	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 2f
	nop
	ldn	[%g5 + P_UTRAP7], %g5		! fp_disabled utrap?
	brz,a,pn %g5, 2f
	nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,a,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g0, %g0, %g7
2:
	set	fp_disabled, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 1f
	nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * Register Inputs:
 *	%g5	user trap handler
 *	%g7	misaligned addr - for alignment traps only
 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below
	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	mov	%g7, %l3			! arg2 == misaligned address

	rdpr	%tstate, %g1			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g1
	wrpr	%g1, %g4, %tstate
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE
	/* NOTREACHED */

.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f
	nop
	CPU_ADDR(%g4, %g1)			! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5		! load thread pointer
	ldn	[%g5 + T_PROCP], %g5		! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5		! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	nop

	brz,pt	%g5, 3f			! if p_utraps == NULL goto trap()
	rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	smul	%g1, CPTRSIZE, %g2
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR]	! set CPU_TL1_HDLR
	rdpr	%tpc, %g1			! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1		! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR]	! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4		! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4			! and instruction with mask
	bnz,a,pt %icc, 3f			! if %g4 == zero, %g1 is an ILLTRAP
	nop					! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
	/* NOTREACHED */

/*
 * Register Inputs:
 *	%g5	user trap handler
 */
.setup_v9utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l7			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l6			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	rdpr	%tstate, %g2			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g2
	wrpr	%g2, %g4, %tstate

	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
	ldn	[%g4 + P_AS], %g4		! load as pointer
	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
	cmp	%l7, %g4			! check for single-step set
	bne,pt	%xcc, 4f
	nop
	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
	bne,pt	%icc, 4f
	nop
	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
	mov	%l7, %g4			! on entry to precise user trap
	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
						! at time of trap
	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
						! %g4 == userlimit
	FAST_TRAP_DONE
	/* NOTREACHED */
4:
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE_CHK_INTR
	/* NOTREACHED */

.fp_exception:
	CPU_ADDR(%g1, %g4)
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2

	/*
	 * Cheetah takes an unfinished_FPop trap for a certain range of
	 * operands to the "fitos" instruction. Instead of going through the
	 * slow software emulation path, we try to simulate the "fitos"
	 * instruction via "fitod" and "fdtos" provided the following
	 * conditions are met:
	 *
	 *	fpu_exists is set (if DEBUG)
	 *	not in privileged mode
	 *	ftt is unfinished_FPop
	 *	NXM IEEE trap is not enabled
	 *	instruction at %tpc is "fitos"
	 *
	 *  Usage:
	 *	%g1	per cpu address
	 *	%g2	%fsr
	 *	%g6	user instruction
	 *
	 * Note that we can take a memory access related trap while trying
	 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
	 * flag to catch those traps and let the SFMMU code deal with page
	 * fault and data access exception.
	 */
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7
	ld	[%g7 + %lo(fpu_exists)], %g7
	brz,pn	%g7, .fp_exception_cont
	nop
#endif
	rdpr	%tstate, %g7			! branch if in privileged mode
	btst	TSTATE_PRIV, %g7
	bnz,pn	%xcc, .fp_exception_cont
	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
	cmp	%g7, FTT_UNFIN
	set	FSR_TEM_NX, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
	andcc	%g2, %g5, %g0
	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
	rdpr	%tpc, %g5			! get faulting PC

	or	%g0, 1, %g7
	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
	lda	[%g5]ASI_USER, %g6		! get user's instruction
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag

	set	FITOS_INSTR_MASK, %g7
	and	%g6, %g7, %g7
	set	FITOS_INSTR, %g5
	cmp	%g7, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
	nop

	/*
	 * This is an unfinished FPops trap for the "fitos" instruction. We
	 * need to simulate "fitos" via the "fitod" and "fdtos" instruction
	 * sequence.
	 *
	 * We need a temporary FP register to do the conversion. Since
	 * both source and destination operands for the "fitos" instruction
	 * have to be within %f0-%f31, we use an FP register from the upper
	 * half to guarantee that it won't collide with the source or the
	 * dest operand. However, we do have to save and restore its value.
	 *
	 * We use %d62 as a temporary FP register for the conversion and
	 * branch to appropriate instruction within the conversion tables
	 * based upon the rs2 and rd values.
	 */
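	/*
	 * A worked example of the computed jump below: each fitod in
	 * _fitos_fitod_table is one 4-byte instruction, so after rs2
	 * is masked out of the user instruction, sllx %g7, 2, %g7
	 * scales the register number by 4 and jmp %g4 + %g7 lands on
	 * that register's entry (rs2 = 3 -> offset 12 -> the
	 * "fitod %f3, %d62" slot). The same scheme indexes
	 * _fitos_fdtos_table by rd afterwards.
	 */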
	std	%d62, [%g1 + CPU_TMP1]		! save original value

	srl	%g6, FITOS_RS2_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fitod_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fitod_done
	.empty

_fitos_fitod_table:
	fitod	%f0, %d62
	fitod	%f1, %d62
	fitod	%f2, %d62
	fitod	%f3, %d62
	fitod	%f4, %d62
	fitod	%f5, %d62
	fitod	%f6, %d62
	fitod	%f7, %d62
	fitod	%f8, %d62
	fitod	%f9, %d62
	fitod	%f10, %d62
	fitod	%f11, %d62
	fitod	%f12, %d62
	fitod	%f13, %d62
	fitod	%f14, %d62
	fitod	%f15, %d62
	fitod	%f16, %d62
	fitod	%f17, %d62
	fitod	%f18, %d62
	fitod	%f19, %d62
	fitod	%f20, %d62
	fitod	%f21, %d62
	fitod	%f22, %d62
	fitod	%f23, %d62
	fitod	%f24, %d62
	fitod	%f25, %d62
	fitod	%f26, %d62
	fitod	%f27, %d62
	fitod	%f28, %d62
	fitod	%f29, %d62
	fitod	%f30, %d62
	fitod	%f31, %d62
_fitos_fitod_done:

	/*
	 * Now convert data back into single precision
	 */
	srl	%g6, FITOS_RD_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fdtos_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fdtos_done
	.empty

_fitos_fdtos_table:
	fdtos	%d62, %f0
	fdtos	%d62, %f1
	fdtos	%d62, %f2
	fdtos	%d62, %f3
	fdtos	%d62, %f4
	fdtos	%d62, %f5
	fdtos	%d62, %f6
	fdtos	%d62, %f7
	fdtos	%d62, %f8
	fdtos	%d62, %f9
	fdtos	%d62, %f10
	fdtos	%d62, %f11
	fdtos	%d62, %f12
	fdtos	%d62, %f13
	fdtos	%d62, %f14
	fdtos	%d62, %f15
	fdtos	%d62, %f16
	fdtos	%d62, %f17
	fdtos	%d62, %f18
	fdtos	%d62, %f19
	fdtos	%d62, %f20
	fdtos	%d62, %f21
	fdtos	%d62, %f22
	fdtos	%d62, %f23
	fdtos	%d62, %f24
	fdtos	%d62, %f25
	fdtos	%d62, %f26
	fdtos	%d62, %f27
	fdtos	%d62, %f28
	fdtos	%d62, %f29
	fdtos	%d62, %f30
	fdtos	%d62, %f31
_fitos_fdtos_done:

	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62

#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE
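	/*
	 * The kstat updates above are lock-free: casxa [%g7]ASI_N, %g5,
	 * %g6 stores %g6 only if the counter still equals %g5, and always
	 * returns the old memory value in %g6; on a race the cmp/bne
	 * retries with the freshly observed value. A C sketch of the
	 * same loop (atomic_cas_64 is the corresponding Solaris
	 * atomic-ops primitive):
	 *
	 *	do {
	 *		old = *counter;
	 *	} while (atomic_cas_64(counter, old, old + 1) != old);
	 */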
#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE

.fp_exception_cont:
	/*
	 * Let _fp_exception deal with simulating the FPop instruction.
	 * Note that we need to pass %fsr in %g2 (already read above).
	 */

	set	_fp_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4


/*
 * Register windows
 */
.flushw:
.clean_windows:
	rdpr	%tnpc, %g1
	wrpr	%g1, %tpc
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * .spill_clean: clean the previous window, restore the wstate, and
 * "done".
 *
 * Entry: %g7 contains new wstate
 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
	wrpr	%g6, %cwp
	TT_TRACE_L(trace_win)
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate
	saved
	retry					! restores correct %cwp

.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1
	mov	1, %g2
	stb	%g2, [%g1 + P_FIXALIGNMENT]
	FAST_TRAP_DONE
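
/*
 * STDF_REG and LDDF_REG move a 64-bit value between cpu_tmp1 and a
 * double FP register whose number is only known at run time. Each
 * macro scales the 5-bit rd field by 8 (the size of one two-instruction
 * table entry) and jumps into a table in which every entry handles
 * exactly one register: the std/ldd sits in the delay slot of a branch
 * to the common exit. The tables are indexed by the raw rd field, in
 * which the high bit of a double-register number is encoded in the low
 * bit, hence the %f0, %f32, %f2, %f34, ... interleaving.
 */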

#define	STDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark1:	set	start1, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start1:	ba,pt	%xcc, done1			;\
	std	%f0, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f32, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f2, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f34, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f4, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f36, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f6, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f38, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f8, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f40, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f10, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f42, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f12, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f44, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f14, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f46, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f16, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f48, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f18, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f50, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f20, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f52, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f22, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f54, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f24, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f56, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f26, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f58, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f62, [ADDR + CPU_TMP1]		;\
done1:

#define	LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start2:	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f62		;\
done2:
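
/*
 * The two handlers below emulate a user lddf/stdf whose effective
 * address is word but not doubleword aligned. The inline emulation is
 * only attempted for the plain primary/secondary ASIs (plus their
 * no-fault variants for loads); little-endian and other ASIs are
 * punted to fpu_trap in C. A load is done as two aligned 32-bit
 * user-space loads combined in cpu_tmp1 and copied into the FP
 * register with LDDF_REG; a store is the mirror image via STDF_REG.
 */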

.lddf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5		! stash sfar
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2	! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
	srl	%g6, 23, %g1		! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for ldda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_PNF		! primary no fault address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_SNF		! secondary no fault address space
	bne,a,pn %icc, 3f
	nop
2:
	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
	add	%g5, 4, %g5		! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5		! combine data
	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1

	srl	%g6, 25, %g3		! %g6 has the instruction
	and	%g3, 0x1F, %g3		! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4

.stdf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7	! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction

	srl	%g6, 23, %g1		! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for stda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xff, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	bne,a,pn %icc, 3f
	nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6		! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER	! first half
	add	%g5, 4, %g5		! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER	! second half

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4

#ifdef DEBUG_USER_TRAPTRACECTL

.traptrace_freeze:
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]
	FAST_TRAP_DONE

.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	FAST_TRAP_DONE

#endif /* DEBUG_USER_TRAPTRACECTL */
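
/*
 * .getcc returns the V8 icc bits in the user's %g1; .setcc takes new
 * icc bits from the user's %g1. The user's %g1 lives in the normal
 * globals, while fast traps run with %gl elevated, so both handlers
 * briefly drop %gl to 0 to touch the user's register set. Kernel
 * globals do not survive the switch back, so anything needed
 * afterwards is staged in the per-cpu tmp words.
 */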
.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	wrpr	%g0, 0, %gl
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	wrpr	%g0, 0, %gl
	mov	%g1, %o0			! fetch new icc bits from user's %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	sll	%o0, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

/*
 * getpsr(void)
 * Note that the xcc part of the ccr is not provided.
 * The V8 code shows why the V9 trap is not faster:
 *	#define GETPSR_TRAP() \
 *	mov %psr, %i0; jmp %l2; rett %l2+4; nop;
 */

	.type	.getpsr, #function
.getpsr:
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)

/*
 * setpsr(newpsr)
 * Note that there is no support for ccr.xcc in the V9 code.
 */

	.type	.setpsr, #function
.setpsr:
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or	%g0, CCR_ICC, %g3
	sllx	%g3, TSTATE_CCR_SHIFT, %g2

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)

/*
 * getlgrp
 * get home lgrpid on which the calling thread is currently executing.
 */
	.type	.getlgrp, #function
.getlgrp:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1		! sign-extend lgrpid into %o1
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)
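
/*
 * SunOS 4.x binary compatibility: old 4.x binaries enter the kernel
 * via trap 0. If the process has registered a trap0 emulation address
 * (see set_trap0_addr below), control is redirected there and the
 * original %tnpc is handed to user code in %g6; otherwise the trap is
 * treated as a system call, with only the old mmap number rewritten
 * in the kernel.
 */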
/*
 * Entry for old 4.x trap (trap 0).
 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f			! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%tnpc, %l1		! save old tnpc
	wrpr	%g0, %g2, %tnpc		! setup tnpc

	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	mov	%l1, %g6		! pass tnpc to user code in %g6
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	!
	! The old 4.x mmap is the only system call whose number must be
	! rewritten here; all the others are handled by the compatibility
	! library.
	!
	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP1], %l0
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)

/*
 * Handler for software trap 9.
 * Set trap0 emulation address for old 4.x system call trap.
 * XXX - this should be a system call.
 */
	ENTRY_NP(set_trap0_addr)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	mov	%g1, %l0		! preserve CPU addr
	wrpr	%g0, 0, %gl
	mov	%g1, %l1		! new trap0 addr from user's %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)

/*
 * mmu_trap_tl1
 * trap handler for unexpected mmu traps.
 * Checks whether the trap was a user lddf/stdf alignment trap (in which
 * case we go to fpu_trap) or a user trap from the window handler (in
 * which case we save the state on the pcb). Otherwise, we go to
 * ptl1_panic.
 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! MMU fault address
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_TYPE], %g7	! XXXQ should be a MMFSA_F_ constant?
	ldx	[%g6 + MMFSA_D_CTX], %g6
	sllx	%g6, SFSR_CTX_SHIFT, %g6
	or	%g6, %g7, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! MMU context/type
	set	0xdeadbeef, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f
	nop
	sta	%g0, [%g7]ASI_MEM
	! XXXQ need to setup registers for sfmmu_mmu_trap?
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tpc, %g7
	/* in user_rtt? */
	set	rtt_fill_start, %g6
	cmp	%g7, %g6
	blu,pn	%xcc, 6f
	.empty
	set	rtt_fill_end, %g6
	cmp	%g7, %g6
	bgeu,pn	%xcc, 6f
	nop
	set	fault_rtt_fn1, %g7
	ba,a	7f
6:
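	!
	! Each trap-table entry is 32 bytes, so ((%tpc >> 5) & 0x1ff)
	! below recovers the trap type; window spill/fill handlers
	! occupy 128-byte (four-entry) blocks, so WTRAP_ALIGN masks
	! back to the start of the block and WTRAP_FAULTOFF locates
	! its fault branch.
	!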
	! check to see if the trap pc is in a window spill/fill handler
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bgeu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	! pc is inside the trap table, convert to trap type
	srl	%g7, 5, %g6		! XXXQ need #define
	and	%g6, 0x1ff, %g6		! XXXQ need #define
	! and check for a window trap type
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7

7:
	! Arguments are passed in the global set active after the
	! 'done' instruction. Before switching sets, we must save
	! the calculated next pc.
	wrpr	%g0, %g7, %tnpc
	wrpr	%g0, 1, %gl
	rdpr	%tt, %g5
	MMU_FAULT_STATUS_AREA(%g7)
	cmp	%g5, T_ALIGNMENT
	be,pn	%xcc, 1f
	ldx	[%g7 + MMFSA_D_ADDR], %g6
	ldx	[%g7 + MMFSA_D_CTX], %g7
	srlx	%g6, MMU_PAGESHIFT, %g6	/* align address */
	sllx	%g6, MMU_PAGESHIFT, %g6
	or	%g6, %g7, %g6		/* TAG_ACCESS */
1:
	done
	SET_SIZE(mmu_trap_tl1)

/*
 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers. These
 * traps are valid only when kmdb is loaded. When the debugger is active,
 * the code below is rewritten to transfer control to the appropriate
 * debugger entry points.
 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

/*
 * This entry is copied from OBP's trap table during boot.
 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT


#ifdef TRAPTRACE
/*
 * TRAPTRACE support.
 * labels here are branched to with "rd %pc, %g7" in the delay slot.
 * Return is done by "jmp %g7 + 4".
 */

trace_dmmu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_D_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_D_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

trace_immu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_I_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_I_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_I_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop
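
/*
 * The three trace handlers above record the same per-trap state;
 * trace_dmmu and trace_immu differ only in reading the fault address,
 * context and type from the data (MMFSA_D_*) versus instruction
 * (MMFSA_I_*) half of the MMU fault status area, while trace_gen
 * records no fault information at all.
 */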

trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	clr	%l0

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */

	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)
	jmp	%g7 + 4
	nop

/*
 * Trace a TSB miss
 *
 * g1 = tsb8k pointer (in)
 * g2 = tag access register (in)
 * g3 = tsb4m pointer (in)
 * g4 = tsbe tag (in/clobbered)
 * g5 - g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	.global	trace_tsbmiss
trace_tsbmiss:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	stna	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
	stna	%g4, [%g5 + TRAP_ENT_F1]%asi		! XXX? tsb tag
	rdpr	%tnpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
	srlx	%g1, 32, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! tsb8k pointer, upper 32 bits
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	mov	MMFSA_D_ADDR, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_ADDR, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_ADDR, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop

/*
 * g2 = tag access register (in)
 * g3 = ctx number (in)
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%g2, [%g1 + TRAP_ENT_SP]%asi	! tag access reg
	stna	%g0, [%g1 + TRAP_ENT_TR]%asi
	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop

#endif /* TRAPTRACE */

/*
 * Handle watchdog reset trap. Enable the MMU using the MMU_ENABLE
 * HV service, which requires the return target to be specified as a VA
 * since we are enabling the MMU. We set the target to ptl1_panic.
 */
	.type	.watchdog_trap, #function
.watchdog_trap:
	mov	1, %o0
	setx	ptl1_panic, %g2, %o1
	mov	MMU_ENABLE, %o5
	ta	FAST_TRAP
	done
	SET_SIZE(.watchdog_trap)

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_lddf_not_aligned, #function
.dmmu_exc_lddf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp
	bnz,pt	%xcc, .lddf_exception_not_aligned
	or	%g3, %g1, %g3			/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_lddf_not_aligned)

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_stdf_not_aligned, #function
.dmmu_exc_stdf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp
	bnz,pt	%xcc, .stdf_exception_not_aligned
	or	%g3, %g1, %g3			/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_stdf_not_aligned)
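
/*
 * .dmmu_exception synthesizes the two values the rest of the kernel
 * expects from the sun4v MMU fault status area: TAG_ACCESS (the
 * page-aligned fault address with the context number or'ed into the
 * low bits) in %g2, and an SFSR-style word (the context shifted by
 * SFSR_CTX_SHIFT, or'ed with the fault type) in %g3, then branches
 * to .mmu_exception_end with trap type T_DATA_EXCEPTION in %g1.
 */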
	.type	.dmmu_exception, #function
.dmmu_exception:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	ldx	[%g3 + MMFSA_D_TYPE], %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	srlx	%g2, MMU_PAGESHIFT, %g2		/* align address */
	sllx	%g2, MMU_PAGESHIFT, %g2
	or	%g2, %g3, %g2			/* TAG_ACCESS */
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	or	%g3, %g1, %g3			/* SFSR */
	ba,pt	%xcc, .mmu_exception_end
	mov	T_DATA_EXCEPTION, %g1
	SET_SIZE(.dmmu_exception)

/*
 * expects offset into tsbmiss area in %g1 and return pc in %g7
 */
stat_mmu:
	CPU_INDEX(%g5, %g6)
	sethi	%hi(tsbmiss_area), %g6
	sllx	%g5, TSBMISS_SHIFT, %g5
	or	%g6, %lo(tsbmiss_area), %g6
	add	%g6, %g5, %g6			/* g6 = tsbmiss area */
	ld	[%g6 + %g1], %g5
	add	%g5, 1, %g5
	jmp	%g7 + 4
	st	%g5, [%g6 + %g1]


/*
 * fast_trap_done, fast_trap_done_chk_intr:
 *
 * Due to the design of the UltraSPARC pipeline, pending interrupts are
 * not taken immediately after a RETRY or DONE instruction that causes
 * IE to go from 0 to 1. Instead, the instruction at %tpc or %tnpc is
 * allowed to execute first before any interrupts are taken. If that
 * instruction results in other traps, and if the corresponding trap
 * handler runs entirely at TL=1 with interrupts disabled, then pending
 * interrupts won't be taken until after yet another instruction
 * following the %tpc or %tnpc.
 *
 * A malicious user program can use this feature to block out interrupts
 * for extended durations, which can result in a send_mondo_timeout
 * kernel panic.
 *
 * This problem is addressed by servicing any pending interrupts via
 * sys_trap before returning to user mode from a fast trap handler. The
 * "done" instruction within a fast trap handler, which runs entirely
 * at TL=1 with interrupts disabled, is replaced with the FAST_TRAP_DONE
 * macro, which branches control to this fast_trap_done entry point.
 *
 * We check for any pending interrupts here and force a sys_trap to
 * service those interrupts, if any. To minimize overhead, pending
 * interrupts are only checked if the %tpc happens to be at a 16K
 * boundary, which allows a malicious program to execute at most 4K
 * consecutive instructions before we service any pending interrupts.
 * If a worst-case fast trap handler takes about 2 usec, then interrupts
 * will be blocked for at most 8 msec, less than a clock tick.
 *
 * For the cases where we don't know if the %tpc will cross a 16K
 * boundary, we can't use the above optimization and always process
 * any pending interrupts via the fast_trap_done_chk_intr entry point.
 *
 * Entry Conditions:
 *	%pstate		am:0 priv:1 ie:0
 *	globals are AG (not normal globals)
 */

	.global	fast_trap_done, fast_trap_done_chk_intr
fast_trap_done:
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	nop
	done

fast_trap_done_chk_intr:
1:	rd	SOFTINT, %g6
	brnz,pn	%g6, 2f			! branch if any pending intr
	nop
	done

2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via the "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

fast_trap_dummy_call:
	retl
	nop

#endif /* lint */