1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22/* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27#pragma ident "%Z%%M% %I% %E% SMI" 28 29#if !defined(lint) 30#include "assym.h" 31#endif /* !lint */ 32#include <sys/asm_linkage.h> 33#include <sys/privregs.h> 34#include <sys/sun4asi.h> 35#include <sys/machasi.h> 36#include <sys/hypervisor_api.h> 37#include <sys/machtrap.h> 38#include <sys/machthread.h> 39#include <sys/pcb.h> 40#include <sys/pte.h> 41#include <sys/mmu.h> 42#include <sys/machpcb.h> 43#include <sys/async.h> 44#include <sys/intreg.h> 45#include <sys/scb.h> 46#include <sys/psr_compat.h> 47#include <sys/syscall.h> 48#include <sys/machparam.h> 49#include <sys/traptrace.h> 50#include <vm/hat_sfmmu.h> 51#include <sys/archsystm.h> 52#include <sys/utrap.h> 53#include <sys/clock.h> 54#include <sys/intr.h> 55#include <sys/fpu/fpu_simulator.h> 56#include <vm/seg_spt.h> 57 58/* 59 * WARNING: If you add a fast trap handler which can be invoked by a 60 * non-privileged user, you may have to use the FAST_TRAP_DONE macro 61 * instead of "done" instruction to return back to the user mode. 
See 62 * comments for the "fast_trap_done" entry point for more information. 63 * 64 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the 65 * cases where you always want to process any pending interrupts before 66 * returning back to the user mode. 67 */ 68#define FAST_TRAP_DONE \ 69 ba,a fast_trap_done 70 71#define FAST_TRAP_DONE_CHK_INTR \ 72 ba,a fast_trap_done_chk_intr 73 74/* 75 * SPARC V9 Trap Table 76 * 77 * Most of the trap handlers are made from common building 78 * blocks, and some are instantiated multiple times within 79 * the trap table. So, I build a bunch of macros, then 80 * populate the table using only the macros. 81 * 82 * Many macros branch to sys_trap. Its calling convention is: 83 * %g1 kernel trap handler 84 * %g2, %g3 args for above 85 * %g4 desire %pil 86 */ 87 88#ifdef TRAPTRACE 89 90/* 91 * Tracing macro. Adds two instructions if TRAPTRACE is defined. 92 */ 93#define TT_TRACE(label) \ 94 ba label ;\ 95 rd %pc, %g7 96#define TT_TRACE_INS 2 97 98#define TT_TRACE_L(label) \ 99 ba label ;\ 100 rd %pc, %l4 ;\ 101 clr %l4 102#define TT_TRACE_L_INS 3 103 104#else 105 106#define TT_TRACE(label) 107#define TT_TRACE_INS 0 108 109#define TT_TRACE_L(label) 110#define TT_TRACE_L_INS 0 111 112#endif 113 114/* 115 * This macro is used to update per cpu mmu stats in perf critical 116 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER 117 * is defined. 118 */ 119#if defined(DEBUG) || defined(SFMMU_STAT_GATHER) 120#define HAT_PERCPU_DBSTAT(stat) \ 121 mov stat, %g1 ;\ 122 ba stat_mmu ;\ 123 rd %pc, %g7 124#else 125#define HAT_PERCPU_DBSTAT(stat) 126#endif /* DEBUG || SFMMU_STAT_GATHER */ 127 128/* 129 * This first set are funneled to trap() with %tt as the type. 130 * Trap will then either panic or send the user a signal. 131 */ 132/* 133 * NOT is used for traps that just shouldn't happen. 134 * It comes in both single and quadruple flavors. 
135 */ 136#if !defined(lint) 137 .global trap 138#endif /* !lint */ 139#define NOT \ 140 TT_TRACE(trace_gen) ;\ 141 set trap, %g1 ;\ 142 rdpr %tt, %g3 ;\ 143 ba,pt %xcc, sys_trap ;\ 144 sub %g0, 1, %g4 ;\ 145 .align 32 146#define NOT4 NOT; NOT; NOT; NOT 147 148#define NOTP \ 149 TT_TRACE(trace_gen) ;\ 150 ba,pt %xcc, ptl1_panic ;\ 151 mov PTL1_BAD_TRAP, %g1 ;\ 152 .align 32 153#define NOTP4 NOTP; NOTP; NOTP; NOTP 154 155/* 156 * RED is for traps that use the red mode handler. 157 * We should never see these either. 158 */ 159#define RED NOT 160/* 161 * BAD is used for trap vectors we don't have a kernel 162 * handler for. 163 * It also comes in single and quadruple versions. 164 */ 165#define BAD NOT 166#define BAD4 NOT4 167 168#define DONE \ 169 done; \ 170 .align 32 171 172/* 173 * TRAP vectors to the trap() function. 174 * It's main use is for user errors. 175 */ 176#if !defined(lint) 177 .global trap 178#endif /* !lint */ 179#define TRAP(arg) \ 180 TT_TRACE(trace_gen) ;\ 181 set trap, %g1 ;\ 182 mov arg, %g3 ;\ 183 ba,pt %xcc, sys_trap ;\ 184 sub %g0, 1, %g4 ;\ 185 .align 32 186 187/* 188 * SYSCALL is used for system calls on both ILP32 and LP64 kernels 189 * depending on the "which" parameter (should be syscall_trap, 190 * syscall_trap32, or nosys for unused system call traps). 191 */ 192#define SYSCALL(which) \ 193 TT_TRACE(trace_gen) ;\ 194 set (which), %g1 ;\ 195 ba,pt %xcc, sys_trap ;\ 196 sub %g0, 1, %g4 ;\ 197 .align 32 198 199/* 200 * GOTO just jumps to a label. 201 * It's used for things that can be fixed without going thru sys_trap. 202 */ 203#define GOTO(label) \ 204 .global label ;\ 205 ba,a label ;\ 206 .empty ;\ 207 .align 32 208 209/* 210 * GOTO_TT just jumps to a label. 211 * correctable ECC error traps at level 0 and 1 will use this macro. 212 * It's used for things that can be fixed without going thru sys_trap. 
213 */ 214#define GOTO_TT(label, ttlabel) \ 215 .global label ;\ 216 TT_TRACE(ttlabel) ;\ 217 ba,a label ;\ 218 .empty ;\ 219 .align 32 220 221/* 222 * Privileged traps 223 * Takes breakpoint if privileged, calls trap() if not. 224 */ 225#define PRIV(label) \ 226 rdpr %tstate, %g1 ;\ 227 btst TSTATE_PRIV, %g1 ;\ 228 bnz label ;\ 229 rdpr %tt, %g3 ;\ 230 set trap, %g1 ;\ 231 ba,pt %xcc, sys_trap ;\ 232 sub %g0, 1, %g4 ;\ 233 .align 32 234 235 236/* 237 * DTrace traps. 238 */ 239#define DTRACE_FASTTRAP \ 240 .global dtrace_fasttrap_probe ;\ 241 .global dtrace_fasttrap_probe_ptr ;\ 242 sethi %hi(dtrace_fasttrap_probe_ptr), %g4 ;\ 243 ldn [%g4 + %lo(dtrace_fasttrap_probe_ptr)], %g4 ;\ 244 set dtrace_fasttrap_probe, %g1 ;\ 245 brnz,pn %g4, user_trap ;\ 246 sub %g0, 1, %g4 ;\ 247 FAST_TRAP_DONE ;\ 248 .align 32 249 250#define DTRACE_PID \ 251 .global dtrace_pid_probe ;\ 252 set dtrace_pid_probe, %g1 ;\ 253 ba,pt %xcc, user_trap ;\ 254 sub %g0, 1, %g4 ;\ 255 .align 32 256 257#define DTRACE_RETURN \ 258 .global dtrace_return_probe ;\ 259 set dtrace_return_probe, %g1 ;\ 260 ba,pt %xcc, user_trap ;\ 261 sub %g0, 1, %g4 ;\ 262 .align 32 263 264/* 265 * REGISTER WINDOW MANAGEMENT MACROS 266 */ 267 268/* 269 * various convenient units of padding 270 */ 271#define SKIP(n) .skip 4*(n) 272 273/* 274 * CLEAN_WINDOW is the simple handler for cleaning a register window. 275 */ 276#define CLEAN_WINDOW \ 277 TT_TRACE_L(trace_win) ;\ 278 rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin ;\ 279 clr %l0; clr %l1; clr %l2; clr %l3 ;\ 280 clr %l4; clr %l5; clr %l6; clr %l7 ;\ 281 clr %o0; clr %o1; clr %o2; clr %o3 ;\ 282 clr %o4; clr %o5; clr %o6; clr %o7 ;\ 283 retry; .align 128 284 285#if !defined(lint) 286 287/* 288 * If we get an unresolved tlb miss while in a window handler, the fault 289 * handler will resume execution at the last instruction of the window 290 * hander, instead of delivering the fault to the kernel. Spill handlers 291 * use this to spill windows into the wbuf. 
292 * 293 * The mixed handler works by checking %sp, and branching to the correct 294 * handler. This is done by branching back to label 1: for 32b frames, 295 * or label 2: for 64b frames; which implies the handler order is: 32b, 296 * 64b, mixed. The 1: and 2: labels are offset into the routines to 297 * allow the branchs' delay slots to contain useful instructions. 298 */ 299 300/* 301 * SPILL_32bit spills a 32-bit-wide kernel register window. It 302 * assumes that the kernel context and the nucleus context are the 303 * same. The stack pointer is required to be eight-byte aligned even 304 * though this code only needs it to be four-byte aligned. 305 */ 306#define SPILL_32bit(tail) \ 307 srl %sp, 0, %sp ;\ 3081: st %l0, [%sp + 0] ;\ 309 st %l1, [%sp + 4] ;\ 310 st %l2, [%sp + 8] ;\ 311 st %l3, [%sp + 12] ;\ 312 st %l4, [%sp + 16] ;\ 313 st %l5, [%sp + 20] ;\ 314 st %l6, [%sp + 24] ;\ 315 st %l7, [%sp + 28] ;\ 316 st %i0, [%sp + 32] ;\ 317 st %i1, [%sp + 36] ;\ 318 st %i2, [%sp + 40] ;\ 319 st %i3, [%sp + 44] ;\ 320 st %i4, [%sp + 48] ;\ 321 st %i5, [%sp + 52] ;\ 322 st %i6, [%sp + 56] ;\ 323 st %i7, [%sp + 60] ;\ 324 TT_TRACE_L(trace_win) ;\ 325 saved ;\ 326 retry ;\ 327 SKIP(31-19-TT_TRACE_L_INS) ;\ 328 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 329 .empty 330 331/* 332 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit 333 * wide address space via the designated asi. It is used to spill 334 * non-kernel windows. The stack pointer is required to be eight-byte 335 * aligned even though this code only needs it to be four-byte 336 * aligned. 
337 */ 338#define SPILL_32bit_asi(asi_num, tail) \ 339 srl %sp, 0, %sp ;\ 3401: sta %l0, [%sp + %g0]asi_num ;\ 341 mov 4, %g1 ;\ 342 sta %l1, [%sp + %g1]asi_num ;\ 343 mov 8, %g2 ;\ 344 sta %l2, [%sp + %g2]asi_num ;\ 345 mov 12, %g3 ;\ 346 sta %l3, [%sp + %g3]asi_num ;\ 347 add %sp, 16, %g4 ;\ 348 sta %l4, [%g4 + %g0]asi_num ;\ 349 sta %l5, [%g4 + %g1]asi_num ;\ 350 sta %l6, [%g4 + %g2]asi_num ;\ 351 sta %l7, [%g4 + %g3]asi_num ;\ 352 add %g4, 16, %g4 ;\ 353 sta %i0, [%g4 + %g0]asi_num ;\ 354 sta %i1, [%g4 + %g1]asi_num ;\ 355 sta %i2, [%g4 + %g2]asi_num ;\ 356 sta %i3, [%g4 + %g3]asi_num ;\ 357 add %g4, 16, %g4 ;\ 358 sta %i4, [%g4 + %g0]asi_num ;\ 359 sta %i5, [%g4 + %g1]asi_num ;\ 360 sta %i6, [%g4 + %g2]asi_num ;\ 361 sta %i7, [%g4 + %g3]asi_num ;\ 362 TT_TRACE_L(trace_win) ;\ 363 saved ;\ 364 retry ;\ 365 SKIP(31-25-TT_TRACE_L_INS) ;\ 366 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 367 .empty 368 369#define SPILL_32bit_tt1(asi_num, tail) \ 370 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 371 .empty ;\ 372 .align 128 373 374 375/* 376 * FILL_32bit fills a 32-bit-wide kernel register window. It assumes 377 * that the kernel context and the nucleus context are the same. The 378 * stack pointer is required to be eight-byte aligned even though this 379 * code only needs it to be four-byte aligned. 
380 */ 381#define FILL_32bit(tail) \ 382 srl %sp, 0, %sp ;\ 3831: TT_TRACE_L(trace_win) ;\ 384 ld [%sp + 0], %l0 ;\ 385 ld [%sp + 4], %l1 ;\ 386 ld [%sp + 8], %l2 ;\ 387 ld [%sp + 12], %l3 ;\ 388 ld [%sp + 16], %l4 ;\ 389 ld [%sp + 20], %l5 ;\ 390 ld [%sp + 24], %l6 ;\ 391 ld [%sp + 28], %l7 ;\ 392 ld [%sp + 32], %i0 ;\ 393 ld [%sp + 36], %i1 ;\ 394 ld [%sp + 40], %i2 ;\ 395 ld [%sp + 44], %i3 ;\ 396 ld [%sp + 48], %i4 ;\ 397 ld [%sp + 52], %i5 ;\ 398 ld [%sp + 56], %i6 ;\ 399 ld [%sp + 60], %i7 ;\ 400 restored ;\ 401 retry ;\ 402 SKIP(31-19-TT_TRACE_L_INS) ;\ 403 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 404 .empty 405 406/* 407 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit 408 * wide address space via the designated asi. It is used to fill 409 * non-kernel windows. The stack pointer is required to be eight-byte 410 * aligned even though this code only needs it to be four-byte 411 * aligned. 412 */ 413#define FILL_32bit_asi(asi_num, tail) \ 414 srl %sp, 0, %sp ;\ 4151: TT_TRACE_L(trace_win) ;\ 416 mov 4, %g1 ;\ 417 lda [%sp + %g0]asi_num, %l0 ;\ 418 mov 8, %g2 ;\ 419 lda [%sp + %g1]asi_num, %l1 ;\ 420 mov 12, %g3 ;\ 421 lda [%sp + %g2]asi_num, %l2 ;\ 422 lda [%sp + %g3]asi_num, %l3 ;\ 423 add %sp, 16, %g4 ;\ 424 lda [%g4 + %g0]asi_num, %l4 ;\ 425 lda [%g4 + %g1]asi_num, %l5 ;\ 426 lda [%g4 + %g2]asi_num, %l6 ;\ 427 lda [%g4 + %g3]asi_num, %l7 ;\ 428 add %g4, 16, %g4 ;\ 429 lda [%g4 + %g0]asi_num, %i0 ;\ 430 lda [%g4 + %g1]asi_num, %i1 ;\ 431 lda [%g4 + %g2]asi_num, %i2 ;\ 432 lda [%g4 + %g3]asi_num, %i3 ;\ 433 add %g4, 16, %g4 ;\ 434 lda [%g4 + %g0]asi_num, %i4 ;\ 435 lda [%g4 + %g1]asi_num, %i5 ;\ 436 lda [%g4 + %g2]asi_num, %i6 ;\ 437 lda [%g4 + %g3]asi_num, %i7 ;\ 438 restored ;\ 439 retry ;\ 440 SKIP(31-25-TT_TRACE_L_INS) ;\ 441 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 442 .empty 443 444 445/* 446 * SPILL_64bit spills a 64-bit-wide kernel register window. It 447 * assumes that the kernel context and the nucleus context are the 448 * same. 
The stack pointer is required to be eight-byte aligned. 449 */ 450#define SPILL_64bit(tail) \ 4512: stx %l0, [%sp + V9BIAS64 + 0] ;\ 452 stx %l1, [%sp + V9BIAS64 + 8] ;\ 453 stx %l2, [%sp + V9BIAS64 + 16] ;\ 454 stx %l3, [%sp + V9BIAS64 + 24] ;\ 455 stx %l4, [%sp + V9BIAS64 + 32] ;\ 456 stx %l5, [%sp + V9BIAS64 + 40] ;\ 457 stx %l6, [%sp + V9BIAS64 + 48] ;\ 458 stx %l7, [%sp + V9BIAS64 + 56] ;\ 459 stx %i0, [%sp + V9BIAS64 + 64] ;\ 460 stx %i1, [%sp + V9BIAS64 + 72] ;\ 461 stx %i2, [%sp + V9BIAS64 + 80] ;\ 462 stx %i3, [%sp + V9BIAS64 + 88] ;\ 463 stx %i4, [%sp + V9BIAS64 + 96] ;\ 464 stx %i5, [%sp + V9BIAS64 + 104] ;\ 465 stx %i6, [%sp + V9BIAS64 + 112] ;\ 466 stx %i7, [%sp + V9BIAS64 + 120] ;\ 467 TT_TRACE_L(trace_win) ;\ 468 saved ;\ 469 retry ;\ 470 SKIP(31-18-TT_TRACE_L_INS) ;\ 471 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 472 .empty 473 474#define SPILL_64bit_ktt1(tail) \ 475 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 476 .empty ;\ 477 .align 128 478 479#define SPILL_mixed_ktt1(tail) \ 480 btst 1, %sp ;\ 481 bz,a,pt %xcc, fault_32bit_/**/tail ;\ 482 srl %sp, 0, %sp ;\ 483 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 484 .empty ;\ 485 .align 128 486 487/* 488 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit 489 * wide address space via the designated asi. It is used to spill 490 * non-kernel windows. The stack pointer is required to be eight-byte 491 * aligned. 
492 */ 493#define SPILL_64bit_asi(asi_num, tail) \ 494 mov 0 + V9BIAS64, %g1 ;\ 4952: stxa %l0, [%sp + %g1]asi_num ;\ 496 mov 8 + V9BIAS64, %g2 ;\ 497 stxa %l1, [%sp + %g2]asi_num ;\ 498 mov 16 + V9BIAS64, %g3 ;\ 499 stxa %l2, [%sp + %g3]asi_num ;\ 500 mov 24 + V9BIAS64, %g4 ;\ 501 stxa %l3, [%sp + %g4]asi_num ;\ 502 add %sp, 32, %g5 ;\ 503 stxa %l4, [%g5 + %g1]asi_num ;\ 504 stxa %l5, [%g5 + %g2]asi_num ;\ 505 stxa %l6, [%g5 + %g3]asi_num ;\ 506 stxa %l7, [%g5 + %g4]asi_num ;\ 507 add %g5, 32, %g5 ;\ 508 stxa %i0, [%g5 + %g1]asi_num ;\ 509 stxa %i1, [%g5 + %g2]asi_num ;\ 510 stxa %i2, [%g5 + %g3]asi_num ;\ 511 stxa %i3, [%g5 + %g4]asi_num ;\ 512 add %g5, 32, %g5 ;\ 513 stxa %i4, [%g5 + %g1]asi_num ;\ 514 stxa %i5, [%g5 + %g2]asi_num ;\ 515 stxa %i6, [%g5 + %g3]asi_num ;\ 516 stxa %i7, [%g5 + %g4]asi_num ;\ 517 TT_TRACE_L(trace_win) ;\ 518 saved ;\ 519 retry ;\ 520 SKIP(31-25-TT_TRACE_L_INS) ;\ 521 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 522 .empty 523 524#define SPILL_64bit_tt1(asi_num, tail) \ 525 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 526 .empty ;\ 527 .align 128 528 529/* 530 * FILL_64bit fills a 64-bit-wide kernel register window. It assumes 531 * that the kernel context and the nucleus context are the same. The 532 * stack pointer is required to be eight-byte aligned. 
533 */ 534#define FILL_64bit(tail) \ 5352: TT_TRACE_L(trace_win) ;\ 536 ldx [%sp + V9BIAS64 + 0], %l0 ;\ 537 ldx [%sp + V9BIAS64 + 8], %l1 ;\ 538 ldx [%sp + V9BIAS64 + 16], %l2 ;\ 539 ldx [%sp + V9BIAS64 + 24], %l3 ;\ 540 ldx [%sp + V9BIAS64 + 32], %l4 ;\ 541 ldx [%sp + V9BIAS64 + 40], %l5 ;\ 542 ldx [%sp + V9BIAS64 + 48], %l6 ;\ 543 ldx [%sp + V9BIAS64 + 56], %l7 ;\ 544 ldx [%sp + V9BIAS64 + 64], %i0 ;\ 545 ldx [%sp + V9BIAS64 + 72], %i1 ;\ 546 ldx [%sp + V9BIAS64 + 80], %i2 ;\ 547 ldx [%sp + V9BIAS64 + 88], %i3 ;\ 548 ldx [%sp + V9BIAS64 + 96], %i4 ;\ 549 ldx [%sp + V9BIAS64 + 104], %i5 ;\ 550 ldx [%sp + V9BIAS64 + 112], %i6 ;\ 551 ldx [%sp + V9BIAS64 + 120], %i7 ;\ 552 restored ;\ 553 retry ;\ 554 SKIP(31-18-TT_TRACE_L_INS) ;\ 555 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 556 .empty 557 558/* 559 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit 560 * wide address space via the designated asi. It is used to fill 561 * non-kernel windows. The stack pointer is required to be eight-byte 562 * aligned. 
563 */ 564#define FILL_64bit_asi(asi_num, tail) \ 565 mov V9BIAS64 + 0, %g1 ;\ 5662: TT_TRACE_L(trace_win) ;\ 567 ldxa [%sp + %g1]asi_num, %l0 ;\ 568 mov V9BIAS64 + 8, %g2 ;\ 569 ldxa [%sp + %g2]asi_num, %l1 ;\ 570 mov V9BIAS64 + 16, %g3 ;\ 571 ldxa [%sp + %g3]asi_num, %l2 ;\ 572 mov V9BIAS64 + 24, %g4 ;\ 573 ldxa [%sp + %g4]asi_num, %l3 ;\ 574 add %sp, 32, %g5 ;\ 575 ldxa [%g5 + %g1]asi_num, %l4 ;\ 576 ldxa [%g5 + %g2]asi_num, %l5 ;\ 577 ldxa [%g5 + %g3]asi_num, %l6 ;\ 578 ldxa [%g5 + %g4]asi_num, %l7 ;\ 579 add %g5, 32, %g5 ;\ 580 ldxa [%g5 + %g1]asi_num, %i0 ;\ 581 ldxa [%g5 + %g2]asi_num, %i1 ;\ 582 ldxa [%g5 + %g3]asi_num, %i2 ;\ 583 ldxa [%g5 + %g4]asi_num, %i3 ;\ 584 add %g5, 32, %g5 ;\ 585 ldxa [%g5 + %g1]asi_num, %i4 ;\ 586 ldxa [%g5 + %g2]asi_num, %i5 ;\ 587 ldxa [%g5 + %g3]asi_num, %i6 ;\ 588 ldxa [%g5 + %g4]asi_num, %i7 ;\ 589 restored ;\ 590 retry ;\ 591 SKIP(31-25-TT_TRACE_L_INS) ;\ 592 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 593 .empty 594 595 596#endif /* !lint */ 597 598/* 599 * SPILL_mixed spills either size window, depending on 600 * whether %sp is even or odd, to a 32-bit address space. 601 * This may only be used in conjunction with SPILL_32bit/ 602 * FILL_64bit. 603 * Clear upper 32 bits of %sp if it is odd. 604 * We won't need to clear them in 64 bit kernel. 605 */ 606#define SPILL_mixed \ 607 btst 1, %sp ;\ 608 bz,a,pt %xcc, 1b ;\ 609 srl %sp, 0, %sp ;\ 610 ba,pt %xcc, 2b ;\ 611 nop ;\ 612 .align 128 613 614/* 615 * FILL_mixed(ASI) fills either size window, depending on 616 * whether %sp is even or odd, from a 32-bit address space. 617 * This may only be used in conjunction with FILL_32bit/ 618 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be 619 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular 620 * attention should be paid to the instructions that belong 621 * in the delay slots of the branches depending on the type 622 * of fill handler being branched to. 623 * Clear upper 32 bits of %sp if it is odd. 
624 * We won't need to clear them in 64 bit kernel. 625 */ 626#define FILL_mixed \ 627 btst 1, %sp ;\ 628 bz,a,pt %xcc, 1b ;\ 629 srl %sp, 0, %sp ;\ 630 ba,pt %xcc, 2b ;\ 631 nop ;\ 632 .align 128 633 634 635/* 636 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows, 637 * respectively, into the address space via the designated asi. The 638 * unbiased stack pointer is required to be eight-byte aligned (even for 639 * the 32-bit case even though this code does not require such strict 640 * alignment). 641 * 642 * With SPARC v9 the spill trap takes precedence over the cleanwin trap 643 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save 644 * will cause cwp + 2 to be spilled but will not clean cwp + 1. That 645 * window may contain kernel data so in user_rtt we set wstate to call 646 * these spill handlers on the first user spill trap. These handler then 647 * spill the appropriate window but also back up a window and clean the 648 * window that didn't get a cleanwin trap. 
649 */ 650#define SPILL_32clean(asi_num, tail) \ 651 srl %sp, 0, %sp ;\ 652 sta %l0, [%sp + %g0]asi_num ;\ 653 mov 4, %g1 ;\ 654 sta %l1, [%sp + %g1]asi_num ;\ 655 mov 8, %g2 ;\ 656 sta %l2, [%sp + %g2]asi_num ;\ 657 mov 12, %g3 ;\ 658 sta %l3, [%sp + %g3]asi_num ;\ 659 add %sp, 16, %g4 ;\ 660 sta %l4, [%g4 + %g0]asi_num ;\ 661 sta %l5, [%g4 + %g1]asi_num ;\ 662 sta %l6, [%g4 + %g2]asi_num ;\ 663 sta %l7, [%g4 + %g3]asi_num ;\ 664 add %g4, 16, %g4 ;\ 665 sta %i0, [%g4 + %g0]asi_num ;\ 666 sta %i1, [%g4 + %g1]asi_num ;\ 667 sta %i2, [%g4 + %g2]asi_num ;\ 668 sta %i3, [%g4 + %g3]asi_num ;\ 669 add %g4, 16, %g4 ;\ 670 sta %i4, [%g4 + %g0]asi_num ;\ 671 sta %i5, [%g4 + %g1]asi_num ;\ 672 sta %i6, [%g4 + %g2]asi_num ;\ 673 sta %i7, [%g4 + %g3]asi_num ;\ 674 TT_TRACE_L(trace_win) ;\ 675 b .spill_clean ;\ 676 mov WSTATE_USER32, %g7 ;\ 677 SKIP(31-25-TT_TRACE_L_INS) ;\ 678 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 679 .empty 680 681#define SPILL_64clean(asi_num, tail) \ 682 mov 0 + V9BIAS64, %g1 ;\ 683 stxa %l0, [%sp + %g1]asi_num ;\ 684 mov 8 + V9BIAS64, %g2 ;\ 685 stxa %l1, [%sp + %g2]asi_num ;\ 686 mov 16 + V9BIAS64, %g3 ;\ 687 stxa %l2, [%sp + %g3]asi_num ;\ 688 mov 24 + V9BIAS64, %g4 ;\ 689 stxa %l3, [%sp + %g4]asi_num ;\ 690 add %sp, 32, %g5 ;\ 691 stxa %l4, [%g5 + %g1]asi_num ;\ 692 stxa %l5, [%g5 + %g2]asi_num ;\ 693 stxa %l6, [%g5 + %g3]asi_num ;\ 694 stxa %l7, [%g5 + %g4]asi_num ;\ 695 add %g5, 32, %g5 ;\ 696 stxa %i0, [%g5 + %g1]asi_num ;\ 697 stxa %i1, [%g5 + %g2]asi_num ;\ 698 stxa %i2, [%g5 + %g3]asi_num ;\ 699 stxa %i3, [%g5 + %g4]asi_num ;\ 700 add %g5, 32, %g5 ;\ 701 stxa %i4, [%g5 + %g1]asi_num ;\ 702 stxa %i5, [%g5 + %g2]asi_num ;\ 703 stxa %i6, [%g5 + %g3]asi_num ;\ 704 stxa %i7, [%g5 + %g4]asi_num ;\ 705 TT_TRACE_L(trace_win) ;\ 706 b .spill_clean ;\ 707 mov WSTATE_USER64, %g7 ;\ 708 SKIP(31-25-TT_TRACE_L_INS) ;\ 709 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 710 .empty 711 712 713/* 714 * Floating point disabled. 
715 */ 716#define FP_DISABLED_TRAP \ 717 TT_TRACE(trace_gen) ;\ 718 ba,pt %xcc,.fp_disabled ;\ 719 nop ;\ 720 .align 32 721 722/* 723 * Floating point exceptions. 724 */ 725#define FP_IEEE_TRAP \ 726 TT_TRACE(trace_gen) ;\ 727 ba,pt %xcc,.fp_ieee_exception ;\ 728 nop ;\ 729 .align 32 730 731#define FP_TRAP \ 732 TT_TRACE(trace_gen) ;\ 733 ba,pt %xcc,.fp_exception ;\ 734 nop ;\ 735 .align 32 736 737#if !defined(lint) 738 739/* 740 * ECACHE_ECC error traps at level 0 and level 1 741 */ 742#define ECACHE_ECC(table_name) \ 743 .global table_name ;\ 744table_name: ;\ 745 membar #Sync ;\ 746 set trap, %g1 ;\ 747 rdpr %tt, %g3 ;\ 748 ba,pt %xcc, sys_trap ;\ 749 sub %g0, 1, %g4 ;\ 750 .align 32 751 752#endif /* !lint */ 753 754/* 755 * illegal instruction trap 756 */ 757#define ILLTRAP_INSTR \ 758 membar #Sync ;\ 759 TT_TRACE(trace_gen) ;\ 760 or %g0, P_UTRAP4, %g2 ;\ 761 or %g0, T_UNIMP_INSTR, %g3 ;\ 762 sethi %hi(.check_v9utrap), %g4 ;\ 763 jmp %g4 + %lo(.check_v9utrap) ;\ 764 nop ;\ 765 .align 32 766 767/* 768 * tag overflow trap 769 */ 770#define TAG_OVERFLOW \ 771 TT_TRACE(trace_gen) ;\ 772 or %g0, P_UTRAP10, %g2 ;\ 773 or %g0, T_TAG_OVERFLOW, %g3 ;\ 774 sethi %hi(.check_v9utrap), %g4 ;\ 775 jmp %g4 + %lo(.check_v9utrap) ;\ 776 nop ;\ 777 .align 32 778 779/* 780 * divide by zero trap 781 */ 782#define DIV_BY_ZERO \ 783 TT_TRACE(trace_gen) ;\ 784 or %g0, P_UTRAP11, %g2 ;\ 785 or %g0, T_IDIV0, %g3 ;\ 786 sethi %hi(.check_v9utrap), %g4 ;\ 787 jmp %g4 + %lo(.check_v9utrap) ;\ 788 nop ;\ 789 .align 32 790 791/* 792 * trap instruction for V9 user trap handlers 793 */ 794#define TRAP_INSTR \ 795 TT_TRACE(trace_gen) ;\ 796 or %g0, T_SOFTWARE_TRAP, %g3 ;\ 797 sethi %hi(.check_v9utrap), %g4 ;\ 798 jmp %g4 + %lo(.check_v9utrap) ;\ 799 nop ;\ 800 .align 32 801#define TRP4 TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR 802 803/* 804 * LEVEL_INTERRUPT is for level N interrupts. 805 * VECTOR_INTERRUPT is for the vector trap. 
806 */ 807#define LEVEL_INTERRUPT(level) \ 808 .global tt_pil/**/level ;\ 809tt_pil/**/level: ;\ 810 ba,pt %xcc, pil_interrupt ;\ 811 mov level, %g4 ;\ 812 .align 32 813 814#define LEVEL14_INTERRUPT \ 815 ba pil14_interrupt ;\ 816 mov PIL_14, %g4 ;\ 817 .align 32 818 819#define CPU_MONDO \ 820 ba,a,pt %xcc, cpu_mondo ;\ 821 .align 32 822 823#define DEV_MONDO \ 824 ba,a,pt %xcc, dev_mondo ;\ 825 .align 32 826 827/* 828 * MMU Trap Handlers. 829 */ 830 831/* 832 * synthesize for trap(): SFSR in %g3 833 */ 834#define IMMU_EXCEPTION \ 835 MMU_FAULT_STATUS_AREA(%g3) ;\ 836 rdpr %tpc, %g2 ;\ 837 ldx [%g3 + MMFSA_I_TYPE], %g1 ;\ 838 ldx [%g3 + MMFSA_I_CTX], %g3 ;\ 839 sllx %g3, SFSR_CTX_SHIFT, %g3 ;\ 840 or %g3, %g1, %g3 ;\ 841 ba,pt %xcc, .mmu_exception_end ;\ 842 mov T_INSTR_EXCEPTION, %g1 ;\ 843 .align 32 844 845/* 846 * synthesize for trap(): TAG_ACCESS in %g2, SFSR in %g3 847 */ 848#define DMMU_EXCEPTION \ 849 ba,a,pt %xcc, .dmmu_exception ;\ 850 .align 32 851 852/* 853 * synthesize for trap(): SFAR in %g2, SFSR in %g3 854 */ 855#define DMMU_EXC_AG_PRIV \ 856 MMU_FAULT_STATUS_AREA(%g3) ;\ 857 ldx [%g3 + MMFSA_D_ADDR], %g2 ;\ 858 /* Fault type not available in MMU fault status area */ ;\ 859 mov MMFSA_F_PRVACT, %g1 ;\ 860 ldx [%g3 + MMFSA_D_CTX], %g3 ;\ 861 sllx %g3, SFSR_CTX_SHIFT, %g3 ;\ 862 ba,pt %xcc, .mmu_priv_exception ;\ 863 or %g3, %g1, %g3 ;\ 864 .align 32 865 866/* 867 * synthesize for trap(): SFAR in %g2, SFSR in %g3 868 */ 869#define DMMU_EXC_AG_NOT_ALIGNED \ 870 MMU_FAULT_STATUS_AREA(%g3) ;\ 871 ldx [%g3 + MMFSA_D_ADDR], %g2 ;\ 872 /* Fault type not available in MMU fault status area */ ;\ 873 mov MMFSA_F_UNALIGN, %g1 ;\ 874 ldx [%g3 + MMFSA_D_CTX], %g3 ;\ 875 sllx %g3, SFSR_CTX_SHIFT, %g3 ;\ 876 ba,pt %xcc, .mmu_exception_not_aligned ;\ 877 or %g3, %g1, %g3 /* SFSR */ ;\ 878 .align 32 879/* 880 * SPARC V9 IMPL. DEP. 
#109(1) and (2) and #110(1) and (2) 881 */ 882 883/* 884 * synthesize for trap(): SFAR in %g2, SFSR in %g3 885 */ 886#define DMMU_EXC_LDDF_NOT_ALIGNED \ 887 ba,a,pt %xcc, .dmmu_exc_lddf_not_aligned ;\ 888 .align 32 889/* 890 * synthesize for trap(): SFAR in %g2, SFSR in %g3 891 */ 892#define DMMU_EXC_STDF_NOT_ALIGNED \ 893 ba,a,pt %xcc, .dmmu_exc_stdf_not_aligned ;\ 894 .align 32 895 896#if TAGACC_CTX_MASK != CTXREG_CTX_MASK 897#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK" 898#endif 899 900#if defined(cscope) 901/* 902 * Define labels to direct cscope quickly to labels that 903 * are generated by macro expansion of DTLB_MISS(). 904 */ 905 .global tt0_dtlbmiss 906tt0_dtlbmiss: 907 .global tt1_dtlbmiss 908tt1_dtlbmiss: 909 nop 910#endif 911 912/* 913 * Data miss handler (must be exactly 32 instructions) 914 * 915 * This handler is invoked only if the hypervisor has been instructed 916 * not to do any TSB walk. 917 * 918 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss 919 * handler. 920 * 921 * User TLB miss handling depends upon whether a user process has one or 922 * two TSBs. User TSB information (physical base and size code) is kept 923 * in two dedicated scratchpad registers. Absence of a user TSB (primarily 924 * second TSB) is indicated by a negative value (-1) in that register. 
925 */ 926 927/* 928 * synthesize for miss handler: TAG_ACCESS in %g2 929 */ 930#define DTLB_MISS(table_name) ;\ 931 .global table_name/**/_dtlbmiss ;\ 932table_name/**/_dtlbmiss: ;\ 933 HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */ ;\ 934 MMU_FAULT_STATUS_AREA(%g7) ;\ 935 ldx [%g7 + MMFSA_D_ADDR], %g2 /* address */ ;\ 936 ldx [%g7 + MMFSA_D_CTX], %g3 /* g3 = ctx */ ;\ 937 or %g2, %g3, %g2 /* TAG_ACCESS */ ;\ 938 cmp %g3, INVALID_CONTEXT ;\ 939 ble,pn %xcc, sfmmu_kdtlb_miss ;\ 940 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 941 mov SCRATCHPAD_UTSBREG2, %g1 ;\ 942 ldxa [%g1]ASI_SCRATCHPAD, %g1 /* get 2nd tsbreg */ ;\ 943 brgez,pn %g1, sfmmu_udtlb_slowpath /* brnach if 2 TSBs */ ;\ 944 nop ;\ 945 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5) /* 11 instr */ ;\ 946 ba,pt %xcc, sfmmu_udtlb_fastpath /* no 4M TSB, miss */ ;\ 947 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 948 .align 128 949 950 951#if defined(cscope) 952/* 953 * Define labels to direct cscope quickly to labels that 954 * are generated by macro expansion of ITLB_MISS(). 955 */ 956 .global tt0_itlbmiss 957tt0_itlbmiss: 958 .global tt1_itlbmiss 959tt1_itlbmiss: 960 nop 961#endif 962 963/* 964 * Instruction miss handler. 965 * 966 * This handler is invoked only if the hypervisor has been instructed 967 * not to do any TSB walk. 968 * 969 * ldda instructions will have their ASI patched 970 * by sfmmu_patch_ktsb at runtime. 971 * MUST be EXACTLY 32 instructions or we'll break. 
972 */ 973 974/* 975 * synthesize for miss handler: TAG_ACCESS in %g2 976 */ 977#define ITLB_MISS(table_name) \ 978 .global table_name/**/_itlbmiss ;\ 979table_name/**/_itlbmiss: ;\ 980 HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */ ;\ 981 MMU_FAULT_STATUS_AREA(%g7) ;\ 982 ldx [%g7 + MMFSA_I_ADDR], %g2 /* g2 = address */ ;\ 983 ldx [%g7 + MMFSA_I_CTX], %g3 /* g3 = ctx */ ;\ 984 or %g2, %g3, %g2 /* TAG_ACCESS */ ;\ 985 cmp %g3, INVALID_CONTEXT ;\ 986 ble,pn %xcc, sfmmu_kitlb_miss ;\ 987 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 988 mov SCRATCHPAD_UTSBREG2, %g1 ;\ 989 ldxa [%g1]ASI_SCRATCHPAD, %g1 /* get 2nd tsbreg */ ;\ 990 brgez,pn %g1, sfmmu_uitlb_slowpath /* branch if 2 TSBS */ ;\ 991 nop ;\ 992 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5) /* 11 instr */ ;\ 993 ba,pt %xcc, sfmmu_uitlb_fastpath /* no 4M TSB, miss */ ;\ 994 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 995 .align 128 996 997#define DTSB_MISS \ 998 GOTO_TT(sfmmu_slow_dmmu_miss,trace_dmmu) 999 1000#define ITSB_MISS \ 1001 GOTO_TT(sfmmu_slow_immu_miss,trace_immu) 1002 1003/* 1004 * This macro is the first level handler for fast protection faults. 1005 * It first demaps the tlb entry which generated the fault and then 1006 * attempts to set the modify bit on the hash. It needs to be 1007 * exactly 32 instructions. 1008 */ 1009/* 1010 * synthesize for miss handler: TAG_ACCESS in %g2 1011 */ 1012#define DTLB_PROT \ 1013 MMU_FAULT_STATUS_AREA(%g7) ;\ 1014 ldx [%g7 + MMFSA_D_ADDR], %g2 /* address */ ;\ 1015 ldx [%g7 + MMFSA_D_CTX], %g3 /* %g3 = ctx */ ;\ 1016 or %g2, %g3, %g2 /* TAG_ACCESS */ ;\ 1017 /* ;\ 1018 * g2 = tag access register ;\ 1019 * g3 = ctx number ;\ 1020 */ ;\ 1021 TT_TRACE(trace_dataprot) /* 2 instr ifdef TRAPTRACE */ ;\ 1022 /* clobbers g1 and g6 XXXQ? 
*/ ;\ 1023 brnz,pt %g3, sfmmu_uprot_trap /* user trap */ ;\ 1024 nop ;\ 1025 ba,a,pt %xcc, sfmmu_kprot_trap /* kernel trap */ ;\ 1026 .align 128 1027 1028#define DMMU_EXCEPTION_TL1 ;\ 1029 ba,a,pt %xcc, mmu_trap_tl1 ;\ 1030 .align 32 1031 1032#define MISALIGN_ADDR_TL1 ;\ 1033 ba,a,pt %xcc, mmu_trap_tl1 ;\ 1034 .align 32 1035 1036/* 1037 * Trace a tsb hit 1038 * g1 = tsbe pointer (in/clobbered) 1039 * g2 = tag access register (in) 1040 * g3 - g4 = scratch (clobbered) 1041 * g5 = tsbe data (in) 1042 * g6 = scratch (clobbered) 1043 * g7 = pc we jumped here from (in) 1044 * ttextra = value to OR in to trap type (%tt) (in) 1045 */ 1046#ifdef TRAPTRACE 1047#define TRACE_TSBHIT(ttextra) \ 1048 membar #Sync ;\ 1049 sethi %hi(FLUSH_ADDR), %g6 ;\ 1050 flush %g6 ;\ 1051 TRACE_PTR(%g3, %g6) ;\ 1052 GET_TRACE_TICK(%g6) ;\ 1053 stxa %g6, [%g3 + TRAP_ENT_TICK]%asi ;\ 1054 stna %g2, [%g3 + TRAP_ENT_SP]%asi /* tag access */ ;\ 1055 stna %g5, [%g3 + TRAP_ENT_F1]%asi /* tsb data */ ;\ 1056 rdpr %tnpc, %g6 ;\ 1057 stna %g6, [%g3 + TRAP_ENT_F2]%asi ;\ 1058 stna %g1, [%g3 + TRAP_ENT_F3]%asi /* tsb pointer */ ;\ 1059 stna %g0, [%g3 + TRAP_ENT_F4]%asi ;\ 1060 rdpr %tpc, %g6 ;\ 1061 stna %g6, [%g3 + TRAP_ENT_TPC]%asi ;\ 1062 TRACE_SAVE_TL_GL_REGS(%g3, %g6) ;\ 1063 rdpr %tt, %g6 ;\ 1064 or %g6, (ttextra), %g1 ;\ 1065 stha %g1, [%g3 + TRAP_ENT_TT]%asi ;\ 1066 MMU_FAULT_STATUS_AREA(%g4) ;\ 1067 mov MMFSA_D_ADDR, %g1 ;\ 1068 cmp %g6, FAST_IMMU_MISS_TT ;\ 1069 move %xcc, MMFSA_I_ADDR, %g1 ;\ 1070 cmp %g6, T_INSTR_MMU_MISS ;\ 1071 move %xcc, MMFSA_I_ADDR, %g1 ;\ 1072 ldx [%g4 + %g1], %g1 ;\ 1073 stxa %g1, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */ ;\ 1074 mov MMFSA_D_CTX, %g1 ;\ 1075 cmp %g6, FAST_IMMU_MISS_TT ;\ 1076 move %xcc, MMFSA_I_CTX, %g1 ;\ 1077 cmp %g6, T_INSTR_MMU_MISS ;\ 1078 move %xcc, MMFSA_I_CTX, %g1 ;\ 1079 ldx [%g4 + %g1], %g1 ;\ 1080 stna %g1, [%g3 + TRAP_ENT_TR]%asi ;\ 1081 TRACE_NEXT(%g3, %g4, %g6) 1082#else 1083#define TRACE_TSBHIT(ttextra) 1084#endif 1085 1086 1087#if 
defined(lint) 1088 1089struct scb trap_table; 1090struct scb scb; /* trap_table/scb are the same object */ 1091 1092#else /* lint */ 1093 1094/* 1095 * ======================================================================= 1096 * SPARC V9 TRAP TABLE 1097 * 1098 * The trap table is divided into two halves: the first half is used when 1099 * taking traps when TL=0; the second half is used when taking traps from 1100 * TL>0. Note that handlers in the second half of the table might not be able 1101 * to make the same assumptions as handlers in the first half of the table. 1102 * 1103 * Worst case trap nesting so far: 1104 * 1105 * at TL=0 client issues software trap requesting service 1106 * at TL=1 nucleus wants a register window 1107 * at TL=2 register window clean/spill/fill takes a TLB miss 1108 * at TL=3 processing TLB miss 1109 * at TL=4 handle asynchronous error 1110 * 1111 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode". 1112 * 1113 * ======================================================================= 1114 */ 1115 .section ".text" 1116 .align 4 1117 .global trap_table, scb, trap_table0, trap_table1, etrap_table 1118 .type trap_table, #function 1119 .type trap_table0, #function 1120 .type trap_table1, #function 1121 .type scb, #function 1122trap_table: 1123scb: 1124trap_table0: 1125 /* hardware traps */ 1126 NOT; /* 000 reserved */ 1127 RED; /* 001 power on reset */ 1128 RED; /* 002 watchdog reset */ 1129 RED; /* 003 externally initiated reset */ 1130 RED; /* 004 software initiated reset */ 1131 RED; /* 005 red mode exception */ 1132 NOT; NOT; /* 006 - 007 reserved */ 1133 IMMU_EXCEPTION; /* 008 instruction access exception */ 1134 ITSB_MISS; /* 009 instruction access MMU miss */ 1135 NOT; /* 00A reserved */ 1136 NOT; NOT4; /* 00B - 00F reserved */ 1137 ILLTRAP_INSTR; /* 010 illegal instruction */ 1138 TRAP(T_PRIV_INSTR); /* 011 privileged opcode */ 1139 TRAP(T_UNIMP_LDD); /* 012 unimplemented LDD */ 1140 TRAP(T_UNIMP_STD); /* 013 
unimplemented STD */ 1141 NOT4; NOT4; NOT4; /* 014 - 01F reserved */ 1142 FP_DISABLED_TRAP; /* 020 fp disabled */ 1143 FP_IEEE_TRAP; /* 021 fp exception ieee 754 */ 1144 FP_TRAP; /* 022 fp exception other */ 1145 TAG_OVERFLOW; /* 023 tag overflow */ 1146 CLEAN_WINDOW; /* 024 - 027 clean window */ 1147 DIV_BY_ZERO; /* 028 division by zero */ 1148 NOT; /* 029 internal processor error */ 1149 NOT; NOT; NOT4; /* 02A - 02F reserved */ 1150 DMMU_EXCEPTION; /* 030 data access exception */ 1151 DTSB_MISS; /* 031 data access MMU miss */ 1152 NOT; /* 032 reserved */ 1153 NOT; /* 033 data access protection */ 1154 DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */ 1155 DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */ 1156 DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */ 1157 DMMU_EXC_AG_PRIV; /* 037 privileged action */ 1158 NOT; /* 038 LDQF mem address not aligned */ 1159 NOT; /* 039 STQF mem address not aligned */ 1160 NOT; NOT; NOT4; /* 03A - 03F reserved */ 1161 NOT; /* 040 async data error */ 1162 LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */ 1163 LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */ 1164 LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */ 1165 LEVEL_INTERRUPT(4); /* 044 interrupt level 4 */ 1166 LEVEL_INTERRUPT(5); /* 045 interrupt level 5 */ 1167 LEVEL_INTERRUPT(6); /* 046 interrupt level 6 */ 1168 LEVEL_INTERRUPT(7); /* 047 interrupt level 7 */ 1169 LEVEL_INTERRUPT(8); /* 048 interrupt level 8 */ 1170 LEVEL_INTERRUPT(9); /* 049 interrupt level 9 */ 1171 LEVEL_INTERRUPT(10); /* 04A interrupt level 10 */ 1172 LEVEL_INTERRUPT(11); /* 04B interrupt level 11 */ 1173 LEVEL_INTERRUPT(12); /* 04C interrupt level 12 */ 1174 LEVEL_INTERRUPT(13); /* 04D interrupt level 13 */ 1175 LEVEL14_INTERRUPT; /* 04E interrupt level 14 */ 1176 LEVEL_INTERRUPT(15); /* 04F interrupt level 15 */ 1177 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F reserved */ 1178 NOT; /* 060 interrupt vector */ 1179 GOTO(kmdb_trap); /* 061 PA watchpoint */ 1180 
GOTO(kmdb_trap); /* 062 VA watchpoint */ 1181 NOT; /* 063 reserved */ 1182 ITLB_MISS(tt0); /* 064 instruction access MMU miss */ 1183 DTLB_MISS(tt0); /* 068 data access MMU miss */ 1184 DTLB_PROT; /* 06C data access protection */ 1185 NOT; /* 070 reserved */ 1186 NOT; /* 071 reserved */ 1187 NOT; /* 072 reserved */ 1188 NOT; /* 073 reserved */ 1189 NOT4; NOT4 /* 074 - 07B reserved */ 1190 CPU_MONDO; /* 07C cpu_mondo */ 1191 DEV_MONDO; /* 07D dev_mondo */ 1192 GOTO_TT(resumable_error, trace_gen); /* 07E resumable error */ 1193 GOTO_TT(nonresumable_error, trace_gen); /* 07F non-reasumable error */ 1194 NOT4; /* 080 spill 0 normal */ 1195 SPILL_32bit_asi(ASI_AIUP,sn0); /* 084 spill 1 normal */ 1196 SPILL_64bit_asi(ASI_AIUP,sn0); /* 088 spill 2 normal */ 1197 SPILL_32clean(ASI_AIUP,sn0); /* 08C spill 3 normal */ 1198 SPILL_64clean(ASI_AIUP,sn0); /* 090 spill 4 normal */ 1199 SPILL_32bit(not); /* 094 spill 5 normal */ 1200 SPILL_64bit(not); /* 098 spill 6 normal */ 1201 SPILL_mixed; /* 09C spill 7 normal */ 1202 NOT4; /* 0A0 spill 0 other */ 1203 SPILL_32bit_asi(ASI_AIUS,so0); /* 0A4 spill 1 other */ 1204 SPILL_64bit_asi(ASI_AIUS,so0); /* 0A8 spill 2 other */ 1205 SPILL_32bit_asi(ASI_AIUS,so0); /* 0AC spill 3 other */ 1206 SPILL_64bit_asi(ASI_AIUS,so0); /* 0B0 spill 4 other */ 1207 NOT4; /* 0B4 spill 5 other */ 1208 NOT4; /* 0B8 spill 6 other */ 1209 NOT4; /* 0BC spill 7 other */ 1210 NOT4; /* 0C0 fill 0 normal */ 1211 FILL_32bit_asi(ASI_AIUP,fn0); /* 0C4 fill 1 normal */ 1212 FILL_64bit_asi(ASI_AIUP,fn0); /* 0C8 fill 2 normal */ 1213 FILL_32bit_asi(ASI_AIUP,fn0); /* 0CC fill 3 normal */ 1214 FILL_64bit_asi(ASI_AIUP,fn0); /* 0D0 fill 4 normal */ 1215 FILL_32bit(not); /* 0D4 fill 5 normal */ 1216 FILL_64bit(not); /* 0D8 fill 6 normal */ 1217 FILL_mixed; /* 0DC fill 7 normal */ 1218 NOT4; /* 0E0 fill 0 other */ 1219 NOT4; /* 0E4 fill 1 other */ 1220 NOT4; /* 0E8 fill 2 other */ 1221 NOT4; /* 0EC fill 3 other */ 1222 NOT4; /* 0F0 fill 4 other */ 1223 NOT4; /* 0F4 fill 5 
other */ 1224 NOT4; /* 0F8 fill 6 other */ 1225 NOT4; /* 0FC fill 7 other */ 1226 /* user traps */ 1227 GOTO(syscall_trap_4x); /* 100 old system call */ 1228 TRAP(T_BREAKPOINT); /* 101 user breakpoint */ 1229 TRAP(T_DIV0); /* 102 user divide by zero */ 1230 GOTO(.flushw); /* 103 flush windows */ 1231 GOTO(.clean_windows); /* 104 clean windows */ 1232 BAD; /* 105 range check ?? */ 1233 GOTO(.fix_alignment); /* 106 do unaligned references */ 1234 BAD; /* 107 unused */ 1235 SYSCALL(syscall_trap32); /* 108 ILP32 system call on LP64 */ 1236 GOTO(set_trap0_addr); /* 109 set trap0 address */ 1237 BAD; BAD; BAD4; /* 10A - 10F unused */ 1238 TRP4; TRP4; TRP4; TRP4; /* 110 - 11F V9 user trap handlers */ 1239 GOTO(.getcc); /* 120 get condition codes */ 1240 GOTO(.setcc); /* 121 set condition codes */ 1241 GOTO(.getpsr); /* 122 get psr */ 1242 GOTO(.setpsr); /* 123 set psr (some fields) */ 1243 GOTO(get_timestamp); /* 124 get timestamp */ 1244 GOTO(get_virtime); /* 125 get lwp virtual time */ 1245 PRIV(self_xcall); /* 126 self xcall */ 1246 GOTO(get_hrestime); /* 127 get hrestime */ 1247 BAD; /* 128 ST_SETV9STACK */ 1248 GOTO(.getlgrp); /* 129 get lgrpid */ 1249 BAD; BAD; BAD4; /* 12A - 12F unused */ 1250 BAD4; BAD4; /* 130 - 137 unused */ 1251 DTRACE_PID; /* 138 dtrace pid tracing provider */ 1252 DTRACE_FASTTRAP; /* 139 dtrace fasttrap provider */ 1253 DTRACE_RETURN; /* 13A dtrace pid return probe */ 1254 BAD; BAD4; /* 13B - 13F unused */ 1255 SYSCALL(syscall_trap) /* 140 LP64 system call */ 1256 SYSCALL(nosys); /* 141 unused system call trap */ 1257#ifdef DEBUG_USER_TRAPTRACECTL 1258 GOTO(.traptrace_freeze); /* 142 freeze traptrace */ 1259 GOTO(.traptrace_unfreeze); /* 143 unfreeze traptrace */ 1260#else 1261 SYSCALL(nosys); /* 142 unused system call trap */ 1262 SYSCALL(nosys); /* 143 unused system call trap */ 1263#endif 1264 BAD4; BAD4; BAD4; /* 144 - 14F unused */ 1265 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */ 1266 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */ 
1267 BAD; /* 170 - unused */ 1268 BAD; /* 171 - unused */ 1269 BAD; BAD; /* 172 - 173 unused */ 1270 BAD4; BAD4; /* 174 - 17B unused */ 1271#ifdef PTL1_PANIC_DEBUG 1272 mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic); 1273 /* 17C test ptl1_panic */ 1274#else 1275 BAD; /* 17C unused */ 1276#endif /* PTL1_PANIC_DEBUG */ 1277 PRIV(kmdb_trap); /* 17D kmdb enter (L1-A) */ 1278 PRIV(kmdb_trap); /* 17E kmdb breakpoint */ 1279 PRIV(obp_bpt); /* 17F obp breakpoint */ 1280 /* reserved */ 1281 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */ 1282 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */ 1283 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */ 1284 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */ 1285 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */ 1286 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */ 1287 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */ 1288 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */ 1289 .size trap_table0, (.-trap_table0) 1290trap_table1: 1291 NOT4; NOT4; /* 000 - 007 unused */ 1292 NOT; /* 008 instruction access exception */ 1293 ITSB_MISS; /* 009 instruction access MMU miss */ 1294 NOT; /* 00A reserved */ 1295 NOT; NOT4; /* 00B - 00F unused */ 1296 NOT4; NOT4; NOT4; NOT4; /* 010 - 01F unused */ 1297 NOT4; /* 020 - 023 unused */ 1298 CLEAN_WINDOW; /* 024 - 027 clean window */ 1299 NOT4; NOT4; /* 028 - 02F unused */ 1300 DMMU_EXCEPTION_TL1; /* 030 data access exception */ 1301 DTSB_MISS; /* 031 data access MMU miss */ 1302 NOT; /* 032 reserved */ 1303 NOT; /* 033 unused */ 1304 MISALIGN_ADDR_TL1; /* 034 mem address not aligned */ 1305 NOT; NOT; NOT; NOT4; NOT4 /* 035 - 03F unused */ 1306 NOT4; NOT4; NOT4; NOT4; /* 040 - 04F unused */ 1307 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F unused */ 1308 NOT; /* 060 unused */ 1309 GOTO(kmdb_trap_tl1); /* 061 PA watchpoint */ 1310 GOTO(kmdb_trap_tl1); /* 062 VA watchpoint */ 1311 NOT; /* 063 reserved */ 1312 ITLB_MISS(tt1); /* 064 instruction access MMU miss */ 1313 DTLB_MISS(tt1); /* 068 data access MMU miss */ 1314 
DTLB_PROT; /* 06C data access protection */ 1315 NOT; /* 070 reserved */ 1316 NOT; /* 071 reserved */ 1317 NOT; /* 072 reserved */ 1318 NOT; /* 073 reserved */ 1319 NOT4; NOT4; /* 074 - 07B reserved */ 1320 NOT; /* 07C reserved */ 1321 NOT; /* 07D reserved */ 1322 NOT; /* 07E resumable error */ 1323 GOTO_TT(nonresumable_error, trace_gen); /* 07F nonresumable error */ 1324 NOTP4; /* 080 spill 0 normal */ 1325 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 084 spill 1 normal */ 1326 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 088 spill 2 normal */ 1327 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 08C spill 3 normal */ 1328 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 090 spill 4 normal */ 1329 NOTP4; /* 094 spill 5 normal */ 1330 SPILL_64bit_ktt1(sk); /* 098 spill 6 normal */ 1331 SPILL_mixed_ktt1(sk); /* 09C spill 7 normal */ 1332 NOTP4; /* 0A0 spill 0 other */ 1333 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0A4 spill 1 other */ 1334 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0A8 spill 2 other */ 1335 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0AC spill 3 other */ 1336 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0B0 spill 4 other */ 1337 NOTP4; /* 0B4 spill 5 other */ 1338 NOTP4; /* 0B8 spill 6 other */ 1339 NOTP4; /* 0BC spill 7 other */ 1340 NOT4; /* 0C0 fill 0 normal */ 1341 NOT4; /* 0C4 fill 1 normal */ 1342 NOT4; /* 0C8 fill 2 normal */ 1343 NOT4; /* 0CC fill 3 normal */ 1344 NOT4; /* 0D0 fill 4 normal */ 1345 NOT4; /* 0D4 fill 5 normal */ 1346 NOT4; /* 0D8 fill 6 normal */ 1347 NOT4; /* 0DC fill 7 normal */ 1348 NOT4; NOT4; NOT4; NOT4; /* 0E0 - 0EF unused */ 1349 NOT4; NOT4; NOT4; NOT4; /* 0F0 - 0FF unused */ 1350/* 1351 * Code running at TL>0 does not use soft traps, so 1352 * we can truncate the table here. 1353 * However: 1354 * sun4v uses (hypervisor) ta instructions at TL > 0, so 1355 * provide a safety net for now. 
1356 */ 1357 /* soft traps */ 1358 BAD4; BAD4; BAD4; BAD4; /* 100 - 10F unused */ 1359 BAD4; BAD4; BAD4; BAD4; /* 110 - 11F unused */ 1360 BAD4; BAD4; BAD4; BAD4; /* 120 - 12F unused */ 1361 BAD4; BAD4; BAD4; BAD4; /* 130 - 13F unused */ 1362 BAD4; BAD4; BAD4; BAD4; /* 140 - 14F unused */ 1363 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */ 1364 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */ 1365 BAD4; BAD4; BAD4; BAD4; /* 170 - 17F unused */ 1366 /* reserved */ 1367 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */ 1368 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */ 1369 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */ 1370 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */ 1371 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */ 1372 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */ 1373 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */ 1374 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */ 1375etrap_table: 1376 .size trap_table1, (.-trap_table1) 1377 .size trap_table, (.-trap_table) 1378 .size scb, (.-scb) 1379 1380/* 1381 * We get to exec_fault in the case of an instruction miss and tte 1382 * has no execute bit set. We go to tl0 to handle it. 1383 * 1384 * g1 = tsbe pointer (in/clobbered) 1385 * g2 = tag access register (in) 1386 * g3 - g4 = scratch (clobbered) 1387 * g5 = tsbe data (in) 1388 * g6 = scratch (clobbered) 1389 * g7 = pc we jumped here from (in) 1390 */ 1391/* 1392 * synthesize for trap(): TAG_ACCESS in %g2 1393 */ 1394 ALTENTRY(exec_fault) 1395 TRACE_TSBHIT(TT_MMU_EXEC) 1396 MMU_FAULT_STATUS_AREA(%g4) 1397 ldx [%g4 + MMFSA_I_ADDR], %g2 /* g2 = address */ 1398 ldx [%g4 + MMFSA_I_CTX], %g3 /* g3 = ctx */ 1399 srlx %g2, MMU_PAGESHIFT, %g2 ! align address to page boundry 1400 sllx %g2, MMU_PAGESHIFT, %g2 1401 or %g2, %g3, %g2 /* TAG_ACCESS */ 1402 mov T_INSTR_MMU_MISS, %g3 ! 
arg2 = traptype 1403 set trap, %g1 1404 ba,pt %xcc, sys_trap 1405 mov -1, %g4 1406 1407.mmu_exception_not_aligned: 1408 /* %g2 = sfar, %g3 = sfsr */ 1409 rdpr %tstate, %g1 1410 btst TSTATE_PRIV, %g1 1411 bnz,pn %icc, 2f 1412 nop 1413 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1414 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1415 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1416 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1417 brz,pt %g5, 2f 1418 nop 1419 ldn [%g5 + P_UTRAP15], %g5 ! unaligned utrap? 1420 brz,pn %g5, 2f 1421 nop 1422 btst 1, %sp 1423 bz,pt %xcc, 1f ! 32 bit user program 1424 nop 1425 ba,pt %xcc, .setup_v9utrap ! 64 bit user program 1426 nop 14271: 1428 ba,pt %xcc, .setup_utrap 1429 or %g2, %g0, %g7 14302: 1431 ba,pt %xcc, .mmu_exception_end 1432 mov T_ALIGNMENT, %g1 1433 1434.mmu_priv_exception: 1435 rdpr %tstate, %g1 1436 btst TSTATE_PRIV, %g1 1437 bnz,pn %icc, 1f 1438 nop 1439 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1440 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1441 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1442 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1443 brz,pt %g5, 1f 1444 nop 1445 ldn [%g5 + P_UTRAP16], %g5 1446 brnz,pt %g5, .setup_v9utrap 1447 nop 14481: 1449 mov T_PRIV_INSTR, %g1 1450 1451.mmu_exception_end: 1452 CPU_INDEX(%g4, %g5) 1453 set cpu_core, %g5 1454 sllx %g4, CPU_CORE_SHIFT, %g4 1455 add %g4, %g5, %g4 1456 lduh [%g4 + CPUC_DTRACE_FLAGS], %g5 1457 andcc %g5, CPU_DTRACE_NOFAULT, %g0 1458 bz 1f 1459 or %g5, CPU_DTRACE_BADADDR, %g5 1460 stuh %g5, [%g4 + CPUC_DTRACE_FLAGS] 1461 done 1462 14631: 1464 sllx %g3, 32, %g3 1465 or %g3, %g1, %g3 1466 set trap, %g1 1467 ba,pt %xcc, sys_trap 1468 sub %g0, 1, %g4 1469 1470.fp_disabled: 1471 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1472 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1473 rdpr %tstate, %g4 1474 btst TSTATE_PRIV, %g4 1475 bnz,a,pn %icc, ptl1_panic 1476 mov PTL1_BAD_FPTRAP, %g1 1477 1478 ldn [%g1 + T_PROCP], %g1 ! 
! load proc pointer (comment continuation of the .fp_disabled probe above)
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 2f
	nop
	ldn	[%g5 + P_UTRAP7], %g5		! fp_disabled utrap?
	brz,a,pn %g5, 2f
	nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,a,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g0, %g0, %g7			! no misaligned address
2:
	set	fp_disabled, %g1		! no utrap: kernel fp_disabled()
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * fp exception ieee 754: panic if taken from privileged mode; else
 * dispatch to the user's P_UTRAP8 handler if installed, otherwise go
 * to C via sys_trap with %fsr passed in %g2.
 */
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2		! %g2 = %fsr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 1f
	nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * Register Inputs:
 *	%g5	user trap handler
 *	%g7	misaligned addr - for alignment traps only
 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			!
arg0 == tpc 1542 1543 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1544 ldn [%g1 + T_DTRACE_NPC], %l2 ! arg1 = t->t_dtrace_npc (step) 1545 brz,pt %g2, 1f 1546 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1547 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 15481: 1549 mov %g7, %l3 ! arg2 == misaligned address 1550 1551 rdpr %tstate, %g1 ! cwp for trap handler 1552 rdpr %cwp, %g4 1553 bclr TSTATE_CWP_MASK, %g1 1554 wrpr %g1, %g4, %tstate 1555 wrpr %g0, %g5, %tnpc ! trap handler address 1556 FAST_TRAP_DONE 1557 /* NOTREACHED */ 1558 1559.check_v9utrap: 1560 rdpr %tstate, %g1 1561 btst TSTATE_PRIV, %g1 1562 bnz,a,pn %icc, 3f 1563 nop 1564 CPU_ADDR(%g4, %g1) ! load CPU struct addr 1565 ldn [%g4 + CPU_THREAD], %g5 ! load thread pointer 1566 ldn [%g5 + T_PROCP], %g5 ! load proc pointer 1567 ldn [%g5 + P_UTRAPS], %g5 ! are there utraps? 1568 1569 cmp %g3, T_SOFTWARE_TRAP 1570 bne,a,pt %icc, 1f 1571 nop 1572 1573 brz,pt %g5, 3f ! if p_utraps == NULL goto trap() 1574 rdpr %tt, %g3 ! delay - get actual hw trap type 1575 1576 sub %g3, 254, %g1 ! UT_TRAP_INSTRUCTION_16 = p_utraps[18] 1577 ba,pt %icc, 2f 1578 smul %g1, CPTRSIZE, %g2 15791: 1580 brz,a,pt %g5, 3f ! if p_utraps == NULL goto trap() 1581 nop 1582 1583 cmp %g3, T_UNIMP_INSTR 1584 bne,a,pt %icc, 2f 1585 nop 1586 1587 mov 1, %g1 1588 st %g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR 1589 rdpr %tpc, %g1 ! ld trapping instruction using 1590 lduwa [%g1]ASI_AIUP, %g1 ! "AS IF USER" ASI which could fault 1591 st %g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR 1592 1593 sethi %hi(0xc1c00000), %g4 ! setup mask for illtrap instruction 1594 andcc %g1, %g4, %g4 ! and instruction with mask 1595 bnz,a,pt %icc, 3f ! if %g4 == zero, %g1 is an ILLTRAP 1596 nop ! 
fall thru to setup 15972: 1598 ldn [%g5 + %g2], %g5 1599 brnz,a,pt %g5, .setup_v9utrap 1600 nop 16013: 1602 set trap, %g1 1603 ba,pt %xcc, sys_trap 1604 sub %g0, 1, %g4 1605 /* NOTREACHED */ 1606 1607/* 1608 * Register Inputs: 1609 * %g5 user trap handler 1610 */ 1611.setup_v9utrap: 1612 set trap, %g1 ! setup in case we go 1613 mov T_FLUSH_PCB, %g3 ! through sys_trap on 1614 sub %g0, 1, %g4 ! the save instruction below 1615 1616 /* 1617 * If the DTrace pid provider is single stepping a copied-out 1618 * instruction, t->t_dtrace_step will be set. In that case we need 1619 * to abort the single-stepping (since execution of the instruction 1620 * was interrupted) and use the value of t->t_dtrace_npc as the %npc. 1621 */ 1622 save %sp, -SA(MINFRAME64), %sp ! window for trap handler 1623 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1624 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1625 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step 1626 rdpr %tnpc, %l7 ! arg1 == tnpc 1627 brz,pt %g2, 1f 1628 rdpr %tpc, %l6 ! arg0 == tpc 1629 1630 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1631 ldn [%g1 + T_DTRACE_NPC], %l7 ! arg1 == t->t_dtrace_npc (step) 1632 brz,pt %g2, 1f 1633 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1634 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 16351: 1636 rdpr %tstate, %g2 ! cwp for trap handler 1637 rdpr %cwp, %g4 1638 bclr TSTATE_CWP_MASK, %g2 1639 wrpr %g2, %g4, %tstate 1640 1641 ldn [%g1 + T_PROCP], %g4 ! load proc pointer 1642 ldn [%g4 + P_AS], %g4 ! load as pointer 1643 ldn [%g4 + A_USERLIMIT], %g4 ! load as userlimit 1644 cmp %l7, %g4 ! check for single-step set 1645 bne,pt %xcc, 4f 1646 nop 1647 ldn [%g1 + T_LWP], %g1 ! load klwp pointer 1648 ld [%g1 + PCB_STEP], %g4 ! load single-step flag 1649 cmp %g4, STEP_ACTIVE ! step flags set in pcb? 1650 bne,pt %icc, 4f 1651 nop 1652 stn %g5, [%g1 + PCB_TRACEPC] ! save trap handler addr in pcb 1653 mov %l7, %g4 ! 
on entry to precise user trap 1654 add %l6, 4, %l7 ! handler, %l6 == pc, %l7 == npc 1655 ! at time of trap 1656 wrpr %g0, %g4, %tnpc ! generate FLTBOUNDS, 1657 ! %g4 == userlimit 1658 FAST_TRAP_DONE 1659 /* NOTREACHED */ 16604: 1661 wrpr %g0, %g5, %tnpc ! trap handler address 1662 FAST_TRAP_DONE_CHK_INTR 1663 /* NOTREACHED */ 1664 1665.fp_exception: 1666 CPU_ADDR(%g1, %g4) 1667 stx %fsr, [%g1 + CPU_TMP1] 1668 ldx [%g1 + CPU_TMP1], %g2 1669 1670 /* 1671 * Cheetah takes unfinished_FPop trap for certain range of operands 1672 * to the "fitos" instruction. Instead of going through the slow 1673 * software emulation path, we try to simulate the "fitos" instruction 1674 * via "fitod" and "fdtos" provided the following conditions are met: 1675 * 1676 * fpu_exists is set (if DEBUG) 1677 * not in privileged mode 1678 * ftt is unfinished_FPop 1679 * NXM IEEE trap is not enabled 1680 * instruction at %tpc is "fitos" 1681 * 1682 * Usage: 1683 * %g1 per cpu address 1684 * %g2 %fsr 1685 * %g6 user instruction 1686 * 1687 * Note that we can take a memory access related trap while trying 1688 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR 1689 * flag to catch those traps and let the SFMMU code deal with page 1690 * fault and data access exception. 1691 */ 1692#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 1693 sethi %hi(fpu_exists), %g7 1694 ld [%g7 + %lo(fpu_exists)], %g7 1695 brz,pn %g7, .fp_exception_cont 1696 nop 1697#endif 1698 rdpr %tstate, %g7 ! branch if in privileged mode 1699 btst TSTATE_PRIV, %g7 1700 bnz,pn %xcc, .fp_exception_cont 1701 srl %g2, FSR_FTT_SHIFT, %g7 ! extract ftt from %fsr 1702 and %g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7 1703 cmp %g7, FTT_UNFIN 1704 set FSR_TEM_NX, %g5 1705 bne,pn %xcc, .fp_exception_cont ! branch if NOT unfinished_FPop 1706 andcc %g2, %g5, %g0 1707 bne,pn %xcc, .fp_exception_cont ! branch if FSR_TEM_NX enabled 1708 rdpr %tpc, %g5 ! get faulting PC 1709 1710 or %g0, 1, %g7 1711 st %g7, [%g1 + CPU_TL1_HDLR] ! 
set tl1_hdlr flag 1712 lda [%g5]ASI_USER, %g6 ! get user's instruction 1713 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 1714 1715 set FITOS_INSTR_MASK, %g7 1716 and %g6, %g7, %g7 1717 set FITOS_INSTR, %g5 1718 cmp %g7, %g5 1719 bne,pn %xcc, .fp_exception_cont ! branch if not FITOS_INSTR 1720 nop 1721 1722 /* 1723 * This is unfinished FPops trap for "fitos" instruction. We 1724 * need to simulate "fitos" via "fitod" and "fdtos" instruction 1725 * sequence. 1726 * 1727 * We need a temporary FP register to do the conversion. Since 1728 * both source and destination operands for the "fitos" instruction 1729 * have to be within %f0-%f31, we use an FP register from the upper 1730 * half to guarantee that it won't collide with the source or the 1731 * dest operand. However, we do have to save and restore its value. 1732 * 1733 * We use %d62 as a temporary FP register for the conversion and 1734 * branch to appropriate instruction within the conversion tables 1735 * based upon the rs2 and rd values. 1736 */ 1737 1738 std %d62, [%g1 + CPU_TMP1] ! 
save original value 1739 1740 srl %g6, FITOS_RS2_SHIFT, %g7 1741 and %g7, FITOS_REG_MASK, %g7 1742 set _fitos_fitod_table, %g4 1743 sllx %g7, 2, %g7 1744 jmp %g4 + %g7 1745 ba,pt %xcc, _fitos_fitod_done 1746 .empty 1747 1748_fitos_fitod_table: 1749 fitod %f0, %d62 1750 fitod %f1, %d62 1751 fitod %f2, %d62 1752 fitod %f3, %d62 1753 fitod %f4, %d62 1754 fitod %f5, %d62 1755 fitod %f6, %d62 1756 fitod %f7, %d62 1757 fitod %f8, %d62 1758 fitod %f9, %d62 1759 fitod %f10, %d62 1760 fitod %f11, %d62 1761 fitod %f12, %d62 1762 fitod %f13, %d62 1763 fitod %f14, %d62 1764 fitod %f15, %d62 1765 fitod %f16, %d62 1766 fitod %f17, %d62 1767 fitod %f18, %d62 1768 fitod %f19, %d62 1769 fitod %f20, %d62 1770 fitod %f21, %d62 1771 fitod %f22, %d62 1772 fitod %f23, %d62 1773 fitod %f24, %d62 1774 fitod %f25, %d62 1775 fitod %f26, %d62 1776 fitod %f27, %d62 1777 fitod %f28, %d62 1778 fitod %f29, %d62 1779 fitod %f30, %d62 1780 fitod %f31, %d62 1781_fitos_fitod_done: 1782 1783 /* 1784 * Now convert data back into single precision 1785 */ 1786 srl %g6, FITOS_RD_SHIFT, %g7 1787 and %g7, FITOS_REG_MASK, %g7 1788 set _fitos_fdtos_table, %g4 1789 sllx %g7, 2, %g7 1790 jmp %g4 + %g7 1791 ba,pt %xcc, _fitos_fdtos_done 1792 .empty 1793 1794_fitos_fdtos_table: 1795 fdtos %d62, %f0 1796 fdtos %d62, %f1 1797 fdtos %d62, %f2 1798 fdtos %d62, %f3 1799 fdtos %d62, %f4 1800 fdtos %d62, %f5 1801 fdtos %d62, %f6 1802 fdtos %d62, %f7 1803 fdtos %d62, %f8 1804 fdtos %d62, %f9 1805 fdtos %d62, %f10 1806 fdtos %d62, %f11 1807 fdtos %d62, %f12 1808 fdtos %d62, %f13 1809 fdtos %d62, %f14 1810 fdtos %d62, %f15 1811 fdtos %d62, %f16 1812 fdtos %d62, %f17 1813 fdtos %d62, %f18 1814 fdtos %d62, %f19 1815 fdtos %d62, %f20 1816 fdtos %d62, %f21 1817 fdtos %d62, %f22 1818 fdtos %d62, %f23 1819 fdtos %d62, %f24 1820 fdtos %d62, %f25 1821 fdtos %d62, %f26 1822 fdtos %d62, %f27 1823 fdtos %d62, %f28 1824 fdtos %d62, %f29 1825 fdtos %d62, %f30 1826 fdtos %d62, %f31 1827_fitos_fdtos_done: 1828 1829 ldd [%g1 + CPU_TMP1], 
%d62 ! restore %d62 1830 1831#if DEBUG 1832 /* 1833 * Update FPop_unfinished trap kstat 1834 */ 1835 set fpustat+FPUSTAT_UNFIN_KSTAT, %g7 1836 ldx [%g7], %g5 18371: 1838 add %g5, 1, %g6 1839 1840 casxa [%g7] ASI_N, %g5, %g6 1841 cmp %g5, %g6 1842 bne,a,pn %xcc, 1b 1843 or %g0, %g6, %g5 1844 1845 /* 1846 * Update fpu_sim_fitos kstat 1847 */ 1848 set fpuinfo+FPUINFO_FITOS_KSTAT, %g7 1849 ldx [%g7], %g5 18501: 1851 add %g5, 1, %g6 1852 1853 casxa [%g7] ASI_N, %g5, %g6 1854 cmp %g5, %g6 1855 bne,a,pn %xcc, 1b 1856 or %g0, %g6, %g5 1857#endif /* DEBUG */ 1858 1859 FAST_TRAP_DONE 1860 1861.fp_exception_cont: 1862 /* 1863 * Let _fp_exception deal with simulating FPop instruction. 1864 * Note that we need to pass %fsr in %g2 (already read above). 1865 */ 1866 1867 set _fp_exception, %g1 1868 ba,pt %xcc, sys_trap 1869 sub %g0, 1, %g4 1870 1871 1872/* 1873 * Register windows 1874 */ 1875 1876/* 1877 * FILL_32bit_flushw/FILL_64bit_flushw fills a 32/64-bit-wide register window 1878 * from a 32/64-bit * wide address space via the designated asi. 1879 * It is used to fill windows in user_flushw to avoid going above TL 2. 
*/
/* TODO: Use the faster FILL based on FILL_32bit_asi/FILL_64bit_asi */
#define	FILL_32bit_flushw(asi_num)				\
	mov	asi_num, %asi					;\
	rdpr	%cwp, %g2					;\
	sub	%g2, 1, %g2					;\
	wrpr	%g2, %cwp					;\
1:	srl	%sp, 0, %sp					;\
	lda	[%sp + 0]%asi, %l0				;\
	lda	[%sp + 4]%asi, %l1				;\
	lda	[%sp + 8]%asi, %l2				;\
	lda	[%sp + 12]%asi, %l3				;\
	lda	[%sp + 16]%asi, %l4				;\
	lda	[%sp + 20]%asi, %l5				;\
	lda	[%sp + 24]%asi, %l6				;\
	lda	[%sp + 28]%asi, %l7				;\
	lda	[%sp + 32]%asi, %i0				;\
	lda	[%sp + 36]%asi, %i1				;\
	lda	[%sp + 40]%asi, %i2				;\
	lda	[%sp + 44]%asi, %i3				;\
	lda	[%sp + 48]%asi, %i4				;\
	lda	[%sp + 52]%asi, %i5				;\
	lda	[%sp + 56]%asi, %i6				;\
	lda	[%sp + 60]%asi, %i7				;\
	restored						;\
	add	%g2, 1, %g2					;\
	wrpr	%g2, %cwp

#define	FILL_64bit_flushw(asi_num)				\
	mov	asi_num, %asi					;\
	rdpr	%cwp, %g2					;\
	sub	%g2, 1, %g2					;\
	wrpr	%g2, %cwp					;\
	ldxa	[%sp + V9BIAS64 + 0]%asi, %l0			;\
	ldxa	[%sp + V9BIAS64 + 8]%asi, %l1			;\
	ldxa	[%sp + V9BIAS64 + 16]%asi, %l2			;\
	ldxa	[%sp + V9BIAS64 + 24]%asi, %l3			;\
	ldxa	[%sp + V9BIAS64 + 32]%asi, %l4			;\
	ldxa	[%sp + V9BIAS64 + 40]%asi, %l5			;\
	ldxa	[%sp + V9BIAS64 + 48]%asi, %l6			;\
	ldxa	[%sp + V9BIAS64 + 56]%asi, %l7			;\
	ldxa	[%sp + V9BIAS64 + 64]%asi, %i0			;\
	ldxa	[%sp + V9BIAS64 + 72]%asi, %i1			;\
	ldxa	[%sp + V9BIAS64 + 80]%asi, %i2			;\
	ldxa	[%sp + V9BIAS64 + 88]%asi, %i3			;\
	ldxa	[%sp + V9BIAS64 + 96]%asi, %i4			;\
	ldxa	[%sp + V9BIAS64 + 104]%asi, %i5			;\
	ldxa	[%sp + V9BIAS64 + 112]%asi, %i6			;\
	ldxa	[%sp + V9BIAS64 + 120]%asi, %i7			;\
	restored						;\
	add	%g2, 1, %g2					;\
	wrpr	%g2, %cwp

/*
 * .flushw: advance %tpc/%tnpc past the trapping instruction, then
 * bounce through sys_trap with T_FLUSH_PCB so C code flushes windows.
 */
.flushw:
	rdpr	%tnpc, %g1
	wrpr	%g1, %tpc
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

.clean_windows:
	set	trap, %g1		! sys_trap args pre-loaded in case
	mov	T_FLUSH_PCB, %g3	! the save below takes a trap
	sub	%g0, 1, %g4
	save
	flushw
	rdpr	%canrestore, %g2
	brnz	%g2, 1f
	nop
	rdpr	%wstate, %g2
	btst	1, %g2
	beq	2f
	nop
	FILL_32bit_flushw(ASI_AIUP)
	ba,a	1f
	.empty
2:
	FILL_64bit_flushw(ASI_AIUP)
1:
	restore
	wrpr	%g0, %g0, %cleanwin	! no clean windows

	CPU_ADDR(%g4, %g5)
	ldn	[%g4 + CPU_MPCB], %g4
	brz,a,pn %g4, 1f
	nop
	ld	[%g4 + MPCB_WSTATE], %g5
	add	%g5, WSTATE_CLEAN_OFFSET, %g5
	wrpr	%g0, %g5, %wstate
1:	FAST_TRAP_DONE

/*
 * .spill_clean: clean the previous window, restore the wstate, and
 * "done".
 *
 * Entry: %g7 contains new wstate
 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
	wrpr	%g6, %cwp
	TT_TRACE_L(trace_win)
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate
	saved
	retry					! restores correct %cwp

.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	!
load thread pointer 2003 ldn [%g1 + T_PROCP], %g1 2004 mov 1, %g2 2005 stb %g2, [%g1 + P_FIXALIGNMENT] 2006 FAST_TRAP_DONE 2007 2008#define STDF_REG(REG, ADDR, TMP) \ 2009 sll REG, 3, REG ;\ 2010mark1: set start1, TMP ;\ 2011 jmp REG + TMP ;\ 2012 nop ;\ 2013start1: ba,pt %xcc, done1 ;\ 2014 std %f0, [ADDR + CPU_TMP1] ;\ 2015 ba,pt %xcc, done1 ;\ 2016 std %f32, [ADDR + CPU_TMP1] ;\ 2017 ba,pt %xcc, done1 ;\ 2018 std %f2, [ADDR + CPU_TMP1] ;\ 2019 ba,pt %xcc, done1 ;\ 2020 std %f34, [ADDR + CPU_TMP1] ;\ 2021 ba,pt %xcc, done1 ;\ 2022 std %f4, [ADDR + CPU_TMP1] ;\ 2023 ba,pt %xcc, done1 ;\ 2024 std %f36, [ADDR + CPU_TMP1] ;\ 2025 ba,pt %xcc, done1 ;\ 2026 std %f6, [ADDR + CPU_TMP1] ;\ 2027 ba,pt %xcc, done1 ;\ 2028 std %f38, [ADDR + CPU_TMP1] ;\ 2029 ba,pt %xcc, done1 ;\ 2030 std %f8, [ADDR + CPU_TMP1] ;\ 2031 ba,pt %xcc, done1 ;\ 2032 std %f40, [ADDR + CPU_TMP1] ;\ 2033 ba,pt %xcc, done1 ;\ 2034 std %f10, [ADDR + CPU_TMP1] ;\ 2035 ba,pt %xcc, done1 ;\ 2036 std %f42, [ADDR + CPU_TMP1] ;\ 2037 ba,pt %xcc, done1 ;\ 2038 std %f12, [ADDR + CPU_TMP1] ;\ 2039 ba,pt %xcc, done1 ;\ 2040 std %f44, [ADDR + CPU_TMP1] ;\ 2041 ba,pt %xcc, done1 ;\ 2042 std %f14, [ADDR + CPU_TMP1] ;\ 2043 ba,pt %xcc, done1 ;\ 2044 std %f46, [ADDR + CPU_TMP1] ;\ 2045 ba,pt %xcc, done1 ;\ 2046 std %f16, [ADDR + CPU_TMP1] ;\ 2047 ba,pt %xcc, done1 ;\ 2048 std %f48, [ADDR + CPU_TMP1] ;\ 2049 ba,pt %xcc, done1 ;\ 2050 std %f18, [ADDR + CPU_TMP1] ;\ 2051 ba,pt %xcc, done1 ;\ 2052 std %f50, [ADDR + CPU_TMP1] ;\ 2053 ba,pt %xcc, done1 ;\ 2054 std %f20, [ADDR + CPU_TMP1] ;\ 2055 ba,pt %xcc, done1 ;\ 2056 std %f52, [ADDR + CPU_TMP1] ;\ 2057 ba,pt %xcc, done1 ;\ 2058 std %f22, [ADDR + CPU_TMP1] ;\ 2059 ba,pt %xcc, done1 ;\ 2060 std %f54, [ADDR + CPU_TMP1] ;\ 2061 ba,pt %xcc, done1 ;\ 2062 std %f24, [ADDR + CPU_TMP1] ;\ 2063 ba,pt %xcc, done1 ;\ 2064 std %f56, [ADDR + CPU_TMP1] ;\ 2065 ba,pt %xcc, done1 ;\ 2066 std %f26, [ADDR + CPU_TMP1] ;\ 2067 ba,pt %xcc, done1 ;\ 2068 std %f58, [ADDR + CPU_TMP1] ;\ 2069 
	ba,pt	%xcc, done1			;\
	std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f62, [ADDR + CPU_TMP1]		;\
done1:

/*
 * LDDF_REG(REG, ADDR, TMP)
 *
 * Jump-table dispatcher used by the lddf misalignment handler below.
 * REG holds the destination double-FP register number (rd field already
 * extracted from the trapping instruction); each table entry loads the
 * 8-byte value staged at ADDR + CPU_TMP1 into the corresponding %fN.
 * Entries alternate between the low bank (%f0-%f30, even rd) and the
 * high bank (%f32-%f62, odd rd encodings).  TMP is scratch.
 *
 * NOTE: cpp joins these continuation lines into one logical line, so
 * "!" end-of-line comments must not be used inside the macro body.
 */
#define LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start2:	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f62		;\
done2:

/*
 * Emulate a user lddf/ldda that trapped because the effective address
 * was not 8-byte aligned.  The data is fetched as two aligned 32-bit
 * user-space loads, combined, staged in cpu_tmp1, and then loaded into
 * the destination FP register via LDDF_REG.  Unsupported (little-endian
 * / no-fault-little) ASIs and the no-FPU case are punted to fpu_trap
 * in C via sys_trap.
 */
.lddf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5		! stash sfar
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2	! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
	srl	%g6, 23, %g1		! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for ldda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_PNF		! primary no fault address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_SNF		! secondary no fault address space
	bne,a,pn %icc, 3f
	nop
2:
	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
	add	%g5, 4, %g5		! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5		! combine data
	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1

	srl	%g6, 25, %g3		! %g6 has the instruction
	and	%g3, 0x1F, %g3		! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4

/*
 * Emulate a user stdf/stda that trapped on a misaligned effective
 * address: mirror image of .lddf_exception_not_aligned above.  The FP
 * register is spilled to cpu_tmp1 via STDF_REG, then written out as two
 * aligned 32-bit user-space stores.
 */
.stdf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7	! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction

	srl	%g6, 23, %g1		! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for stda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xff, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	bne,a,pn %icc, 3f
	nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6		! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER	! first half
	add	%g5, 4, %g5		! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER	! second half

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! nofault little asi's
	sub	%g0, 1, %g4

#ifdef DEBUG_USER_TRAPTRACECTL

/*
 * Debug-only fast traps that let a user program freeze/unfreeze the
 * trap-trace buffer by setting/clearing trap_freeze.  The globals are
 * shuffled through locals around TT_TRACE_L because the trace macro
 * clobbers %l-registers.
 */
.traptrace_freeze:
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]
	FAST_TRAP_DONE

.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	FAST_TRAP_DONE

#endif /* DEBUG_USER_TRAPTRACECTL */

/*
 * getcc
 * Return the condition codes: extract tstate.ccr.icc, convert to the
 * V8 psr icc position, and hand the result back to the user in the
 * normal-globals %g1 (written while %gl is temporarily dropped to 0).
 */
.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	wrpr	%g0, 0, %gl
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

/*
 * setcc
 * Inverse of getcc: take the V8-style icc value from the user's
 * normal-globals %g1 (read at %gl 0) and fold it into tstate.ccr.
 * Note only the low 32 bits of %tstate are preserved before the or.
 */
.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	wrpr	%g0, 0, %gl
	mov	%g1, %o0
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	sll	%o0, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

/*
 * getpsr(void)
 * Note that the xcc part of the ccr is not provided.
 * The V8 code shows why the V9 trap is not faster:
 *	#define GETPSR_TRAP() \
 *	mov %psr, %i0; jmp %l2; rett %l2+4; nop;
 */

	.type .getpsr, #function
.getpsr:
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)

/*
 * setpsr(newpsr)
 * Note that there is no support for ccr.xcc in the V9 code.
 */

	.type .setpsr, #function
.setpsr:
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or 	%g0, CCR_ICC, %g3
	sllx	%g3, TSTATE_CCR_SHIFT, %g2

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)

/*
 * getlgrp
 * get home lgrpid on which the calling thread is currently executing.
 */
	.type	.getlgrp, #function
.getlgrp:
	! Returns: %o0 = cpu id, %o1 = home lgroup id of the current thread
	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)

/*
 * Entry for old 4.x trap (trap 0).
 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f		! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%tnpc, %l1	! save old tnpc
	wrpr	%g0, %g2, %tnpc	! setup tnpc

	mov	%g1, %l0	! save CPU struct addr
	wrpr	%g0, 0, %gl
	mov	%l1, %g6	! pass tnpc to user code in %g6
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	!
	! check for old syscall mmap which is the only different one which
	! must be the same.  Others are handled in the compatibility library.
	!
	mov	%g1, %l0	! save CPU struct addr
	wrpr	%g0, 0, %gl
	cmp	%g1, OSYS_mmap	! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP1], %l0
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)

/*
 * Handler for software trap 9.
 * Set trap0 emulation address for old 4.x system call trap.
 * XXX - this should be a system call.
 */
	ENTRY_NP(set_trap0_addr)
	! The new trap0 address arrives in the user's normal-globals %g1;
	! it is read at %gl 0 and stored into lwp->lwp_pcb.pcb_trap0addr.
	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	mov	%g1, %l0	! preserve CPU addr
	wrpr	%g0, 0, %gl
	mov	%g1, %l1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)

/*
 * mmu_trap_tl1
 * trap handler for unexpected mmu traps.
 * simply checks if the trap was a user lddf/stdf alignment trap, in which
 * case we go to fpu_trap or a user trap from the window handler, in which
 * case we go save the state on the pcb. Otherwise, we go to ptl1_panic.
 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef	TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! MMU fault address
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_TYPE], %g7 ! XXXQ should be a MMFSA_F_ constant?
	ldx	[%g6 + MMFSA_D_CTX], %g6
	sllx	%g6, SFSR_CTX_SHIFT, %g6
	or	%g6, %g7, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! MMU context/type
	set	0xdeadbeef, %g6			! marker value, F4 unused here
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	! If the per-cpu tl1_hdlr flag is set, a TL=1 fast-trap handler
	! (e.g. the lddf/stdf emulation) faulted deliberately: clear the
	! flag and let sfmmu_mmu_trap resolve the page fault.
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f
	nop
	sta	%g0, [%g7]ASI_MEM
	! XXXQ need to setup registers for sfmmu_mmu_trap?
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tpc, %g7
	/* in user_rtt? */
	set	rtt_fill_start, %g6
	cmp	%g7, %g6
	blu,pn	%xcc, 6f
	.empty
	set	rtt_fill_end, %g6
	cmp	%g7, %g6
	bgeu,pn	%xcc, 6f
	nop
	set	fault_rtt_fn1, %g7
	ba,a	7f
6:
	! check to see if the trap pc is in a window spill/fill handling
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bgeu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	! pc is inside the trap table, convert to trap type
	srl	%g7, 5, %g6		! XXXQ need #define
	and	%g6, 0x1ff, %g6		! XXXQ need #define
	! and check for a window trap type
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7

7:
	! Arguments are passed in the global set active after the
	! 'done' instruction. Before switching sets, must save
	! the calculated next pc
	wrpr	%g0, %g7, %tnpc
	wrpr	%g0, 1, %gl
	rdpr	%tt, %g5
	MMU_FAULT_STATUS_AREA(%g7)
	cmp	%g5, T_ALIGNMENT
	be,pn	%xcc, 1f
	ldx	[%g7 + MMFSA_D_ADDR], %g6
	ldx	[%g7 + MMFSA_D_CTX], %g7
	srlx	%g6, MMU_PAGESHIFT, %g6		/* align address */
	sllx	%g6, MMU_PAGESHIFT, %g6
	or	%g6, %g7, %g6			/* TAG_ACCESS */
1:
	done
	SET_SIZE(mmu_trap_tl1)

/*
 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
 * traps are valid only when kmdb is loaded.  When the debugger is active,
 * the code below is rewritten to transfer control to the appropriate
 * debugger entry points.
 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0		! patched at runtime when kmdb loads
	jmp	%g1 + 0
	nop

	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0		! patched at runtime when kmdb loads
	jmp	%g1 + 0
	nop

/*
 * This entry is copied from OBP's trap table during boot.
 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT



#ifdef	TRAPTRACE
/*
 * TRAPTRACE support.
 * labels here are branched to with "rd %pc, %g7" in the delay slot.
 * Return is done by "jmp %g7 + 4".
 */

/*
 * Record a trap-trace entry for a data-MMU event: tick, tt, tstate, sp,
 * tpc, plus the D-side fault address/context/type from the MMU fault
 * status area.  Entered with return pc in %g7; clobbers %g3-%g6.
 */
trace_dmmu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_D_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_D_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/*
 * Same as trace_dmmu, but records the instruction-MMU fault fields.
 */
trace_immu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_I_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_I_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_I_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/*
 * Generic trap-trace entry: no MMU-specific fields, F1-F4 zeroed.
 */
trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/*
 * Window spill/fill trace entry.  Unlike the handlers above this runs
 * with locals available; return pc is in %l4 (cleared on return).
 */
trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	clr	%l0

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */

	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)
	jmp	%g7 + 4
	nop

/*
 * Trace a TSB miss
 *
 * g1 = tsb8k pointer (in)
 * g2 = tag access register (in)
 * g3 = tsb4m pointer (in)
 * g4 = tsbe tag (in/clobbered)
 * g5 - g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	.global	trace_tsbmiss
trace_tsbmiss:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	stna	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
	stna	%g4, [%g5 + TRAP_ENT_F1]%asi		! XXX? tsb tag
	rdpr	%tnpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
	srlx	%g1, 32, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! huh?
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	! pick D- or I-side fault address depending on the trap type
	mov	MMFSA_D_ADDR, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_ADDR, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_ADDR, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop

/*
 * g2 = tag access register (in)
 * g3 = ctx number (in)
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%g2, [%g1 + TRAP_ENT_SP]%asi		! tag access reg
	stna	%g0, [%g1 + TRAP_ENT_TR]%asi
	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop

#endif /* TRAPTRACE */

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_lddf_not_aligned, #function
.dmmu_exc_lddf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp			! 64-bit (biased-sp) process?
	bnz,pt	%xcc, .lddf_exception_not_aligned
	or	%g3, %g1, %g3		/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_lddf_not_aligned)

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_stdf_not_aligned, #function
.dmmu_exc_stdf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp			! 64-bit (biased-sp) process?
	bnz,pt	%xcc, .stdf_exception_not_aligned
	or	%g3, %g1, %g3		/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_stdf_not_aligned)

/*
 * Synthesize SFAR/SFSR/TAG_ACCESS from the MMU fault status area and
 * funnel a data access exception to .mmu_exception_end as
 * T_DATA_EXCEPTION.
 */
	.type	.dmmu_exception, #function
.dmmu_exception:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	ldx	[%g3 + MMFSA_D_TYPE], %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	srlx	%g2, MMU_PAGESHIFT, %g2		/* align address */
	sllx	%g2, MMU_PAGESHIFT, %g2
	or	%g2, %g3, %g2			/* TAG_ACCESS */
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	or	%g3, %g1, %g3			/* SFSR */
	ba,pt	%xcc, .mmu_exception_end
	mov	T_DATA_EXCEPTION, %g1
	SET_SIZE(.dmmu_exception)

/*
 * expects offset into tsbmiss area in %g1 and return pc in %g7
 */
stat_mmu:
	! Increment the per-cpu mmu statistic counter at tsbmiss_area + %g1.
	CPU_INDEX(%g5, %g6)
	sethi	%hi(tsbmiss_area), %g6
	sllx	%g5, TSBMISS_SHIFT, %g5
	or	%g6, %lo(tsbmiss_area), %g6
	add	%g6, %g5, %g6			/* g6 = tsbmiss area */
	ld	[%g6 + %g1], %g5
	add	%g5, 1, %g5
	jmp	%g7 + 4
	st	%g5, [%g6 + %g1]


/*
 * fast_trap_done, fast_trap_done_chk_intr:
 *
 * Due to the design of UltraSPARC pipeline, pending interrupts are not
 * taken immediately after a RETRY or DONE instruction which causes IE to
 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
 * to execute first before taking any interrupts. If that instruction
 * results in other traps, and if the corresponding trap handler runs
 * entirely at TL=1 with interrupts disabled, then pending interrupts
 * won't be taken until after yet another instruction following the %tpc
 * or %tnpc.
 *
 * A malicious user program can use this feature to block out interrupts
 * for extended durations, which can result in send_mondo_timeout kernel
 * panic.
 *
 * This problem is addressed by servicing any pending interrupts via
 * sys_trap before returning back to the user mode from a fast trap
 * handler.
 * The "done" instruction within a fast trap handler, which
 * runs entirely at TL=1 with interrupts disabled, is replaced with the
 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
 * entry point.
 *
 * We check for any pending interrupts here and force a sys_trap to
 * service those interrupts, if any. To minimize overhead, pending
 * interrupts are checked if the %tpc happens to be at 16K boundary,
 * which allows a malicious program to execute at most 4K consecutive
 * instructions before we service any pending interrupts. If a worst
 * case fast trap handler takes about 2 usec, then interrupts will be
 * blocked for at most 8 msec, less than a clock tick.
 *
 * For the cases where we don't know if the %tpc will cross a 16K
 * boundary, we can't use the above optimization and always process
 * any pending interrupts via fast_trap_done_chk_intr entry point.
 *
 * Entry Conditions:
 * %pstate		am:0 priv:1 ie:0
 * 			globals are AG (not normal globals)
 */

	.global	fast_trap_done, fast_trap_done_chk_intr
fast_trap_done:
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	nop
	done

fast_trap_done_chk_intr:
1:	rd	SOFTINT, %g6
	brnz,pn	%g6, 2f			! branch if any pending intr
	nop
	done

2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

fast_trap_dummy_call:
	retl
	nop

#endif	/* lint */