1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27#pragma ident "%Z%%M% %I% %E% SMI" 28 29#if !defined(lint) 30#include "assym.h" 31#endif /* !lint */ 32#include <sys/asm_linkage.h> 33#include <sys/privregs.h> 34#include <sys/sun4asi.h> 35#include <sys/machasi.h> 36#include <sys/hypervisor_api.h> 37#include <sys/machtrap.h> 38#include <sys/machthread.h> 39#include <sys/machbrand.h> 40#include <sys/pcb.h> 41#include <sys/pte.h> 42#include <sys/mmu.h> 43#include <sys/machpcb.h> 44#include <sys/async.h> 45#include <sys/intreg.h> 46#include <sys/scb.h> 47#include <sys/psr_compat.h> 48#include <sys/syscall.h> 49#include <sys/machparam.h> 50#include <sys/traptrace.h> 51#include <vm/hat_sfmmu.h> 52#include <sys/archsystm.h> 53#include <sys/utrap.h> 54#include <sys/clock.h> 55#include <sys/intr.h> 56#include <sys/fpu/fpu_simulator.h> 57#include <vm/seg_spt.h> 58 59/* 60 * WARNING: If you add a fast trap handler which can be invoked by a 61 * non-privileged user, you may have to use the FAST_TRAP_DONE macro 62 * instead of "done" instruction to return back to the user 
 mode. See
 * comments for the "fast_trap_done" entry point for more information.
 *
 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
 * cases where you always want to process any pending interrupts before
 * returning back to the user mode.
 */

/* Return to user mode from a fast trap handler (no interrupt check). */
#define	FAST_TRAP_DONE	\
	ba,a	fast_trap_done

/* As above, but process any pending interrupts before returning. */
#define	FAST_TRAP_DONE_CHK_INTR	\
	ba,a	fast_trap_done_chk_intr

/*
 * SPARC V9 Trap Table
 *
 * Most of the trap handlers are made from common building
 * blocks, and some are instantiated multiple times within
 * the trap table. So, I build a bunch of macros, then
 * populate the table using only the macros.
 *
 * Many macros branch to sys_trap. Its calling convention is:
 *	%g1		kernel trap handler
 *	%g2, %g3	args for above
 *	%g4		desire %pil
 */

#ifdef TRAPTRACE

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 * The branch delay slot captures %pc in %g7 so the trace routine
 * can return here.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#define	TT_TRACE_INS	2

/*
 * Window-handler variant of TT_TRACE; uses local register %l4 for the
 * return address.  %l4 is cleared afterwards (NOTE(review): presumably
 * so the window is left clean -- confirm against the trace_win code).
 */
#define	TT_TRACE_L(label)	\
	ba	label		;\
	rd	%pc, %l4	;\
	clr	%l4
#define	TT_TRACE_L_INS	3

#else

/* TRAPTRACE disabled: the macros vanish and contribute 0 instructions. */
#define	TT_TRACE(label)
#define	TT_TRACE_INS	0

#define	TT_TRACE_L(label)
#define	TT_TRACE_L_INS	0

#endif

/*
 * This macro is used to update per cpu mmu stats in perf critical
 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER
 * is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_PERCPU_DBSTAT(stat)	\
	mov	stat, %g1	;\
	ba	stat_mmu	;\
	rd	%pc, %g7
#else
#define	HAT_PERCPU_DBSTAT(stat)
#endif /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This first set are funneled to trap() with %tt as the type.
 * Trap will then either panic or send the user a signal.
 */
/*
 * NOT is used for traps that just shouldn't happen.
 * It comes in both single and quadruple flavors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
/*
 * NOT: funnel an unexpected trap to sys_trap with trap() as the handler
 * and the raw trap type (%tt) as the argument.  sub %g0, 1, %g4 puts -1
 * in %g4 (the "desired %pil" slot of the sys_trap convention).
 */
#define	NOT			\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32
#define	NOT4	NOT; NOT; NOT; NOT

/* NOTP: unexpected trap that panics directly via ptl1_panic. */
#define	NOTP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, ptl1_panic	;\
	mov	PTL1_BAD_TRAP, %g1	;\
	.align	32
#define	NOTP4	NOTP; NOTP; NOTP; NOTP


/*
 * BAD is used for trap vectors we don't have a kernel
 * handler for.
 * It also comes in single and quadruple versions.
 */
#define	BAD	NOT
#define	BAD4	NOT4

/* DONE: retire the trap with a single "done" instruction. */
#define	DONE			\
	done;			\
	.align	32

/*
 * TRAP vectors to the trap() function.
 * It's main use is for user errors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	TRAP(arg)		\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	mov	arg, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

/*
 * SYSCALL is used for unsupported syscall interfaces (with 'which'
 * set to 'nosys') and legacy support of old SunOS 4.x syscalls (with
 * 'which' set to 'syscall_trap32').
 *
 * The SYSCALL_TRAP* macros are used for syscall entry points.
 * SYSCALL_TRAP is used to support LP64 syscalls and SYSCALL_TRAP32
 * is used to support ILP32.  Each macro can only be used once
 * since they each define a symbol.  The symbols are used as hot patch
 * points by the brand infrastructure to dynamically enable and disable
 * brand syscall interposition.  See the comments around BRAND_CALLBACK
 * and brand_plat_interposition_enable() for more information.
 */
/* Syscall entry without trap tracing: hand 'which' to sys_trap as the handler. */
#define	SYSCALL_NOTT(which)		\
	set	(which), %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#define	SYSCALL(which)			\
	TT_TRACE(trace_gen)		;\
	SYSCALL_NOTT(which)

/* ILP32 syscall entry; ALTENTRY emits the brand hot-patch symbol. */
#define	SYSCALL_TRAP32				\
	TT_TRACE(trace_gen)			;\
	ALTENTRY(syscall_trap32_patch_point)	\
	SYSCALL_NOTT(syscall_trap32)

/* LP64 syscall entry; ALTENTRY emits the brand hot-patch symbol. */
#define	SYSCALL_TRAP				\
	TT_TRACE(trace_gen)			;\
	ALTENTRY(syscall_trap_patch_point)	\
	SYSCALL_NOTT(syscall_trap)

/*
 * GOTO just jumps to a label.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO(label)		\
	.global	label		;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * GOTO_TT just jumps to a label.
 * correctable ECC error traps at level 0 and 1 will use this macro.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO_TT(label, ttlabel)		\
	.global	label			;\
	TT_TRACE(ttlabel)		;\
	ba,a	label			;\
	.empty				;\
	.align	32

/*
 * Privileged traps
 * Takes breakpoint if privileged, calls trap() if not.
 */
#define	PRIV(label)			\
	rdpr	%tstate, %g1		;\
	btst	TSTATE_PRIV, %g1	;\
	bnz	label			;\
	rdpr	%tt, %g3		;\
	set	trap, %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32


/*
 * DTrace traps.
 */
#define	DTRACE_PID			\
	.global	dtrace_pid_probe	;\
	set	dtrace_pid_probe, %g1	;\
	ba,pt	%xcc, user_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#define	DTRACE_RETURN				\
	.global	dtrace_return_probe		;\
	set	dtrace_return_probe, %g1	;\
	ba,pt	%xcc, user_trap			;\
	sub	%g0, 1, %g4			;\
	.align	32

/*
 * REGISTER WINDOW MANAGEMENT MACROS
 */

/*
 * various convenient units of padding
 */
/* SKIP(n): reserve space for n 4-byte instruction slots. */
#define	SKIP(n)	.skip 4*(n)

/*
 * CLEAN_WINDOW is the simple handler for cleaning a register window.
 */
/*
 * Bump %cleanwin and zero all local and out registers of the window
 * being cleaned, then retry the trapped instruction.
 */
#define	CLEAN_WINDOW						\
	TT_TRACE_L(trace_win)					;\
	rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	;\
	clr %l0; clr %l1; clr %l2; clr %l3			;\
	clr %l4; clr %l5; clr %l6; clr %l7			;\
	clr %o0; clr %o1; clr %o2; clr %o3			;\
	clr %o4; clr %o5; clr %o6; clr %o7			;\
	retry; .align 128

#if !defined(lint)

/*
 * If we get an unresolved tlb miss while in a window handler, the fault
 * handler will resume execution at the last instruction of the window
 * hander, instead of delivering the fault to the kernel.  Spill handlers
 * use this to spill windows into the wbuf.
 *
 * The mixed handler works by checking %sp, and branching to the correct
 * handler.  This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames; which implies the handler order is: 32b,
 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
 * allow the branchs' delay slots to contain useful instructions.
 */

/*
 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned even
 * though this code only needs it to be four-byte aligned.
 */
/*
 * The SKIP() pads the handler so the trailing ba,a to the fault
 * routine sits in the handler's final instruction slots, where an
 * unresolved TLB miss resumes execution (see comment above).
 */
#define	SPILL_32bit(tail)					\
	srl	%sp, 0, %sp					;\
1:	st	%l0, [%sp + 0]					;\
	st	%l1, [%sp + 4]					;\
	st	%l2, [%sp + 8]					;\
	st	%l3, [%sp + 12]					;\
	st	%l4, [%sp + 16]					;\
	st	%l5, [%sp + 20]					;\
	st	%l6, [%sp + 24]					;\
	st	%l7, [%sp + 28]					;\
	st	%i0, [%sp + 32]					;\
	st	%i1, [%sp + 36]					;\
	st	%i2, [%sp + 40]					;\
	st	%i3, [%sp + 44]					;\
	st	%i4, [%sp + 48]					;\
	st	%i5, [%sp + 52]					;\
	st	%i6, [%sp + 56]					;\
	st	%i7, [%sp + 60]					;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
/* %g1-%g3 hold small offsets; %g4 walks the frame 16 bytes at a time. */
#define	SPILL_32bit_asi(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
1:	sta	%l0, [%sp + %g0]asi_num				;\
	mov	4, %g1						;\
	sta	%l1, [%sp + %g1]asi_num				;\
	mov	8, %g2						;\
	sta	%l2, [%sp + %g2]asi_num				;\
	mov	12, %g3						;\
	sta	%l3, [%sp + %g3]asi_num				;\
	add	%sp, 16, %g4					;\
	sta	%l4, [%g4 + %g0]asi_num				;\
	sta	%l5, [%g4 + %g1]asi_num				;\
	sta	%l6, [%g4 + %g2]asi_num				;\
	sta	%l7, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i0, [%g4 + %g0]asi_num				;\
	sta	%i1, [%g4 + %g1]asi_num				;\
	sta	%i2, [%g4 + %g2]asi_num				;\
	sta	%i3, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i4, [%g4 + %g0]asi_num				;\
	sta	%i5, [%g4 + %g1]asi_num				;\
	sta	%i6, [%g4 + %g2]asi_num				;\
	sta	%i7, [%g4 + %g3]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/* TL>1 spill: defer straight to the fault handler. */
#define	SPILL_32bit_tt1(asi_num, tail)				\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty							;\
	.align	128


/*
 * FILL_32bit fills a 32-bit-wide
 kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned even though this
 * code only needs it to be four-byte aligned.
 */
#define	FILL_32bit(tail)					\
	srl	%sp, 0, %sp					;\
1:	TT_TRACE_L(trace_win)					;\
	ld	[%sp + 0], %l0					;\
	ld	[%sp + 4], %l1					;\
	ld	[%sp + 8], %l2					;\
	ld	[%sp + 12], %l3					;\
	ld	[%sp + 16], %l4					;\
	ld	[%sp + 20], %l5					;\
	ld	[%sp + 24], %l6					;\
	ld	[%sp + 28], %l7					;\
	ld	[%sp + 32], %i0					;\
	ld	[%sp + 36], %i1					;\
	ld	[%sp + 40], %i2					;\
	ld	[%sp + 44], %i3					;\
	ld	[%sp + 48], %i4					;\
	ld	[%sp + 52], %i5					;\
	ld	[%sp + 56], %i6					;\
	ld	[%sp + 60], %i7					;\
	restored						;\
	retry							;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
/* %g1-%g3 hold small offsets; %g4 walks the frame 16 bytes at a time. */
#define	FILL_32bit_asi(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
1:	TT_TRACE_L(trace_win)					;\
	mov	4, %g1						;\
	lda	[%sp + %g0]asi_num, %l0				;\
	mov	8, %g2						;\
	lda	[%sp + %g1]asi_num, %l1				;\
	mov	12, %g3						;\
	lda	[%sp + %g2]asi_num, %l2				;\
	lda	[%sp + %g3]asi_num, %l3				;\
	add	%sp, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %l4				;\
	lda	[%g4 + %g1]asi_num, %l5				;\
	lda	[%g4 + %g2]asi_num, %l6				;\
	lda	[%g4 + %g3]asi_num, %l7				;\
	add	%g4, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %i0				;\
	lda	[%g4 + %g1]asi_num, %i1				;\
	lda	[%g4 + %g2]asi_num, %i2				;\
	lda	[%g4 + %g3]asi_num, %i3				;\
	add	%g4, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %i4				;\
	lda	[%g4 + %g1]asi_num, %i5				;\
	lda	[%g4 + %g2]asi_num, %i6				;\
	lda	[%g4 + %g3]asi_num, %i7				;\
	restored						;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty


/*
 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned.
 */
/* 64-bit frames are addressed through the V9 stack bias (V9BIAS64). */
#define	SPILL_64bit(tail)					\
2:	stx	%l0, [%sp + V9BIAS64 + 0]			;\
	stx	%l1, [%sp + V9BIAS64 + 8]			;\
	stx	%l2, [%sp + V9BIAS64 + 16]			;\
	stx	%l3, [%sp + V9BIAS64 + 24]			;\
	stx	%l4, [%sp + V9BIAS64 + 32]			;\
	stx	%l5, [%sp + V9BIAS64 + 40]			;\
	stx	%l6, [%sp + V9BIAS64 + 48]			;\
	stx	%l7, [%sp + V9BIAS64 + 56]			;\
	stx	%i0, [%sp + V9BIAS64 + 64]			;\
	stx	%i1, [%sp + V9BIAS64 + 72]			;\
	stx	%i2, [%sp + V9BIAS64 + 80]			;\
	stx	%i3, [%sp + V9BIAS64 + 88]			;\
	stx	%i4, [%sp + V9BIAS64 + 96]			;\
	stx	%i5, [%sp + V9BIAS64 + 104]			;\
	stx	%i6, [%sp + V9BIAS64 + 112]			;\
	stx	%i7, [%sp + V9BIAS64 + 120]			;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-18-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/* TL>1 kernel spill: defer straight to the fault handler. */
#define	SPILL_64bit_ktt1(tail)					\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty							;\
	.align	128

/*
 * TL>1 mixed spill: an odd %sp (low bit set, from the 64-bit bias)
 * means a 64-bit frame; even means 32-bit.  The srl in the annulled
 * delay slot clears the upper 32 bits of %sp for the 32-bit case.
 */
#define	SPILL_mixed_ktt1(tail)					\
	btst	1, %sp						;\
	bz,a,pt	%xcc, fault_32bit_/**/tail			;\
	srl	%sp, 0, %sp					;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty							;\
	.align	128

/*
 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
/* %g1-%g4 hold biased offsets; %g5 walks the frame 32 bytes at a time. */
#define	SPILL_64bit_asi(asi_num, tail)				\
	mov	0 + V9BIAS64, %g1				;\
2:	stxa	%l0, [%sp + %g1]asi_num				;\
	mov	8 + V9BIAS64, %g2				;\
	stxa	%l1, [%sp + %g2]asi_num				;\
	mov	16 + V9BIAS64, %g3				;\
	stxa	%l2, [%sp + %g3]asi_num				;\
	mov	24 + V9BIAS64, %g4				;\
	stxa	%l3, [%sp + %g4]asi_num				;\
	add	%sp, 32, %g5					;\
	stxa	%l4, [%g5 + %g1]asi_num				;\
	stxa	%l5, [%g5 + %g2]asi_num				;\
	stxa	%l6, [%g5 + %g3]asi_num				;\
	stxa	%l7, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i0, [%g5 + %g1]asi_num				;\
	stxa	%i1, [%g5 + %g2]asi_num				;\
	stxa	%i2, [%g5 + %g3]asi_num				;\
	stxa	%i3, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i4, [%g5 + %g1]asi_num				;\
	stxa	%i5, [%g5 + %g2]asi_num				;\
	stxa	%i6, [%g5 + %g3]asi_num				;\
	stxa	%i7, [%g5 + %g4]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/* TL>1 spill: defer straight to the fault handler. */
#define	SPILL_64bit_tt1(asi_num, tail)				\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty							;\
	.align	128

/*
 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit(tail)					\
2:	TT_TRACE_L(trace_win)					;\
	ldx	[%sp + V9BIAS64 + 0], %l0			;\
	ldx	[%sp + V9BIAS64 + 8], %l1			;\
	ldx	[%sp + V9BIAS64 + 16], %l2			;\
	ldx	[%sp + V9BIAS64 + 24], %l3			;\
	ldx	[%sp + V9BIAS64 + 32], %l4			;\
	ldx	[%sp + V9BIAS64 + 40], %l5			;\
	ldx	[%sp + V9BIAS64 + 48], %l6			;\
	ldx	[%sp + V9BIAS64 + 56], %l7			;\
	ldx	[%sp + V9BIAS64 + 64], %i0			;\
	ldx	[%sp + V9BIAS64 + 72], %i1			;\
	ldx	[%sp + V9BIAS64 + 80], %i2			;\
	ldx	[%sp + V9BIAS64 + 88], %i3			;\
	ldx	[%sp + V9BIAS64 + 96], %i4			;\
	ldx	[%sp + V9BIAS64 + 104], %i5			;\
	ldx	[%sp + V9BIAS64 + 112], %i6			;\
	ldx	[%sp + V9BIAS64 + 120], %i7			;\
	restored						;\
	retry							;\
	SKIP(31-18-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/*
 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
/* %g1-%g4 hold biased offsets; %g5 walks the frame 32 bytes at a time. */
#define	FILL_64bit_asi(asi_num, tail)				\
	mov	V9BIAS64 + 0, %g1				;\
2:	TT_TRACE_L(trace_win)					;\
	ldxa	[%sp + %g1]asi_num, %l0				;\
	mov	V9BIAS64 + 8, %g2				;\
	ldxa	[%sp + %g2]asi_num, %l1				;\
	mov	V9BIAS64 + 16, %g3				;\
	ldxa	[%sp + %g3]asi_num, %l2				;\
	mov	V9BIAS64 + 24, %g4				;\
	ldxa	[%sp + %g4]asi_num, %l3				;\
	add	%sp, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %l4				;\
	ldxa	[%g5 + %g2]asi_num, %l5				;\
	ldxa	[%g5 + %g3]asi_num, %l6				;\
	ldxa	[%g5 + %g4]asi_num, %l7				;\
	add	%g5, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %i0				;\
	ldxa	[%g5 + %g2]asi_num, %i1				;\
	ldxa	[%g5 + %g3]asi_num, %i2				;\
	ldxa	[%g5 + %g4]asi_num, %i3				;\
	add	%g5, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %i4				;\
	ldxa	[%g5 + %g2]asi_num, %i5				;\
	ldxa	[%g5 + %g3]asi_num, %i6				;\
	ldxa	[%g5 + %g4]asi_num, %i7				;\
	restored						;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty


#endif /* !lint */

/*
 * SPILL_mixed spills either size window, depending on
 * whether %sp is even or odd, to a 32-bit address space.
 * This may only be used in conjunction with SPILL_32bit/
 * FILL_64bit.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
/* Branch back into the preceding handlers: 1b = 32-bit, 2b = 64-bit. */
#define	SPILL_mixed						\
	btst	1, %sp						;\
	bz,a,pt	%xcc, 1b					;\
	srl	%sp, 0, %sp					;\
	ba,pt	%xcc, 2b					;\
	nop							;\
	.align	128

/*
 * FILL_mixed(ASI) fills either size window, depending on
 * whether %sp is even or odd, from a 32-bit address space.
 * This may only be used in conjunction with FILL_32bit/
 * FILL_64bit.  New versions of FILL_mixed_{tt1,asi} would be
 * needed for use with FILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of fill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
/* Branch back into the preceding handlers: 1b = 32-bit, 2b = 64-bit. */
#define	FILL_mixed						\
	btst	1, %sp						;\
	bz,a,pt	%xcc, 1b					;\
	srl	%sp, 0, %sp					;\
	ba,pt	%xcc, 2b					;\
	nop							;\
	.align	128


/*
 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
 * respectively, into the address space via the designated asi.  The
 * unbiased stack pointer is required to be eight-byte aligned (even for
 * the 32-bit case even though this code does not require such strict
 * alignment).
 *
 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
 * window may contain kernel data so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap.  These handler then
 * spill the appropriate window but also back up a window and clean the
 * window that didn't get a cleanwin trap.
 */
/* Spill the window, then branch to .spill_clean with the wstate in %g7. */
#define	SPILL_32clean(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
	sta	%l0, [%sp + %g0]asi_num				;\
	mov	4, %g1						;\
	sta	%l1, [%sp + %g1]asi_num				;\
	mov	8, %g2						;\
	sta	%l2, [%sp + %g2]asi_num				;\
	mov	12, %g3						;\
	sta	%l3, [%sp + %g3]asi_num				;\
	add	%sp, 16, %g4					;\
	sta	%l4, [%g4 + %g0]asi_num				;\
	sta	%l5, [%g4 + %g1]asi_num				;\
	sta	%l6, [%g4 + %g2]asi_num				;\
	sta	%l7, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i0, [%g4 + %g0]asi_num				;\
	sta	%i1, [%g4 + %g1]asi_num				;\
	sta	%i2, [%g4 + %g2]asi_num				;\
	sta	%i3, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i4, [%g4 + %g0]asi_num				;\
	sta	%i5, [%g4 + %g1]asi_num				;\
	sta	%i6, [%g4 + %g2]asi_num				;\
	sta	%i7, [%g4 + %g3]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	b	.spill_clean					;\
	mov	WSTATE_USER32, %g7				;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/* 64-bit variant of SPILL_32clean; frame addressed through V9BIAS64. */
#define	SPILL_64clean(asi_num, tail)				\
	mov	0 + V9BIAS64, %g1				;\
	stxa	%l0, [%sp + %g1]asi_num				;\
	mov	8 + V9BIAS64, %g2				;\
	stxa	%l1, [%sp + %g2]asi_num				;\
	mov	16 + V9BIAS64, %g3				;\
	stxa	%l2, [%sp + %g3]asi_num				;\
	mov	24 + V9BIAS64, %g4				;\
	stxa	%l3, [%sp + %g4]asi_num				;\
	add	%sp, 32, %g5					;\
	stxa	%l4, [%g5 + %g1]asi_num				;\
	stxa	%l5, [%g5 + %g2]asi_num				;\
	stxa	%l6, [%g5 + %g3]asi_num				;\
	stxa	%l7, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i0, [%g5 + %g1]asi_num				;\
	stxa	%i1, [%g5 + %g2]asi_num				;\
	stxa	%i2, [%g5 + %g3]asi_num				;\
	stxa	%i3, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i4, [%g5 + %g1]asi_num				;\
	stxa	%i5, [%g5 + %g2]asi_num				;\
	stxa	%i6, [%g5 + %g3]asi_num				;\
	stxa	%i7, [%g5 + %g4]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	b	.spill_clean					;\
	mov	WSTATE_USER64, %g7				;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty


/*
 * Floating point disabled.
 */
#define	FP_DISABLED_TRAP		\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc,.fp_disabled	;\
	nop				;\
	.align	32

/*
 * Floating point exceptions.
 */
#define	FP_IEEE_TRAP			\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc,.fp_ieee_exception	;\
	nop				;\
	.align	32

#define	FP_TRAP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc,.fp_exception	;\
	nop				;\
	.align	32

#if !defined(lint)

/*
 * ECACHE_ECC error traps at level 0 and level 1
 */
/* Defines the table_name label itself, then funnels to sys_trap/trap(). */
#define	ECACHE_ECC(table_name)		\
	.global	table_name		;\
table_name:				;\
	membar	#Sync			;\
	set	trap, %g1		;\
	rdpr	%tt, %g3		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#endif /* !lint */

/*
 * illegal instruction trap
 */
/* Checks for a user-installed V9 utrap handler before resorting to trap(). */
#define	ILLTRAP_INSTR			\
	membar	#Sync			;\
	TT_TRACE(trace_gen)		;\
	or	%g0, P_UTRAP4, %g2	;\
	or	%g0, T_UNIMP_INSTR, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32

/*
 * tag overflow trap
 */
#define	TAG_OVERFLOW			\
	TT_TRACE(trace_gen)		;\
	or	%g0, P_UTRAP10, %g2	;\
	or	%g0, T_TAG_OVERFLOW, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32

/*
 * divide by zero trap
 */
#define	DIV_BY_ZERO			\
	TT_TRACE(trace_gen)		;\
	or	%g0, P_UTRAP11, %g2	;\
	or	%g0, T_IDIV0, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32

/*
 * trap instruction for V9 user trap handlers
 */
#define	TRAP_INSTR			\
	TT_TRACE(trace_gen)		;\
	or	%g0, T_SOFTWARE_TRAP, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32
#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR

/*
 * LEVEL_INTERRUPT is for level N interrupts.
 * VECTOR_INTERRUPT is for the vector trap.
 */
/* Defines the tt_pilN label and hands the level to pil_interrupt in %g4. */
#define	LEVEL_INTERRUPT(level)		\
	.global	tt_pil/**/level		;\
tt_pil/**/level:			;\
	ba,pt	%xcc, pil_interrupt	;\
	mov	level, %g4		;\
	.align	32

#define	LEVEL14_INTERRUPT		\
	ba	pil14_interrupt		;\
	mov	PIL_14, %g4		;\
	.align	32

#define	CPU_MONDO			\
	ba,a,pt	%xcc, cpu_mondo		;\
	.align	32

#define	DEV_MONDO			\
	ba,a,pt	%xcc, dev_mondo		;\
	.align	32

/*
 * We take over the rtba after we set our trap table and
 * fault status area.  The watchdog reset trap is now handled by the OS.
 */
#define	WATCHDOG_RESET			\
	mov	PTL1_BAD_WATCHDOG, %g1	;\
	ba,a,pt	%xcc, .watchdog_trap	;\
	.align	32

/*
 * RED is for traps that use the red mode handler.
 * We should never see these either.
 */
#define	RED				\
	mov	PTL1_BAD_RED, %g1	;\
	ba,a,pt	%xcc, .watchdog_trap	;\
	.align	32


/*
 * MMU Trap Handlers.
 */

/*
 * synthesize for trap(): SFSR in %g3
 */
/* Builds an SFSR-style value from the MMU fault status area fields. */
#define	IMMU_EXCEPTION				\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	rdpr	%tpc, %g2			;\
	ldx	[%g3 + MMFSA_I_TYPE], %g1	;\
	ldx	[%g3 + MMFSA_I_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	or	%g3, %g1, %g3			;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_INSTR_EXCEPTION, %g1		;\
	.align	32

/*
 * synthesize for trap(): TAG_ACCESS in %g2, SFSR in %g3
 */
#define	DMMU_EXCEPTION				\
	ba,a,pt	%xcc, .dmmu_exception		;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_PRIV			\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2	;\
	/* Fault type not available in MMU fault status area */	;\
	mov	MMFSA_F_PRVACT, %g1		;\
	ldx	[%g3 + MMFSA_D_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	ba,pt	%xcc, .mmu_priv_exception	;\
	or	%g3, %g1, %g3			;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_NOT_ALIGNED			\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2	;\
	/* Fault type not available in MMU fault status area */	;\
	mov	MMFSA_F_UNALIGN, %g1		;\
	ldx	[%g3 + MMFSA_D_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	ba,pt	%xcc, .mmu_exception_not_aligned	;\
	or	%g3, %g1, %g3 /* SFSR */	;\
	.align	32
/*
 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
 */

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_LDDF_NOT_ALIGNED		\
	ba,a,pt	%xcc, .dmmu_exc_lddf_not_aligned	;\
	.align	32
/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_STDF_NOT_ALIGNED		\
	ba,a,pt	%xcc, .dmmu_exc_stdf_not_aligned	;\
	.align	32

#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 */
	.global	tt0_dtlbmiss
tt0_dtlbmiss:
	.global	tt1_dtlbmiss
tt1_dtlbmiss:
	nop
#endif

/*
 * Data miss handler (must be exactly 32 instructions)
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss
 * handler.
 *
 * User TLB miss handling depends upon whether a user process has one or
 * two TSBs.  User TSB information (physical base and size code) is kept
 * in two dedicated scratchpad registers.  Absence of a user TSB (primarily
 * second TSB) is indicated by a negative value (-1) in that register.
 */

/*
 * synthesize for miss handler: pseudo-tag access in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	DTLB_MISS(table_name)					;\
	.global	table_name/**/_dtlbmiss				;\
table_name/**/_dtlbmiss:					;\
	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */ ;\
	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */	;\
	cmp	%g3, INVALID_CONTEXT				;\
	ble,pn	%xcc, sfmmu_kdtlb_miss				;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	mov	SCRATCHPAD_UTSBREG2, %g1			;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */ ;\
	brgez,pn %g1, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */ ;\
	nop							;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */	;\
	ba,pt	%xcc, sfmmu_udtlb_fastpath	/* no 4M TSB, miss */ ;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	.align	128


#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of ITLB_MISS().
 */
	.global	tt0_itlbmiss
tt0_itlbmiss:
	.global	tt1_itlbmiss
tt1_itlbmiss:
	nop
#endif

/*
 * Instruction miss handler.
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * ldda instructions will have their ASI patched
 * by sfmmu_patch_ktsb at runtime.
 * MUST be EXACTLY 32 instructions or we'll break.
 */

/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	ITLB_MISS(table_name)					\
	.global	table_name/**/_itlbmiss				;\
table_name/**/_itlbmiss:					;\
	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */ ;\
	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */	;\
	cmp	%g3, INVALID_CONTEXT				;\
	ble,pn	%xcc, sfmmu_kitlb_miss				;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	mov	SCRATCHPAD_UTSBREG2, %g1			;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */ ;\
	brgez,pn %g1, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */ ;\
	nop							;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */	;\
	ba,pt	%xcc, sfmmu_uitlb_fastpath	/* no 4M TSB, miss */ ;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */ ;\
	.align	128

#define	DTSB_MISS	\
	GOTO_TT(sfmmu_slow_dmmu_miss,trace_dmmu)

#define	ITSB_MISS	\
	GOTO_TT(sfmmu_slow_immu_miss,trace_immu)

/*
 * This macro is the first level handler for fast protection faults.
 * It first demaps the tlb entry which generated the fault and then
 * attempts to set the modify bit on the hash.  It needs to be
 * exactly 32 instructions.
 */
/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	DTLB_PROT						\
	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */	;\
	/*							;\
	 * g2 = pseudo-tag access register (ctx type rather than ctx ID) ;\
	 * g3 = ctx type (0, 1, or 2)				;\
	 */							;\
	TT_TRACE(trace_dataprot)  /* 2 instr ifdef TRAPTRACE */	;\
	/* clobbers g1 and g6 XXXQ? */				;\
	brnz,pt	%g3, sfmmu_uprot_trap	/* user trap */		;\
	nop							;\
	ba,a,pt	%xcc, sfmmu_kprot_trap	/* kernel trap */	;\
	.align	128

#define	DMMU_EXCEPTION_TL1				;\
	ba,a,pt	%xcc, mmu_trap_tl1			;\
	.align	32

#define	MISALIGN_ADDR_TL1				;\
	ba,a,pt	%xcc, mmu_trap_tl1			;\
	.align	32

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 * ttextra = value to OR in to trap type (%tt) (in)
 */
#ifdef TRAPTRACE
#define	TRACE_TSBHIT(ttextra)					\
	membar	#Sync						;\
	sethi	%hi(FLUSH_ADDR), %g6				;\
	flush	%g6						;\
	TRACE_PTR(%g3, %g6)					;\
	GET_TRACE_TICK(%g6)					;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi			;\
	stna	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */ ;\
	stna	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */	;\
	rdpr	%tnpc, %g6					;\
	stna	%g6, [%g3 + TRAP_ENT_F2]%asi			;\
	stna	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */ ;\
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi			;\
	rdpr	%tpc, %g6					;\
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi			;\
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)				;\
	rdpr	%tt, %g6					;\
	or	%g6, (ttextra), %g1				;\
	stha	%g1, [%g3 + TRAP_ENT_TT]%asi			;\
	MMU_FAULT_STATUS_AREA(%g4)				;\
	mov	MMFSA_D_ADDR, %g1				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	move	%xcc, MMFSA_I_ADDR, %g1				;\
	cmp	%g6, T_INSTR_MMU_MISS				;\
	move	%xcc, MMFSA_I_ADDR, %g1				;\
	ldx	[%g4 + %g1], %g1				;\
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */ ;\
	mov	MMFSA_D_CTX, %g1				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	move	%xcc, MMFSA_I_CTX, %g1				;\
	cmp	%g6, T_INSTR_MMU_MISS				;\
	move	%xcc, MMFSA_I_CTX, %g1				;\
	ldx	[%g4 + %g1], %g1				;\
	stna	%g1, [%g3 + TRAP_ENT_TR]%asi			;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
#define	TRACE_TSBHIT(ttextra)
#endif


#if
defined(lint) 1104 1105struct scb trap_table; 1106struct scb scb; /* trap_table/scb are the same object */ 1107 1108#else /* lint */ 1109 1110/* 1111 * ======================================================================= 1112 * SPARC V9 TRAP TABLE 1113 * 1114 * The trap table is divided into two halves: the first half is used when 1115 * taking traps when TL=0; the second half is used when taking traps from 1116 * TL>0. Note that handlers in the second half of the table might not be able 1117 * to make the same assumptions as handlers in the first half of the table. 1118 * 1119 * Worst case trap nesting so far: 1120 * 1121 * at TL=0 client issues software trap requesting service 1122 * at TL=1 nucleus wants a register window 1123 * at TL=2 register window clean/spill/fill takes a TLB miss 1124 * at TL=3 processing TLB miss 1125 * at TL=4 handle asynchronous error 1126 * 1127 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode". 1128 * 1129 * ======================================================================= 1130 */ 1131 .section ".text" 1132 .align 4 1133 .global trap_table, scb, trap_table0, trap_table1, etrap_table 1134 .type trap_table, #object 1135 .type trap_table0, #object 1136 .type trap_table1, #object 1137 .type scb, #object 1138trap_table: 1139scb: 1140trap_table0: 1141 /* hardware traps */ 1142 NOT; /* 000 reserved */ 1143 RED; /* 001 power on reset */ 1144 WATCHDOG_RESET; /* 002 watchdog reset */ 1145 RED; /* 003 externally initiated reset */ 1146 RED; /* 004 software initiated reset */ 1147 RED; /* 005 red mode exception */ 1148 NOT; NOT; /* 006 - 007 reserved */ 1149 IMMU_EXCEPTION; /* 008 instruction access exception */ 1150 ITSB_MISS; /* 009 instruction access MMU miss */ 1151 NOT; /* 00A reserved */ 1152 NOT; NOT4; /* 00B - 00F reserved */ 1153 ILLTRAP_INSTR; /* 010 illegal instruction */ 1154 TRAP(T_PRIV_INSTR); /* 011 privileged opcode */ 1155 TRAP(T_UNIMP_LDD); /* 012 unimplemented LDD */ 1156 TRAP(T_UNIMP_STD); /* 013 
unimplemented STD */ 1157 NOT4; NOT4; NOT4; /* 014 - 01F reserved */ 1158 FP_DISABLED_TRAP; /* 020 fp disabled */ 1159 FP_IEEE_TRAP; /* 021 fp exception ieee 754 */ 1160 FP_TRAP; /* 022 fp exception other */ 1161 TAG_OVERFLOW; /* 023 tag overflow */ 1162 CLEAN_WINDOW; /* 024 - 027 clean window */ 1163 DIV_BY_ZERO; /* 028 division by zero */ 1164 NOT; /* 029 internal processor error */ 1165 NOT; NOT; NOT4; /* 02A - 02F reserved */ 1166 DMMU_EXCEPTION; /* 030 data access exception */ 1167 DTSB_MISS; /* 031 data access MMU miss */ 1168 NOT; /* 032 reserved */ 1169 NOT; /* 033 data access protection */ 1170 DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */ 1171 DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */ 1172 DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */ 1173 DMMU_EXC_AG_PRIV; /* 037 privileged action */ 1174 NOT; /* 038 LDQF mem address not aligned */ 1175 NOT; /* 039 STQF mem address not aligned */ 1176 NOT; NOT; NOT4; /* 03A - 03F reserved */ 1177 NOT; /* 040 async data error */ 1178 LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */ 1179 LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */ 1180 LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */ 1181 LEVEL_INTERRUPT(4); /* 044 interrupt level 4 */ 1182 LEVEL_INTERRUPT(5); /* 045 interrupt level 5 */ 1183 LEVEL_INTERRUPT(6); /* 046 interrupt level 6 */ 1184 LEVEL_INTERRUPT(7); /* 047 interrupt level 7 */ 1185 LEVEL_INTERRUPT(8); /* 048 interrupt level 8 */ 1186 LEVEL_INTERRUPT(9); /* 049 interrupt level 9 */ 1187 LEVEL_INTERRUPT(10); /* 04A interrupt level 10 */ 1188 LEVEL_INTERRUPT(11); /* 04B interrupt level 11 */ 1189 LEVEL_INTERRUPT(12); /* 04C interrupt level 12 */ 1190 LEVEL_INTERRUPT(13); /* 04D interrupt level 13 */ 1191 LEVEL14_INTERRUPT; /* 04E interrupt level 14 */ 1192 LEVEL_INTERRUPT(15); /* 04F interrupt level 15 */ 1193 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F reserved */ 1194 NOT; /* 060 interrupt vector */ 1195 GOTO(kmdb_trap); /* 061 PA watchpoint */ 1196 
	GOTO(kmdb_trap);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt0);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070	reserved */
	NOT;				/* 071	reserved */
	NOT;				/* 072	reserved */
	NOT;				/* 073	reserved */
	NOT4; NOT4			/* 074 - 07B reserved */
	CPU_MONDO;			/* 07C	cpu_mondo */
	DEV_MONDO;			/* 07D	dev_mondo */
	GOTO_TT(resumable_error, trace_gen);	/* 07E  resumable error */
	GOTO_TT(nonresumable_error, trace_gen);	/* 07F  nonresumable error */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_asi(ASI_AIUP,sn0);	/* 084	spill 1 normal */
	SPILL_64bit_asi(ASI_AIUP,sn0);	/* 088	spill 2 normal */
	SPILL_32clean(ASI_AIUP,sn0);	/* 08C	spill 3 normal */
	SPILL_64clean(ASI_AIUP,sn0);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0A4	spill 1 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0A8	spill 2 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0AC	spill 3 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0C4	fill 1 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0C8	fill 2 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0CC	fill 3 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4;				/* 0E0	fill 0 other */
	NOT4;				/* 0E4	fill 1 other */
	NOT4;				/* 0E8	fill 2 other */
	NOT4;				/* 0EC	fill 3 other */
	NOT4;				/* 0F0	fill 4 other */
	NOT4;				/* 0F4	fill 5 other */
	NOT4;				/* 0F8	fill 6 other */
	NOT4;				/* 0FC	fill 7 other */
	/* user traps */
	GOTO(syscall_trap_4x);		/* 100	old system call */
	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
	TRAP(T_DIV0);			/* 102	user divide by zero */
	GOTO(.flushw);			/* 103	flush windows */
	GOTO(.clean_windows);		/* 104	clean windows */
	BAD;				/* 105	range check ?? */
	GOTO(.fix_alignment);		/* 106	do unaligned references */
	BAD;				/* 107	unused */
	SYSCALL_TRAP32;			/* 108	ILP32 system call on LP64 */
	GOTO(set_trap0_addr);		/* 109	set trap0 address */
	BAD; BAD; BAD4;			/* 10A - 10F unused */
	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
	GOTO(.getcc);			/* 120	get condition codes */
	GOTO(.setcc);			/* 121	set condition codes */
	GOTO(.getpsr);			/* 122	get psr */
	GOTO(.setpsr);			/* 123	set psr (some fields) */
	GOTO(get_timestamp);		/* 124	get timestamp */
	GOTO(get_virtime);		/* 125	get lwp virtual time */
	PRIV(self_xcall);		/* 126	self xcall */
	GOTO(get_hrestime);		/* 127	get hrestime */
	BAD;				/* 128	ST_SETV9STACK */
	GOTO(.getlgrp);			/* 129	get lgrpid */
	BAD; BAD; BAD4;			/* 12A - 12F unused */
	BAD4; BAD4;			/* 130 - 137 unused */
	DTRACE_PID;			/* 138	dtrace pid tracing provider */
	BAD;				/* 139	unused */
	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
	BAD; BAD4;			/* 13B - 13F unused */
	SYSCALL_TRAP;			/* 140	LP64 system call */
	SYSCALL(nosys);			/* 141	unused system call trap */
#ifdef DEBUG_USER_TRAPTRACECTL
	GOTO(.traptrace_freeze);	/* 142	freeze traptrace */
	GOTO(.traptrace_unfreeze);	/* 143	unfreeze traptrace */
#else
	SYSCALL(nosys);			/* 142	unused system call trap */
	SYSCALL(nosys);			/* 143	unused system call trap */
#endif
	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD;				/* 170 - unused */
	BAD;				/* 171 - unused */
	BAD; BAD;			/* 172 - 173 unused */
	BAD4; BAD4;			/* 174 - 17B unused */
#ifdef	PTL1_PANIC_DEBUG
	mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
					/* 17C	test ptl1_panic */
#else
	BAD;				/* 17C	unused */
#endif	/* PTL1_PANIC_DEBUG */
	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
	PRIV(obp_bpt);			/* 17F	obp breakpoint */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
	.size	trap_table0, (.-trap_table0)
trap_table1:
	/* second half: traps taken from TL>0 */
	NOT4; NOT4;			/* 000 - 007 unused */
	NOT;				/* 008	instruction access exception */
	ITSB_MISS;			/* 009	instruction access MMU miss */
	NOT;				/* 00A	reserved */
	NOT; NOT4;			/* 00B - 00F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
	NOT4;				/* 020 - 023 unused */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	NOT4; NOT4;			/* 028 - 02F unused */
	DMMU_EXCEPTION_TL1;		/* 030	data access exception */
	DTSB_MISS;			/* 031	data access MMU miss */
	NOT;				/* 032	reserved */
	NOT;				/* 033	unused */
	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 040 - 04F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
	NOT;				/* 060	unused */
	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt1);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070	reserved */
	NOT;				/* 071	reserved */
	NOT;				/* 072	reserved */
	NOT;				/* 073	reserved */
	NOT4; NOT4;			/* 074 - 07B reserved */
	NOT;				/* 07C	reserved */
	NOT;				/* 07D	reserved */
	NOT;				/* 07E	resumable error */
	GOTO_TT(nonresumable_error, trace_gen);	/* 07F  nonresumable error */
	NOTP4;				/* 080	spill 0 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 084	spill 1 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 088	spill 2 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 08C	spill 3 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 090	spill 4 normal */
	NOTP4;				/* 094	spill 5 normal */
	SPILL_64bit_ktt1(sk);		/* 098	spill 6 normal */
	SPILL_mixed_ktt1(sk);		/* 09C	spill 7 normal */
	NOTP4;				/* 0A0	spill 0 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0A4	spill 1 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0A8	spill 2 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0AC	spill 3 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0B0	spill 4 other */
	NOTP4;				/* 0B4	spill 5 other */
	NOTP4;				/* 0B8	spill 6 other */
	NOTP4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	NOT4;				/* 0C4	fill 1 normal */
	NOT4;				/* 0C8	fill 2 normal */
	NOT4;				/* 0CC	fill 3 normal */
	NOT4;				/* 0D0	fill 4 normal */
	NOT4;				/* 0D4	fill 5 normal */
	NOT4;				/* 0D8	fill 6 normal */
	NOT4;				/* 0DC	fill 7 normal */
	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
/*
 * Code running at TL>0 does not use soft traps, so
 * we can truncate the table here.
 * However:
 * sun4v uses (hypervisor) ta instructions at TL > 0, so
 * provide a safety net for now.
 */
	/* soft traps */
	BAD4; BAD4; BAD4; BAD4;		/* 100 - 10F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 110 - 11F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 120 - 12F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 130 - 13F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 140 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 170 - 17F unused */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
etrap_table:
	.size	trap_table1, (.-trap_table1)
	.size	trap_table, (.-trap_table)
	.size	scb, (.-scb)

/*
 * We get to exec_fault in the case of an instruction miss and tte
 * has no execute bit set.  We go to tl0 to handle it.
 *
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
	ALTENTRY(exec_fault)
	TRACE_TSBHIT(TT_MMU_EXEC)
	MMU_FAULT_STATUS_AREA(%g4)
	ldx	[%g4 + MMFSA_I_ADDR], %g2	/* g2 = address */
	ldx	[%g4 + MMFSA_I_CTX], %g3	/* g3 = ctx */
	srlx	%g2, MMU_PAGESHIFT, %g2		! align address to page boundary
	cmp	%g3, USER_CONTEXT_TYPE
	sllx	%g2, MMU_PAGESHIFT, %g2
	movgu	%icc, USER_CONTEXT_TYPE, %g3	! clamp ctx to "user" type
	or	%g2, %g3, %g2			/* TAG_ACCESS */
	mov	T_INSTR_MMU_MISS, %g3		! arg2 = traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4				! %pil = PIL_MAX

/*
 * Memory-address-not-aligned at TL=0.  If the fault came from user mode
 * and the process has registered an unaligned-access utrap handler,
 * dispatch to it; otherwise funnel to trap() with T_ALIGNMENT.
 */
.mmu_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5		! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp
	bz,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7			! %g7 = misaligned address
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1

/*
 * Privileged-action/instruction exception: try the process's utrap
 * handler (P_UTRAP16) first, else fall through with T_PRIV_INSTR.
 */
.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1

/*
 * Common tail: if DTrace has NOFAULT set for this CPU, flag BADADDR and
 * return; otherwise go to trap() with (sfsr << 32 | trap type) in %g3.
 */
.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	1f
	or	%g5, CPU_DTRACE_BADADDR, %g5
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done

1:
	sllx	%g3, 32, %g3
	or	%g3, %g1, %g3
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * FP disabled trap.  Privileged-mode occurrence is a kernel bug
 * (ptl1_panic); user mode may have a fp_disabled utrap handler
 * (P_UTRAP7), else go to fp_disabled() via sys_trap.
 */
.fp_disabled:
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1

	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 2f
	nop
	ldn	[%g5 + P_UTRAP7], %g5		! fp_disabled utrap?
	brz,a,pn %g5, 2f
	nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,a,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g0, %g0, %g7
2:
	set	fp_disabled, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * FP IEEE-754 exception.  Privileged occurrence panics; user mode may
 * have a utrap handler (P_UTRAP8), else _fp_ieee_exception() handles it
 * (with %fsr passed in %g2).
 */
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 1f
	nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * Register Inputs:
 *	%g5		user trap handler
 *	%g7		misaligned addr - for alignment traps only
 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	mov	%g7, %l3			! arg2 == misaligned address

	rdpr	%tstate, %g1			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g1
	wrpr	%g1, %g4, %tstate
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE
	/* NOTREACHED */

/*
 * Decide whether a user trap should be vectored to a registered V9 utrap
 * handler; %g3 holds the software trap type on entry.  Falls through to
 * trap() (label 3) when no handler applies.
 */
.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f
	nop
	CPU_ADDR(%g4, %g1)			! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5		! load thread pointer
	ldn	[%g5 + T_PROCP], %g5		! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5		! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	nop

	brz,pt %g5, 3f			! if p_utraps == NULL goto trap()
	rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	smul	%g1, CPTRSIZE, %g2
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
	rdpr	%tpc, %g1		! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1	! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4	! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4		! and instruction with mask
	bnz,a,pt %icc, 3f		! if %g4 == zero, %g1 is an ILLTRAP
	nop				! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
	/* NOTREACHED */

/*
 * Register Inputs:
 *	%g5		user trap handler
 */
.setup_v9utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l7			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l6			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	rdpr	%tstate, %g2			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g2
	wrpr	%g2, %g4, %tstate

	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
	ldn	[%g4 + P_AS], %g4		! load as pointer
	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
	cmp	%l7, %g4			! check for single-step set
	bne,pt	%xcc, 4f
	nop
	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
	bne,pt	%icc, 4f
	nop
	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
	mov	%l7, %g4			! on entry to precise user trap
	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
						! at time of trap
	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
						! %g4 == userlimit
	FAST_TRAP_DONE
	/* NOTREACHED */
4:
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE_CHK_INTR
	/* NOTREACHED */

.fp_exception:
	CPU_ADDR(%g1, %g4)
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2

	/*
	 * Cheetah takes unfinished_FPop trap for certain range of operands
	 * to the "fitos" instruction. Instead of going through the slow
	 * software emulation path, we try to simulate the "fitos" instruction
	 * via "fitod" and "fdtos" provided the following conditions are met:
	 *
	 *	fpu_exists is set (if DEBUG)
	 *	not in privileged mode
	 *	ftt is unfinished_FPop
	 *	NXM IEEE trap is not enabled
	 *	instruction at %tpc is "fitos"
	 *
	 *  Usage:
	 *	%g1	per cpu address
	 *	%g2	%fsr
	 *	%g6	user instruction
	 *
	 * Note that we can take a memory access related trap while trying
	 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
	 * flag to catch those traps and let the SFMMU code deal with page
	 * fault and data access exception.
	 */
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7
	ld	[%g7 + %lo(fpu_exists)], %g7
	brz,pn	%g7, .fp_exception_cont
	nop
#endif
	rdpr	%tstate, %g7			! branch if in privileged mode
	btst	TSTATE_PRIV, %g7
	bnz,pn	%xcc, .fp_exception_cont
	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
	cmp	%g7, FTT_UNFIN
	set	FSR_TEM_NX, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
	andcc	%g2, %g5, %g0
	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
	rdpr	%tpc, %g5			! get faulting PC

	or	%g0, 1, %g7
	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
	lda	[%g5]ASI_USER, %g6		! get user's instruction
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag

	set	FITOS_INSTR_MASK, %g7
	and	%g6, %g7, %g7
	set	FITOS_INSTR, %g5
	cmp	%g7, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
	nop

	/*
	 * This is unfinished FPops trap for "fitos" instruction. We
	 * need to simulate "fitos" via "fitod" and "fdtos" instruction
	 * sequence.
	 *
	 * We need a temporary FP register to do the conversion. Since
	 * both source and destination operands for the "fitos" instruction
	 * have to be within %f0-%f31, we use an FP register from the upper
	 * half to guarantee that it won't collide with the source or the
	 * dest operand. However, we do have to save and restore its value.
	 *
	 * We use %d62 as a temporary FP register for the conversion and
	 * branch to appropriate instruction within the conversion tables
	 * based upon the rs2 and rd values.
	 */

	std	%d62, [%g1 + CPU_TMP1]		! save original value

	srl	%g6, FITOS_RS2_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fitod_table, %g4
	sllx	%g7, 2, %g7			! table entries are 4 bytes
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fitod_done
	.empty

_fitos_fitod_table:
	  fitod	%f0, %d62
	  fitod	%f1, %d62
	  fitod	%f2, %d62
	  fitod	%f3, %d62
	  fitod	%f4, %d62
	  fitod	%f5, %d62
	  fitod	%f6, %d62
	  fitod	%f7, %d62
	  fitod	%f8, %d62
	  fitod	%f9, %d62
	  fitod	%f10, %d62
	  fitod	%f11, %d62
	  fitod	%f12, %d62
	  fitod	%f13, %d62
	  fitod	%f14, %d62
	  fitod	%f15, %d62
	  fitod	%f16, %d62
	  fitod	%f17, %d62
	  fitod	%f18, %d62
	  fitod	%f19, %d62
	  fitod	%f20, %d62
	  fitod	%f21, %d62
	  fitod	%f22, %d62
	  fitod	%f23, %d62
	  fitod	%f24, %d62
	  fitod	%f25, %d62
	  fitod	%f26, %d62
	  fitod	%f27, %d62
	  fitod	%f28, %d62
	  fitod	%f29, %d62
	  fitod	%f30, %d62
	  fitod	%f31, %d62
_fitos_fitod_done:

	/*
	 * Now convert data back into single precision
	 */
	srl	%g6, FITOS_RD_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fdtos_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fdtos_done
	.empty

_fitos_fdtos_table:
	  fdtos	%d62, %f0
	  fdtos	%d62, %f1
	  fdtos	%d62, %f2
	  fdtos	%d62, %f3
	  fdtos	%d62, %f4
	  fdtos	%d62, %f5
	  fdtos	%d62, %f6
	  fdtos	%d62, %f7
	  fdtos	%d62, %f8
	  fdtos	%d62, %f9
	  fdtos	%d62, %f10
	  fdtos	%d62, %f11
	  fdtos	%d62, %f12
	  fdtos	%d62, %f13
	  fdtos	%d62, %f14
	  fdtos	%d62, %f15
	  fdtos	%d62, %f16
	  fdtos	%d62, %f17
	  fdtos	%d62, %f18
	  fdtos	%d62, %f19
	  fdtos	%d62, %f20
	  fdtos	%d62, %f21
	  fdtos	%d62, %f22
	  fdtos	%d62, %f23
	  fdtos	%d62, %f24
	  fdtos	%d62, %f25
	  fdtos	%d62, %f26
	  fdtos	%d62, %f27
	  fdtos	%d62, %f28
	  fdtos	%d62, %f29
	  fdtos	%d62, %f30
	  fdtos	%d62, %f31
_fitos_fdtos_done:

	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62

#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat (lock-free casxa retry loop)
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE

.fp_exception_cont:
	/*
	 * Let _fp_exception deal with simulating FPop instruction.
	 * Note that we need to pass %fsr in %g2 (already read above).
	 */

	set	_fp_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4


/*
 * Register windows
 */
.flushw:
.clean_windows:
	rdpr	%tnpc, %g1		! skip the trapping instruction:
	wrpr	%g1, %tpc		! %tpc = old %tnpc, %tnpc = %tnpc + 4
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * .spill_clean: clean the previous window, restore the wstate, and
 * "done".
 *
 * Entry: %g7 contains new wstate
 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
	wrpr	%g6, %cwp
	TT_TRACE_L(trace_win)
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate
	saved
	retry					! restores correct %cwp

/*
 * ST_FIX_ALIGN fast trap: mark the process so the kernel emulates
 * unaligned references on its behalf (p_fixalignment = 1).
 */
.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1
	mov	1, %g2
	stb	%g2, [%g1 + P_FIXALIGNMENT]
	FAST_TRAP_DONE

/*
 * Store FP register REG (encoded rd value) to [ADDR + CPU_TMP1] via a
 * computed jump into a table of std instructions; each table entry is
 * two instructions (8 bytes), hence the "sll REG, 3" scaling.
 */
#define	STDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark1:	set	start1, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start1:	ba,pt	%xcc, done1			;\
	std	%f0, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f32, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f2, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f34, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f4, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f36, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f6, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f38, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f8, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f40, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f10, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f42, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f12, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f44, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f14, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f46, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f16, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f48, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f18, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f50, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f20, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f52, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f22, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f54, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f24, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f56, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f26, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f58, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f62, [ADDR + CPU_TMP1]		;\
done1:

/*
 * Load FP register REG (encoded rd value) from [ADDR + CPU_TMP1];
 * same computed-jump scheme as STDF_REG above.
 */
#define	LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start2:	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f62		;\
done2:

/*
 * LDDF memory-address-not-aligned: emulate the user's lddf by doing two
 * 32-bit user-space loads, staging the 64-bit value in cpu_tmp1, and
 * loading it into the destination FP register via LDDF_REG.  Only the
 * primary/secondary (and no-fault) ASIs are emulated; anything else
 * goes to fpu_trap() in C.
 */
.lddf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5		! stash sfar
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2	! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
	srl	%g6, 23, %g1		! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for ldda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_PNF		! primary no fault address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	be,a,pt %icc, 2f
	nop
	cmp	%g1, ASI_SNF		! secondary no fault address space
	bne,a,pn %icc, 3f
	nop
2:
	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
	add	%g5, 4, %g5		! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5		! combine data
	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1

	srl	%g6, 25, %g3		!
	and	%g3, 0x1F, %g3		! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)		! load staged value into %f<rd>

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	! unsupported ASI: clear the nested-fault flag, fall into 4:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	! punt to C: fpu_trap(rp, addr, type) via sys_trap at PIL -1
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4

/*
 * Emulate a user stdf/stda instruction that trapped on a misaligned
 * address.  Mirror image of .lddf_exception_not_aligned above: extract
 * the double register via STDF_REG into cpu_tmp1, then store it to user
 * space as two 4-byte stores.  Only the primary/secondary ASIs are
 * emulated here; others go to C (fpu_trap).
 */
.stdf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7	! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f		! no fpu: hand off to trap()
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction

	srl	%g6, 23, %g1		! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! plain stdf: implicit primary ASI
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f		! i=1: ASI comes from %tstate.asi
	srl	%g2, 31, %g1		! get asi from %tstate (delay slot)
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xff, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	bne,a,pn %icc, 3f		! anything else: go to C
	nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6		! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6	! staged 8-byte value
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER	! first half
	add	%g5, 4, %g5		! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER	! second half

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	! unsupported ASI: clear the nested-fault flag, fall into 4:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	! punt to C: fpu_trap(rp, addr, type) via sys_trap at PIL -1
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! nofault little asi's
	sub	%g0, 1, %g4

#ifdef DEBUG_USER_TRAPTRACECTL

/*
 * Fast traps that let a user program freeze/unfreeze trap tracing
 * (debug builds only).  The TT_TRACE_L macro clobbers %l0-%l2/%l4, so
 * the user's locals are parked in globals around the call.
 */
.traptrace_freeze:
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]		! trap_freeze = 1
	FAST_TRAP_DONE

.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]		! trap_freeze = 0
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	FAST_TRAP_DONE

#endif /* DEBUG_USER_TRAPTRACECTL */

/*
 * V8 compatibility: return the condition codes (icc, as V8 psr bits)
 * in the user's %g1.  The user's globals live at %gl = 0; ours at
 * %gl = 1, so we switch sets to write %g1.
 */
.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	wrpr	%g0, 0, %gl
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

/*
 * V8 compatibility: set tstate's icc condition codes from the V8-style
 * psr value in the user's %g1 (fetched from the %gl = 0 set).
 */
.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	wrpr	%g0, 0, %gl
	mov	%g1, %o0			! user's %g1 = new psr.icc
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	sll	%o0, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2	! position as tstate.ccr.icc
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word (old ccr)
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate			! write back updated tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

/*
 * getpsr(void)
 * Return a synthesized V8 %psr in %o0: tstate's icc, fprs.fef mapped to
 * psr.ef, plus the SI-assigned impl/ver field.
 * Note that the xcc part of the ccr is not provided.
 * The V8 code shows why the V9 trap is not faster:
 *	#define GETPSR_TRAP() \
 *	mov %psr, %i0; jmp %l2; rett %l2+4; nop;
 */

	.type	.getpsr, #function
.getpsr:
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)

/*
 * setpsr(newpsr)
 * Install the icc bits of the V8-style psr in %o0 into tstate.ccr and
 * map psr.ef onto fprs.fef, keeping the lwp's software FPU state
 * (fpu_fprs/fpu_en) in sync.
 * Note that there is no support for ccr.xcc in the V9 code.
 */

	.type	.setpsr, #function
.setpsr:
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or 	%g0, CCR_ICC, %g3
	sllx	%g3, TSTATE_CCR_SHIFT, %g2	! mask of tstate.ccr.icc

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	! keep the lwp's saved FPU state consistent with the new fprs
	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)

/*
 * getlgrp
 * get home lgrpid on which the calling thread is currently executing.
 * Returns: %o0 = cpu_id of the current CPU,
 *          %o1 = lpl_lgrpid of the thread's home lgroup (sign-extended).
 */
	.type	.getlgrp, #function
.getlgrp:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1		! sign-extend 32-bit lgrpid into %o1
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)

/*
 * Entry for old 4.x trap (trap 0).
 * If the process registered a 4.x trap0 emulation address
 * (lwp_pcb.pcb_trap0addr), redirect the user there with the original
 * return pc passed in the user's %g6.  Otherwise handle it as a 32-bit
 * system call, mapping old 4.x mmap onto the current mmap number.
 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f			! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%tnpc, %l1		! save old tnpc
	wrpr	%g0, %g2, %tnpc		! setup tnpc

	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	mov	%l1, %g6		! pass tnpc to user code in %g6
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	!
	! check for old syscall mmap which is the only different one which
	! must be the same. Others are handled in the compatibility library.
	!
	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1	! rewrite syscall number in user %g1
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP1], %l0	! restore %l0 (saved in delay slot)
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)

/*
 * Handler for software trap 9.
 * Set trap0 emulation address for old 4.x system call trap.
 * XXX - this should be a system call.
 * The new trap0 address is passed in the user's %g1; it is forced to
 * 4-byte alignment before being stored in lwp_pcb.pcb_trap0addr.
 */
	ENTRY_NP(set_trap0_addr)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	mov	%g1, %l0		! preserve CPU addr
	wrpr	%g0, 0, %gl
	mov	%g1, %l1		! user's %g1 = new trap0 address
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)

/*
 * mmu_trap_tl1
 * trap handler for unexpected mmu traps.
 * simply checks if the trap was a user lddf/stdf alignment trap, in which
 * case we go to fpu_trap or a user trap from the window handler, in which
 * case we go save the state on the pcb. Otherwise, we go to ptl1_panic.
 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef	TRAPTRACE
	! record a trap-trace entry for this TL>1 MMU trap
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! MMU fault address
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! tl1_hdlr flag
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_TYPE], %g7	! XXXQ should be a MMFSA_F_ constant?
	ldx	[%g6 + MMFSA_D_CTX], %g6
	sllx	%g6, SFSR_CTX_SHIFT, %g6
	or	%g6, %g7, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! MMU context/type
	set	0xdeadbeef, %g6			! marker value
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	! If the nested-fault flag was set (we were emulating a misaligned
	! lddf/stdf), clear it and treat this as an ordinary page fault.
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f
	nop
	sta	%g0, [%g7]ASI_MEM		! clear tl1_hdlr flag
	! XXXQ need to setup registers for sfmmu_mmu_trap?
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tpc, %g7
	/* in user_rtt? */
	set	rtt_fill_start, %g6
	cmp	%g7, %g6
	blu,pn	%xcc, 6f
	.empty
	set	rtt_fill_end, %g6
	cmp	%g7, %g6
	bgeu,pn	%xcc, 6f
	nop
	! faulted inside the user_rtt fill region: resume at its fixup code
	set	fault_rtt_fn1, %g7
	ba,a	7f
6:
	! check to see if the trap pc is in a window spill/fill handling
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bgeu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	! pc is inside the trap table, convert to trap type
	srl	%g7, 5, %g6			! XXXQ need #define
	and	%g6, 0x1ff, %g6			! XXXQ need #define
	! and check for a window trap type
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7	! resume at the handler's fault-off entry

7:
	! Arguments are passed in the global set active after the
	! 'done' instruction. Before switching sets, must save
	! the calculated next pc
	wrpr	%g0, %g7, %tnpc
	wrpr	%g0, 1, %gl
	! build fault args for the target: %g5 = tt, %g6 = TAG_ACCESS
	rdpr	%tt, %g5
	MMU_FAULT_STATUS_AREA(%g7)
	cmp	%g5, T_ALIGNMENT
	be,pn	%xcc, 1f
	ldx	[%g7 + MMFSA_D_ADDR], %g6	! delay: fault address
	ldx	[%g7 + MMFSA_D_CTX], %g7
	srlx	%g6, MMU_PAGESHIFT, %g6		/* align address */
	cmp	%g7, USER_CONTEXT_TYPE
	sllx	%g6, MMU_PAGESHIFT, %g6
	movgu	%icc, USER_CONTEXT_TYPE, %g7	! clamp ctx to user type
	or	%g6, %g7, %g6			/* TAG_ACCESS */
1:
	done					! resume at %tnpc set above
	SET_SIZE(mmu_trap_tl1)

/*
 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
 * traps are valid only when kmdb is loaded.  When the debugger is active,
 * the code below is rewritten to transfer control to the appropriate
 * debugger entry points (the patched 'jmp %g1' replaces the default
 * branch back to trap_table0).
 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0		! default: not patched, normal handling
	jmp	%g1 + 0			! patched target when kmdb is active
	nop

	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0		! default: not patched, normal handling
	jmp	%g1 + 0			! patched target when kmdb is active
	nop

/*
 * This entry is copied from OBP's trap table during boot.
 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT



#ifdef	TRAPTRACE
/*
 * TRAPTRACE support.
 * labels here are branched to with "rd %pc, %g7" in the delay slot.
 * Return is done by "jmp %g7 + 4".
 */

/*
 * Record a trap-trace entry for a data-MMU trap: tick, tt, tstate, sp,
 * tpc, plus the D-side fault address/context/type from the MMU fault
 * status area.  Clobbers %g3, %g4, %g6; returns via %g7 + 4.
 */
trace_dmmu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! D-side fault address
	ldx	[%g6 + MMFSA_D_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! D-side fault context
	ldx	[%g6 + MMFSA_D_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! D-side fault type
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA address
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/*
 * Same as trace_dmmu, but records the instruction-MMU fault fields.
 * Clobbers %g3, %g4, %g6; returns via %g7 + 4.
 */
trace_immu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_I_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! I-side fault address
	ldx	[%g6 + MMFSA_I_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! I-side fault context
	ldx	[%g6 + MMFSA_I_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! I-side fault type
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA address
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/*
 * Generic trace entry: tick, tt, tstate, sp, tpc only; the remaining
 * fields are zeroed.  Clobbers %g3, %g4, %g6; returns via %g7 + 4.
 */
trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

/*
 * Trace a register-window trap; locals are used as scratch and cleared
 * on exit (return address is in %l4, not %g7, for this one).
 */
trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	clr	%l0

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */

	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)
	jmp	%g7 + 4
	nop

/*
 * Trace a TSB miss
 *
 * g1 = tsb8k pointer (in)
 * g2 = tag access register (in)
 * g3 = tsb4m pointer (in)
 * g4 = tsbe tag (in/clobbered)
 * g5 - g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	.global	trace_tsbmiss
trace_tsbmiss:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6			! flush after recent stxa to trace buffer
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	stna	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
	stna	%g4, [%g5 + TRAP_ENT_F1]%asi		! XXX? tsb tag
	rdpr	%tnpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	! pick the D- or I-side MMFSA address slot based on the trap type
	mov	MMFSA_D_ADDR, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_ADDR, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_ADDR, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
	! and the matching context slot
	cmp	%g4, MMFSA_D_ADDR
	move	%xcc, MMFSA_D_CTX, %g4
	movne	%xcc, MMFSA_I_CTX, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_F4]%asi		! context ID
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop

/*
 * g2 = tag access register (in)
 * g3 = ctx type (0, 1 or 2) (in) (not used)
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6			! flush after recent stxa to trace buffer
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%g2, [%g1 + TRAP_ENT_SP]%asi		! tag access reg
	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	! pick the D- or I-side context slot based on the trap type
	mov	MMFSA_D_CTX, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_CTX, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_CTX, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g1 + TRAP_ENT_TR]%asi		! context ID
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop

#endif /* TRAPTRACE */

/*
 * Handle watchdog reset trap. Enable the MMU using the MMU_ENABLE
 * HV service, which requires the return target to be specified as a VA
 * since we are enabling the MMU. We set the target to ptl1_panic.
 */

	.type	.watchdog_trap, #function
.watchdog_trap:
	mov	1, %o0			! arg: enable
	setx	ptl1_panic, %g2, %o1	! return target VA = ptl1_panic
	mov	MMU_ENABLE, %o5		! hypervisor fast-trap function number
	ta	FAST_TRAP
	done
	SET_SIZE(.watchdog_trap)
/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 * sun4v has no SFSR register, so build one from the MMU fault status
 * area (fault type is not available there; use MMFSA_F_UNALIGN), then
 * branch to the lddf alignment emulation for 64-bit user processes
 * (biased %sp, low bit set), else to the generic alignment path.
 */
	.type	.dmmu_exc_lddf_not_aligned, #function
.dmmu_exc_lddf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2	! SFAR
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp				! 64-bit user (biased sp)?
	bnz,pt	%xcc, .lddf_exception_not_aligned
	or	%g3, %g1, %g3			/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_lddf_not_aligned)

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 * Same as above, for the stdf alignment case.
 */
	.type	.dmmu_exc_stdf_not_aligned, #function
.dmmu_exc_stdf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2	! SFAR
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp				! 64-bit user (biased sp)?
	bnz,pt	%xcc, .stdf_exception_not_aligned
	or	%g3, %g1, %g3			/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_stdf_not_aligned)

/*
 * Generic data MMU exception: synthesize trap() arguments from the MMU
 * fault status area -- %g2 = TAG_ACCESS (page-aligned addr | ctx type),
 * %g3 = synthesized SFSR, %g1 = T_DATA_EXCEPTION -- and continue at
 * .mmu_exception_end.
 */
	.type	.dmmu_exception, #function
.dmmu_exception:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	ldx	[%g3 + MMFSA_D_TYPE], %g1
	ldx	[%g3 + MMFSA_D_CTX], %g4
	srlx	%g2, MMU_PAGESHIFT, %g2		/* align address */
	sllx	%g2, MMU_PAGESHIFT, %g2
	sllx	%g4, SFSR_CTX_SHIFT, %g3
	or	%g3, %g1, %g3			/* SFSR */
	cmp	%g4, USER_CONTEXT_TYPE
	movgeu	%icc, USER_CONTEXT_TYPE, %g4	! clamp ctx to user type
	or	%g2, %g4, %g2			/* TAG_ACCESS */
	ba,pt	%xcc, .mmu_exception_end
	mov	T_DATA_EXCEPTION, %g1
	SET_SIZE(.dmmu_exception)
/*
 * Bump a per-cpu mmu statistic (see HAT_PERCPU_DBSTAT).
 * expects offset into tsbmiss area in %g1 and return pc in %g7
 */
stat_mmu:
	CPU_INDEX(%g5, %g6)
	sethi	%hi(tsbmiss_area), %g6
	sllx	%g5, TSBMISS_SHIFT, %g5
	or	%g6, %lo(tsbmiss_area), %g6
	add	%g6, %g5, %g6			/* g6 = tsbmiss area */
	ld	[%g6 + %g1], %g5
	add	%g5, 1, %g5			! counter++
	jmp	%g7 + 4
	st	%g5, [%g6 + %g1]		! delay: store back

/*
 * fast_trap_done, fast_trap_done_chk_intr:
 *
 * Due to the design of UltraSPARC pipeline, pending interrupts are not
 * taken immediately after a RETRY or DONE instruction which causes IE to
 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
 * to execute first before taking any interrupts. If that instruction
 * results in other traps, and if the corresponding trap handler runs
 * entirely at TL=1 with interrupts disabled, then pending interrupts
 * won't be taken until after yet another instruction following the %tpc
 * or %tnpc.
 *
 * A malicious user program can use this feature to block out interrupts
 * for extended durations, which can result in send_mondo_timeout kernel
 * panic.
 *
 * This problem is addressed by servicing any pending interrupts via
 * sys_trap before returning back to the user mode from a fast trap
 * handler. The "done" instruction within a fast trap handler, which
 * runs entirely at TL=1 with interrupts disabled, is replaced with the
 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
 * entry point.
 *
 * We check for any pending interrupts here and force a sys_trap to
 * service those interrupts, if any. To minimize overhead, pending
 * interrupts are checked if the %tpc happens to be at 16K boundary,
 * which allows a malicious program to execute at most 4K consecutive
 * instructions before we service any pending interrupts. If a worst
 * case fast trap handler takes about 2 usec, then interrupts will be
 * blocked for at most 8 msec, less than a clock tick.
 *
 * For the cases where we don't know if the %tpc will cross a 16K
 * boundary, we can't use the above optimization and always process
 * any pending interrupts via fast_trap_done_chk_intr entry point.
 *
 * Entry Conditions:
 * %pstate		am:0 priv:1 ie:0
 * globals are AG (not normal globals), i.e. %gl > 0 on sun4v
 */

	.global	fast_trap_done, fast_trap_done_chk_intr
fast_trap_done:
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	nop
	done				! not at a 16K boundary: skip the check

fast_trap_done_chk_intr:
1:	rd	SOFTINT, %g6
	brnz,pn	%g6, 2f			! branch if any pending intr
	nop
	done

2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4			! delay: PIL argument

fast_trap_dummy_call:
	retl
	nop

/*
 * Currently the brand syscall interposition code is not enabled by
 * default.  Instead, when a branded zone is first booted the brand
 * infrastructure will patch the trap table so that the syscall
 * entry points are redirected to syscall_wrapper32 and syscall_wrapper
 * for ILP32 and LP64 syscalls respectively.  This is done in
 * brand_plat_interposition_enable().  Note that the syscall wrappers
 * below do not collect any trap trace data since the syscall hot patch
 * points are reached after trap trace data has already been collected.
 */
#define	BRAND_CALLBACK(callback_id)					    \
	CPU_ADDR(%g2, %g1)		/* load CPU struct addr to %g2 */   ;\
	ldn	[%g2 + CPU_THREAD], %g3	/* load thread pointer */	    ;\
	ldn	[%g3 + T_PROCP], %g3	/* get proc pointer */		    ;\
	ldn	[%g3 + P_BRAND], %g3	/* get brand pointer */		    ;\
	brz	%g3, 1f			/* No brand? No callback. */	    ;\
	nop 								    ;\
	ldn	[%g3 + B_MACHOPS], %g3	/* get machops list */		    ;\
	ldn	[%g3 + (callback_id << 3)], %g3 			    ;\
	brz	%g3, 1f			/* no callback registered */	    ;\
	/*								     \
	 * This isn't pretty.  We want a low-latency way for the callback    \
	 * routine to decline to do anything.  We just pass in an address    \
	 * the routine can directly jmp back to, pretending that nothing     \
	 * has happened.						     \
	 *								     \
	 * %g1: return address (where the brand handler jumps back to)	     \
	 * %g2: address of CPU structure				     \
	 * %g3: address of brand handler (where we will jump to)	     \
	 *								     \
	 * Note the mov below sits in the delay slot of the brz above;	     \
	 * the return address is %pc + 16: past the mov/add/jmp/nop.	     \
	 */								     \
	mov	%pc, %g1						    ;\
	add	%g1, 16, %g1						    ;\
	jmp	%g3							    ;\
	nop								    ;\
1:

/* ILP32 syscall entry, with optional brand interposition first */
	ENTRY_NP(syscall_wrapper32)
	BRAND_CALLBACK(BRAND_CB_SYSCALL32)
	SYSCALL_NOTT(syscall_trap32)
	SET_SIZE(syscall_wrapper32)

/* LP64 syscall entry, with optional brand interposition first */
	ENTRY_NP(syscall_wrapper)
	BRAND_CALLBACK(BRAND_CB_SYSCALL)
	SYSCALL_NOTT(syscall_trap)
	SET_SIZE(syscall_wrapper)

#endif	/* lint */