1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26#pragma ident "%Z%%M% %I% %E% SMI" 27 28#if !defined(lint) 29#include "assym.h" 30#endif /* !lint */ 31#include <sys/asm_linkage.h> 32#include <sys/privregs.h> 33#include <sys/sun4asi.h> 34#include <sys/spitregs.h> 35#include <sys/cheetahregs.h> 36#include <sys/machtrap.h> 37#include <sys/machthread.h> 38#include <sys/machbrand.h> 39#include <sys/pcb.h> 40#include <sys/pte.h> 41#include <sys/mmu.h> 42#include <sys/machpcb.h> 43#include <sys/async.h> 44#include <sys/intreg.h> 45#include <sys/scb.h> 46#include <sys/psr_compat.h> 47#include <sys/syscall.h> 48#include <sys/machparam.h> 49#include <sys/traptrace.h> 50#include <vm/hat_sfmmu.h> 51#include <sys/archsystm.h> 52#include <sys/utrap.h> 53#include <sys/clock.h> 54#include <sys/intr.h> 55#include <sys/fpu/fpu_simulator.h> 56#include <vm/seg_spt.h> 57 58/* 59 * WARNING: If you add a fast trap handler which can be invoked by a 60 * non-privileged user, you may have to use the FAST_TRAP_DONE macro 61 * instead of "done" instruction to return back to the user mode. See 62 * comments for the "fast_trap_done" entry point for more information. 63 * 64 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the 65 * cases where you always want to process any pending interrupts before 66 * returning back to the user mode. 67 */ 68#define FAST_TRAP_DONE \ 69 ba,a fast_trap_done 70 71#define FAST_TRAP_DONE_CHK_INTR \ 72 ba,a fast_trap_done_chk_intr 73 74/* 75 * SPARC V9 Trap Table 76 * 77 * Most of the trap handlers are made from common building 78 * blocks, and some are instantiated multiple times within 79 * the trap table. So, I build a bunch of macros, then 80 * populate the table using only the macros. 81 * 82 * Many macros branch to sys_trap. Its calling convention is: 83 * %g1 kernel trap handler 84 * %g2, %g3 args for above 85 * %g4 desire %pil 86 */ 87 88#ifdef TRAPTRACE 89 90/* 91 * Tracing macro. Adds two instructions if TRAPTRACE is defined. 92 */ 93#define TT_TRACE(label) \ 94 ba label ;\ 95 rd %pc, %g7 96#define TT_TRACE_INS 2 97 98#define TT_TRACE_L(label) \ 99 ba label ;\ 100 rd %pc, %l4 ;\ 101 clr %l4 102#define TT_TRACE_L_INS 3 103 104#else 105 106#define TT_TRACE(label) 107#define TT_TRACE_INS 0 108 109#define TT_TRACE_L(label) 110#define TT_TRACE_L_INS 0 111 112#endif 113 114/* 115 * This macro is used to update per cpu mmu stats in perf critical 116 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER 117 * is defined. 118 */ 119#if defined(DEBUG) || defined(SFMMU_STAT_GATHER) 120#define HAT_PERCPU_DBSTAT(stat) \ 121 mov stat, %g1 ;\ 122 ba stat_mmu ;\ 123 rd %pc, %g7 124#else 125#define HAT_PERCPU_DBSTAT(stat) 126#endif /* DEBUG || SFMMU_STAT_GATHER */ 127 128/* 129 * This first set are funneled to trap() with %tt as the type. 130 * Trap will then either panic or send the user a signal. 131 */ 132/* 133 * NOT is used for traps that just shouldn't happen. 134 * It comes in both single and quadruple flavors. 135 */ 136#if !defined(lint) 137 .global trap 138#endif /* !lint */ 139#define NOT \ 140 TT_TRACE(trace_gen) ;\ 141 set trap, %g1 ;\ 142 rdpr %tt, %g3 ;\ 143 ba,pt %xcc, sys_trap ;\ 144 sub %g0, 1, %g4 ;\ 145 .align 32 146#define NOT4 NOT; NOT; NOT; NOT 147/* 148 * RED is for traps that use the red mode handler. 149 * We should never see these either. 150 */ 151#define RED NOT 152/* 153 * BAD is used for trap vectors we don't have a kernel 154 * handler for. 155 * It also comes in single and quadruple versions. 156 */ 157#define BAD NOT 158#define BAD4 NOT4 159 160#define DONE \ 161 done; \ 162 .align 32 163 164/* 165 * TRAP vectors to the trap() function. 166 * It's main use is for user errors. 167 */ 168#if !defined(lint) 169 .global trap 170#endif /* !lint */ 171#define TRAP(arg) \ 172 TT_TRACE(trace_gen) ;\ 173 set trap, %g1 ;\ 174 mov arg, %g3 ;\ 175 ba,pt %xcc, sys_trap ;\ 176 sub %g0, 1, %g4 ;\ 177 .align 32 178 179/* 180 * SYSCALL is used for system calls on both ILP32 and LP64 kernels 181 * depending on the "which" parameter (should be syscall_trap, 182 * syscall_trap32, or nosys for unused system call traps). 183 */ 184#define SYSCALL(which) \ 185 TT_TRACE(trace_gen) ;\ 186 SYSCALL_NOTT(which) 187 188#define SYSCALL_NOTT(which) \ 189 set (which), %g1 ;\ 190 ba,pt %xcc, sys_trap ;\ 191 sub %g0, 1, %g4 ;\ 192 .align 32 193 194#define FLUSHW() \ 195 set trap, %g1 ;\ 196 mov T_FLUSHW, %g3 ;\ 197 sub %g0, 1, %g4 ;\ 198 save ;\ 199 flushw ;\ 200 restore ;\ 201 FAST_TRAP_DONE ;\ 202 .align 32 203 204/* 205 * GOTO just jumps to a label. 206 * It's used for things that can be fixed without going thru sys_trap. 207 */ 208#define GOTO(label) \ 209 .global label ;\ 210 ba,a label ;\ 211 .empty ;\ 212 .align 32 213 214/* 215 * GOTO_TT just jumps to a label. 216 * correctable ECC error traps at level 0 and 1 will use this macro. 217 * It's used for things that can be fixed without going thru sys_trap. 218 */ 219#define GOTO_TT(label, ttlabel) \ 220 .global label ;\ 221 TT_TRACE(ttlabel) ;\ 222 ba,a label ;\ 223 .empty ;\ 224 .align 32 225 226/* 227 * Privileged traps 228 * Takes breakpoint if privileged, calls trap() if not. 229 */ 230#define PRIV(label) \ 231 rdpr %tstate, %g1 ;\ 232 btst TSTATE_PRIV, %g1 ;\ 233 bnz label ;\ 234 rdpr %tt, %g3 ;\ 235 set trap, %g1 ;\ 236 ba,pt %xcc, sys_trap ;\ 237 sub %g0, 1, %g4 ;\ 238 .align 32 239 240 241/* 242 * DTrace traps. 243 */ 244#define DTRACE_PID \ 245 .global dtrace_pid_probe ;\ 246 set dtrace_pid_probe, %g1 ;\ 247 ba,pt %xcc, user_trap ;\ 248 sub %g0, 1, %g4 ;\ 249 .align 32 250 251#define DTRACE_RETURN \ 252 .global dtrace_return_probe ;\ 253 set dtrace_return_probe, %g1 ;\ 254 ba,pt %xcc, user_trap ;\ 255 sub %g0, 1, %g4 ;\ 256 .align 32 257 258/* 259 * REGISTER WINDOW MANAGEMENT MACROS 260 */ 261 262/* 263 * various convenient units of padding 264 */ 265#define SKIP(n) .skip 4*(n) 266 267/* 268 * CLEAN_WINDOW is the simple handler for cleaning a register window. 269 */ 270#define CLEAN_WINDOW \ 271 TT_TRACE_L(trace_win) ;\ 272 rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin ;\ 273 clr %l0; clr %l1; clr %l2; clr %l3 ;\ 274 clr %l4; clr %l5; clr %l6; clr %l7 ;\ 275 clr %o0; clr %o1; clr %o2; clr %o3 ;\ 276 clr %o4; clr %o5; clr %o6; clr %o7 ;\ 277 retry; .align 128 278 279#if !defined(lint) 280 281/* 282 * If we get an unresolved tlb miss while in a window handler, the fault 283 * handler will resume execution at the last instruction of the window 284 * hander, instead of delivering the fault to the kernel. Spill handlers 285 * use this to spill windows into the wbuf. 286 * 287 * The mixed handler works by checking %sp, and branching to the correct 288 * handler. This is done by branching back to label 1: for 32b frames, 289 * or label 2: for 64b frames; which implies the handler order is: 32b, 290 * 64b, mixed. The 1: and 2: labels are offset into the routines to 291 * allow the branchs' delay slots to contain useful instructions. 292 */ 293 294/* 295 * SPILL_32bit spills a 32-bit-wide kernel register window. It 296 * assumes that the kernel context and the nucleus context are the 297 * same. The stack pointer is required to be eight-byte aligned even 298 * though this code only needs it to be four-byte aligned. 299 */ 300#define SPILL_32bit(tail) \ 301 srl %sp, 0, %sp ;\ 3021: st %l0, [%sp + 0] ;\ 303 st %l1, [%sp + 4] ;\ 304 st %l2, [%sp + 8] ;\ 305 st %l3, [%sp + 12] ;\ 306 st %l4, [%sp + 16] ;\ 307 st %l5, [%sp + 20] ;\ 308 st %l6, [%sp + 24] ;\ 309 st %l7, [%sp + 28] ;\ 310 st %i0, [%sp + 32] ;\ 311 st %i1, [%sp + 36] ;\ 312 st %i2, [%sp + 40] ;\ 313 st %i3, [%sp + 44] ;\ 314 st %i4, [%sp + 48] ;\ 315 st %i5, [%sp + 52] ;\ 316 st %i6, [%sp + 56] ;\ 317 st %i7, [%sp + 60] ;\ 318 TT_TRACE_L(trace_win) ;\ 319 saved ;\ 320 retry ;\ 321 SKIP(31-19-TT_TRACE_L_INS) ;\ 322 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 323 .empty 324 325/* 326 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit 327 * wide address space via the designated asi. It is used to spill 328 * non-kernel windows. The stack pointer is required to be eight-byte 329 * aligned even though this code only needs it to be four-byte 330 * aligned. 331 */ 332#define SPILL_32bit_asi(asi_num, tail) \ 333 srl %sp, 0, %sp ;\ 3341: sta %l0, [%sp + %g0]asi_num ;\ 335 mov 4, %g1 ;\ 336 sta %l1, [%sp + %g1]asi_num ;\ 337 mov 8, %g2 ;\ 338 sta %l2, [%sp + %g2]asi_num ;\ 339 mov 12, %g3 ;\ 340 sta %l3, [%sp + %g3]asi_num ;\ 341 add %sp, 16, %g4 ;\ 342 sta %l4, [%g4 + %g0]asi_num ;\ 343 sta %l5, [%g4 + %g1]asi_num ;\ 344 sta %l6, [%g4 + %g2]asi_num ;\ 345 sta %l7, [%g4 + %g3]asi_num ;\ 346 add %g4, 16, %g4 ;\ 347 sta %i0, [%g4 + %g0]asi_num ;\ 348 sta %i1, [%g4 + %g1]asi_num ;\ 349 sta %i2, [%g4 + %g2]asi_num ;\ 350 sta %i3, [%g4 + %g3]asi_num ;\ 351 add %g4, 16, %g4 ;\ 352 sta %i4, [%g4 + %g0]asi_num ;\ 353 sta %i5, [%g4 + %g1]asi_num ;\ 354 sta %i6, [%g4 + %g2]asi_num ;\ 355 sta %i7, [%g4 + %g3]asi_num ;\ 356 TT_TRACE_L(trace_win) ;\ 357 saved ;\ 358 retry ;\ 359 SKIP(31-25-TT_TRACE_L_INS) ;\ 360 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 361 .empty 362 363/* 364 * SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit 365 * wide address space via the designated asi. It is used to spill 366 * windows at tl>1 where performance isn't the primary concern and 367 * where we don't want to use unnecessary registers. The stack 368 * pointer is required to be eight-byte aligned even though this code 369 * only needs it to be four-byte aligned. 370 */ 371#define SPILL_32bit_tt1(asi_num, tail) \ 372 mov asi_num, %asi ;\ 3731: srl %sp, 0, %sp ;\ 374 sta %l0, [%sp + 0]%asi ;\ 375 sta %l1, [%sp + 4]%asi ;\ 376 sta %l2, [%sp + 8]%asi ;\ 377 sta %l3, [%sp + 12]%asi ;\ 378 sta %l4, [%sp + 16]%asi ;\ 379 sta %l5, [%sp + 20]%asi ;\ 380 sta %l6, [%sp + 24]%asi ;\ 381 sta %l7, [%sp + 28]%asi ;\ 382 sta %i0, [%sp + 32]%asi ;\ 383 sta %i1, [%sp + 36]%asi ;\ 384 sta %i2, [%sp + 40]%asi ;\ 385 sta %i3, [%sp + 44]%asi ;\ 386 sta %i4, [%sp + 48]%asi ;\ 387 sta %i5, [%sp + 52]%asi ;\ 388 sta %i6, [%sp + 56]%asi ;\ 389 sta %i7, [%sp + 60]%asi ;\ 390 TT_TRACE_L(trace_win) ;\ 391 saved ;\ 392 retry ;\ 393 SKIP(31-20-TT_TRACE_L_INS) ;\ 394 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 395 .empty 396 397 398/* 399 * FILL_32bit fills a 32-bit-wide kernel register window. It assumes 400 * that the kernel context and the nucleus context are the same. The 401 * stack pointer is required to be eight-byte aligned even though this 402 * code only needs it to be four-byte aligned. 403 */ 404#define FILL_32bit(tail) \ 405 srl %sp, 0, %sp ;\ 4061: TT_TRACE_L(trace_win) ;\ 407 ld [%sp + 0], %l0 ;\ 408 ld [%sp + 4], %l1 ;\ 409 ld [%sp + 8], %l2 ;\ 410 ld [%sp + 12], %l3 ;\ 411 ld [%sp + 16], %l4 ;\ 412 ld [%sp + 20], %l5 ;\ 413 ld [%sp + 24], %l6 ;\ 414 ld [%sp + 28], %l7 ;\ 415 ld [%sp + 32], %i0 ;\ 416 ld [%sp + 36], %i1 ;\ 417 ld [%sp + 40], %i2 ;\ 418 ld [%sp + 44], %i3 ;\ 419 ld [%sp + 48], %i4 ;\ 420 ld [%sp + 52], %i5 ;\ 421 ld [%sp + 56], %i6 ;\ 422 ld [%sp + 60], %i7 ;\ 423 restored ;\ 424 retry ;\ 425 SKIP(31-19-TT_TRACE_L_INS) ;\ 426 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 427 .empty 428 429/* 430 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit 431 * wide address space via the designated asi. It is used to fill 432 * non-kernel windows. The stack pointer is required to be eight-byte 433 * aligned even though this code only needs it to be four-byte 434 * aligned. 435 */ 436#define FILL_32bit_asi(asi_num, tail) \ 437 srl %sp, 0, %sp ;\ 4381: TT_TRACE_L(trace_win) ;\ 439 mov 4, %g1 ;\ 440 lda [%sp + %g0]asi_num, %l0 ;\ 441 mov 8, %g2 ;\ 442 lda [%sp + %g1]asi_num, %l1 ;\ 443 mov 12, %g3 ;\ 444 lda [%sp + %g2]asi_num, %l2 ;\ 445 lda [%sp + %g3]asi_num, %l3 ;\ 446 add %sp, 16, %g4 ;\ 447 lda [%g4 + %g0]asi_num, %l4 ;\ 448 lda [%g4 + %g1]asi_num, %l5 ;\ 449 lda [%g4 + %g2]asi_num, %l6 ;\ 450 lda [%g4 + %g3]asi_num, %l7 ;\ 451 add %g4, 16, %g4 ;\ 452 lda [%g4 + %g0]asi_num, %i0 ;\ 453 lda [%g4 + %g1]asi_num, %i1 ;\ 454 lda [%g4 + %g2]asi_num, %i2 ;\ 455 lda [%g4 + %g3]asi_num, %i3 ;\ 456 add %g4, 16, %g4 ;\ 457 lda [%g4 + %g0]asi_num, %i4 ;\ 458 lda [%g4 + %g1]asi_num, %i5 ;\ 459 lda [%g4 + %g2]asi_num, %i6 ;\ 460 lda [%g4 + %g3]asi_num, %i7 ;\ 461 restored ;\ 462 retry ;\ 463 SKIP(31-25-TT_TRACE_L_INS) ;\ 464 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 465 .empty 466 467/* 468 * FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit 469 * wide address space via the designated asi. It is used to fill 470 * windows at tl>1 where performance isn't the primary concern and 471 * where we don't want to use unnecessary registers. The stack 472 * pointer is required to be eight-byte aligned even though this code 473 * only needs it to be four-byte aligned. 474 */ 475#define FILL_32bit_tt1(asi_num, tail) \ 476 mov asi_num, %asi ;\ 4771: srl %sp, 0, %sp ;\ 478 TT_TRACE_L(trace_win) ;\ 479 lda [%sp + 0]%asi, %l0 ;\ 480 lda [%sp + 4]%asi, %l1 ;\ 481 lda [%sp + 8]%asi, %l2 ;\ 482 lda [%sp + 12]%asi, %l3 ;\ 483 lda [%sp + 16]%asi, %l4 ;\ 484 lda [%sp + 20]%asi, %l5 ;\ 485 lda [%sp + 24]%asi, %l6 ;\ 486 lda [%sp + 28]%asi, %l7 ;\ 487 lda [%sp + 32]%asi, %i0 ;\ 488 lda [%sp + 36]%asi, %i1 ;\ 489 lda [%sp + 40]%asi, %i2 ;\ 490 lda [%sp + 44]%asi, %i3 ;\ 491 lda [%sp + 48]%asi, %i4 ;\ 492 lda [%sp + 52]%asi, %i5 ;\ 493 lda [%sp + 56]%asi, %i6 ;\ 494 lda [%sp + 60]%asi, %i7 ;\ 495 restored ;\ 496 retry ;\ 497 SKIP(31-20-TT_TRACE_L_INS) ;\ 498 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 499 .empty 500 501 502/* 503 * SPILL_64bit spills a 64-bit-wide kernel register window. It 504 * assumes that the kernel context and the nucleus context are the 505 * same. The stack pointer is required to be eight-byte aligned. 506 */ 507#define SPILL_64bit(tail) \ 5082: stx %l0, [%sp + V9BIAS64 + 0] ;\ 509 stx %l1, [%sp + V9BIAS64 + 8] ;\ 510 stx %l2, [%sp + V9BIAS64 + 16] ;\ 511 stx %l3, [%sp + V9BIAS64 + 24] ;\ 512 stx %l4, [%sp + V9BIAS64 + 32] ;\ 513 stx %l5, [%sp + V9BIAS64 + 40] ;\ 514 stx %l6, [%sp + V9BIAS64 + 48] ;\ 515 stx %l7, [%sp + V9BIAS64 + 56] ;\ 516 stx %i0, [%sp + V9BIAS64 + 64] ;\ 517 stx %i1, [%sp + V9BIAS64 + 72] ;\ 518 stx %i2, [%sp + V9BIAS64 + 80] ;\ 519 stx %i3, [%sp + V9BIAS64 + 88] ;\ 520 stx %i4, [%sp + V9BIAS64 + 96] ;\ 521 stx %i5, [%sp + V9BIAS64 + 104] ;\ 522 stx %i6, [%sp + V9BIAS64 + 112] ;\ 523 stx %i7, [%sp + V9BIAS64 + 120] ;\ 524 TT_TRACE_L(trace_win) ;\ 525 saved ;\ 526 retry ;\ 527 SKIP(31-18-TT_TRACE_L_INS) ;\ 528 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 529 .empty 530 531/* 532 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit 533 * wide address space via the designated asi. It is used to spill 534 * non-kernel windows. The stack pointer is required to be eight-byte 535 * aligned. 536 */ 537#define SPILL_64bit_asi(asi_num, tail) \ 538 mov 0 + V9BIAS64, %g1 ;\ 5392: stxa %l0, [%sp + %g1]asi_num ;\ 540 mov 8 + V9BIAS64, %g2 ;\ 541 stxa %l1, [%sp + %g2]asi_num ;\ 542 mov 16 + V9BIAS64, %g3 ;\ 543 stxa %l2, [%sp + %g3]asi_num ;\ 544 mov 24 + V9BIAS64, %g4 ;\ 545 stxa %l3, [%sp + %g4]asi_num ;\ 546 add %sp, 32, %g5 ;\ 547 stxa %l4, [%g5 + %g1]asi_num ;\ 548 stxa %l5, [%g5 + %g2]asi_num ;\ 549 stxa %l6, [%g5 + %g3]asi_num ;\ 550 stxa %l7, [%g5 + %g4]asi_num ;\ 551 add %g5, 32, %g5 ;\ 552 stxa %i0, [%g5 + %g1]asi_num ;\ 553 stxa %i1, [%g5 + %g2]asi_num ;\ 554 stxa %i2, [%g5 + %g3]asi_num ;\ 555 stxa %i3, [%g5 + %g4]asi_num ;\ 556 add %g5, 32, %g5 ;\ 557 stxa %i4, [%g5 + %g1]asi_num ;\ 558 stxa %i5, [%g5 + %g2]asi_num ;\ 559 stxa %i6, [%g5 + %g3]asi_num ;\ 560 stxa %i7, [%g5 + %g4]asi_num ;\ 561 TT_TRACE_L(trace_win) ;\ 562 saved ;\ 563 retry ;\ 564 SKIP(31-25-TT_TRACE_L_INS) ;\ 565 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 566 .empty 567 568/* 569 * SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit 570 * wide address space via the designated asi. It is used to spill 571 * windows at tl>1 where performance isn't the primary concern and 572 * where we don't want to use unnecessary registers. The stack 573 * pointer is required to be eight-byte aligned. 574 */ 575#define SPILL_64bit_tt1(asi_num, tail) \ 576 mov asi_num, %asi ;\ 5772: stxa %l0, [%sp + V9BIAS64 + 0]%asi ;\ 578 stxa %l1, [%sp + V9BIAS64 + 8]%asi ;\ 579 stxa %l2, [%sp + V9BIAS64 + 16]%asi ;\ 580 stxa %l3, [%sp + V9BIAS64 + 24]%asi ;\ 581 stxa %l4, [%sp + V9BIAS64 + 32]%asi ;\ 582 stxa %l5, [%sp + V9BIAS64 + 40]%asi ;\ 583 stxa %l6, [%sp + V9BIAS64 + 48]%asi ;\ 584 stxa %l7, [%sp + V9BIAS64 + 56]%asi ;\ 585 stxa %i0, [%sp + V9BIAS64 + 64]%asi ;\ 586 stxa %i1, [%sp + V9BIAS64 + 72]%asi ;\ 587 stxa %i2, [%sp + V9BIAS64 + 80]%asi ;\ 588 stxa %i3, [%sp + V9BIAS64 + 88]%asi ;\ 589 stxa %i4, [%sp + V9BIAS64 + 96]%asi ;\ 590 stxa %i5, [%sp + V9BIAS64 + 104]%asi ;\ 591 stxa %i6, [%sp + V9BIAS64 + 112]%asi ;\ 592 stxa %i7, [%sp + V9BIAS64 + 120]%asi ;\ 593 TT_TRACE_L(trace_win) ;\ 594 saved ;\ 595 retry ;\ 596 SKIP(31-19-TT_TRACE_L_INS) ;\ 597 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 598 .empty 599 600 601/* 602 * FILL_64bit fills a 64-bit-wide kernel register window. It assumes 603 * that the kernel context and the nucleus context are the same. The 604 * stack pointer is required to be eight-byte aligned. 605 */ 606#define FILL_64bit(tail) \ 6072: TT_TRACE_L(trace_win) ;\ 608 ldx [%sp + V9BIAS64 + 0], %l0 ;\ 609 ldx [%sp + V9BIAS64 + 8], %l1 ;\ 610 ldx [%sp + V9BIAS64 + 16], %l2 ;\ 611 ldx [%sp + V9BIAS64 + 24], %l3 ;\ 612 ldx [%sp + V9BIAS64 + 32], %l4 ;\ 613 ldx [%sp + V9BIAS64 + 40], %l5 ;\ 614 ldx [%sp + V9BIAS64 + 48], %l6 ;\ 615 ldx [%sp + V9BIAS64 + 56], %l7 ;\ 616 ldx [%sp + V9BIAS64 + 64], %i0 ;\ 617 ldx [%sp + V9BIAS64 + 72], %i1 ;\ 618 ldx [%sp + V9BIAS64 + 80], %i2 ;\ 619 ldx [%sp + V9BIAS64 + 88], %i3 ;\ 620 ldx [%sp + V9BIAS64 + 96], %i4 ;\ 621 ldx [%sp + V9BIAS64 + 104], %i5 ;\ 622 ldx [%sp + V9BIAS64 + 112], %i6 ;\ 623 ldx [%sp + V9BIAS64 + 120], %i7 ;\ 624 restored ;\ 625 retry ;\ 626 SKIP(31-18-TT_TRACE_L_INS) ;\ 627 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 628 .empty 629 630/* 631 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit 632 * wide address space via the designated asi. It is used to fill 633 * non-kernel windows. The stack pointer is required to be eight-byte 634 * aligned. 635 */ 636#define FILL_64bit_asi(asi_num, tail) \ 637 mov V9BIAS64 + 0, %g1 ;\ 6382: TT_TRACE_L(trace_win) ;\ 639 ldxa [%sp + %g1]asi_num, %l0 ;\ 640 mov V9BIAS64 + 8, %g2 ;\ 641 ldxa [%sp + %g2]asi_num, %l1 ;\ 642 mov V9BIAS64 + 16, %g3 ;\ 643 ldxa [%sp + %g3]asi_num, %l2 ;\ 644 mov V9BIAS64 + 24, %g4 ;\ 645 ldxa [%sp + %g4]asi_num, %l3 ;\ 646 add %sp, 32, %g5 ;\ 647 ldxa [%g5 + %g1]asi_num, %l4 ;\ 648 ldxa [%g5 + %g2]asi_num, %l5 ;\ 649 ldxa [%g5 + %g3]asi_num, %l6 ;\ 650 ldxa [%g5 + %g4]asi_num, %l7 ;\ 651 add %g5, 32, %g5 ;\ 652 ldxa [%g5 + %g1]asi_num, %i0 ;\ 653 ldxa [%g5 + %g2]asi_num, %i1 ;\ 654 ldxa [%g5 + %g3]asi_num, %i2 ;\ 655 ldxa [%g5 + %g4]asi_num, %i3 ;\ 656 add %g5, 32, %g5 ;\ 657 ldxa [%g5 + %g1]asi_num, %i4 ;\ 658 ldxa [%g5 + %g2]asi_num, %i5 ;\ 659 ldxa [%g5 + %g3]asi_num, %i6 ;\ 660 ldxa [%g5 + %g4]asi_num, %i7 ;\ 661 restored ;\ 662 retry ;\ 663 SKIP(31-25-TT_TRACE_L_INS) ;\ 664 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 665 .empty 666 667/* 668 * FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit 669 * wide address space via the designated asi. It is used to fill 670 * windows at tl>1 where performance isn't the primary concern and 671 * where we don't want to use unnecessary registers. The stack 672 * pointer is required to be eight-byte aligned. 673 */ 674#define FILL_64bit_tt1(asi_num, tail) \ 675 mov asi_num, %asi ;\ 676 TT_TRACE_L(trace_win) ;\ 677 ldxa [%sp + V9BIAS64 + 0]%asi, %l0 ;\ 678 ldxa [%sp + V9BIAS64 + 8]%asi, %l1 ;\ 679 ldxa [%sp + V9BIAS64 + 16]%asi, %l2 ;\ 680 ldxa [%sp + V9BIAS64 + 24]%asi, %l3 ;\ 681 ldxa [%sp + V9BIAS64 + 32]%asi, %l4 ;\ 682 ldxa [%sp + V9BIAS64 + 40]%asi, %l5 ;\ 683 ldxa [%sp + V9BIAS64 + 48]%asi, %l6 ;\ 684 ldxa [%sp + V9BIAS64 + 56]%asi, %l7 ;\ 685 ldxa [%sp + V9BIAS64 + 64]%asi, %i0 ;\ 686 ldxa [%sp + V9BIAS64 + 72]%asi, %i1 ;\ 687 ldxa [%sp + V9BIAS64 + 80]%asi, %i2 ;\ 688 ldxa [%sp + V9BIAS64 + 88]%asi, %i3 ;\ 689 ldxa [%sp + V9BIAS64 + 96]%asi, %i4 ;\ 690 ldxa [%sp + V9BIAS64 + 104]%asi, %i5 ;\ 691 ldxa [%sp + V9BIAS64 + 112]%asi, %i6 ;\ 692 ldxa [%sp + V9BIAS64 + 120]%asi, %i7 ;\ 693 restored ;\ 694 retry ;\ 695 SKIP(31-19-TT_TRACE_L_INS) ;\ 696 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 697 .empty 698 699#endif /* !lint */ 700 701/* 702 * SPILL_mixed spills either size window, depending on 703 * whether %sp is even or odd, to a 32-bit address space. 704 * This may only be used in conjunction with SPILL_32bit/ 705 * SPILL_64bit. New versions of SPILL_mixed_{tt1,asi} would be 706 * needed for use with SPILL_{32,64}bit_{tt1,asi}. Particular 707 * attention should be paid to the instructions that belong 708 * in the delay slots of the branches depending on the type 709 * of spill handler being branched to. 710 * Clear upper 32 bits of %sp if it is odd. 711 * We won't need to clear them in 64 bit kernel. 712 */ 713#define SPILL_mixed \ 714 btst 1, %sp ;\ 715 bz,a,pt %xcc, 1b ;\ 716 srl %sp, 0, %sp ;\ 717 ba,pt %xcc, 2b ;\ 718 nop ;\ 719 .align 128 720 721/* 722 * FILL_mixed(ASI) fills either size window, depending on 723 * whether %sp is even or odd, from a 32-bit address space. 724 * This may only be used in conjunction with FILL_32bit/ 725 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be 726 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular 727 * attention should be paid to the instructions that belong 728 * in the delay slots of the branches depending on the type 729 * of fill handler being branched to. 730 * Clear upper 32 bits of %sp if it is odd. 731 * We won't need to clear them in 64 bit kernel. 732 */ 733#define FILL_mixed \ 734 btst 1, %sp ;\ 735 bz,a,pt %xcc, 1b ;\ 736 srl %sp, 0, %sp ;\ 737 ba,pt %xcc, 2b ;\ 738 nop ;\ 739 .align 128 740 741 742/* 743 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows, 744 * respectively, into the address space via the designated asi. The 745 * unbiased stack pointer is required to be eight-byte aligned (even for 746 * the 32-bit case even though this code does not require such strict 747 * alignment). 748 * 749 * With SPARC v9 the spill trap takes precedence over the cleanwin trap 750 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save 751 * will cause cwp + 2 to be spilled but will not clean cwp + 1. That 752 * window may contain kernel data so in user_rtt we set wstate to call 753 * these spill handlers on the first user spill trap. These handler then 754 * spill the appropriate window but also back up a window and clean the 755 * window that didn't get a cleanwin trap. 756 */ 757#define SPILL_32clean(asi_num, tail) \ 758 srl %sp, 0, %sp ;\ 759 sta %l0, [%sp + %g0]asi_num ;\ 760 mov 4, %g1 ;\ 761 sta %l1, [%sp + %g1]asi_num ;\ 762 mov 8, %g2 ;\ 763 sta %l2, [%sp + %g2]asi_num ;\ 764 mov 12, %g3 ;\ 765 sta %l3, [%sp + %g3]asi_num ;\ 766 add %sp, 16, %g4 ;\ 767 sta %l4, [%g4 + %g0]asi_num ;\ 768 sta %l5, [%g4 + %g1]asi_num ;\ 769 sta %l6, [%g4 + %g2]asi_num ;\ 770 sta %l7, [%g4 + %g3]asi_num ;\ 771 add %g4, 16, %g4 ;\ 772 sta %i0, [%g4 + %g0]asi_num ;\ 773 sta %i1, [%g4 + %g1]asi_num ;\ 774 sta %i2, [%g4 + %g2]asi_num ;\ 775 sta %i3, [%g4 + %g3]asi_num ;\ 776 add %g4, 16, %g4 ;\ 777 sta %i4, [%g4 + %g0]asi_num ;\ 778 sta %i5, [%g4 + %g1]asi_num ;\ 779 sta %i6, [%g4 + %g2]asi_num ;\ 780 sta %i7, [%g4 + %g3]asi_num ;\ 781 TT_TRACE_L(trace_win) ;\ 782 b .spill_clean ;\ 783 mov WSTATE_USER32, %g7 ;\ 784 SKIP(31-25-TT_TRACE_L_INS) ;\ 785 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 786 .empty 787 788#define SPILL_64clean(asi_num, tail) \ 789 mov 0 + V9BIAS64, %g1 ;\ 790 stxa %l0, [%sp + %g1]asi_num ;\ 791 mov 8 + V9BIAS64, %g2 ;\ 792 stxa %l1, [%sp + %g2]asi_num ;\ 793 mov 16 + V9BIAS64, %g3 ;\ 794 stxa %l2, [%sp + %g3]asi_num ;\ 795 mov 24 + V9BIAS64, %g4 ;\ 796 stxa %l3, [%sp + %g4]asi_num ;\ 797 add %sp, 32, %g5 ;\ 798 stxa %l4, [%g5 + %g1]asi_num ;\ 799 stxa %l5, [%g5 + %g2]asi_num ;\ 800 stxa %l6, [%g5 + %g3]asi_num ;\ 801 stxa %l7, [%g5 + %g4]asi_num ;\ 802 add %g5, 32, %g5 ;\ 803 stxa %i0, [%g5 + %g1]asi_num ;\ 804 stxa %i1, [%g5 + %g2]asi_num ;\ 805 stxa %i2, [%g5 + %g3]asi_num ;\ 806 stxa %i3, [%g5 + %g4]asi_num ;\ 807 add %g5, 32, %g5 ;\ 808 stxa %i4, [%g5 + %g1]asi_num ;\ 809 stxa %i5, [%g5 + %g2]asi_num ;\ 810 stxa %i6, [%g5 + %g3]asi_num ;\ 811 stxa %i7, [%g5 + %g4]asi_num ;\ 812 TT_TRACE_L(trace_win) ;\ 813 b .spill_clean ;\ 814 mov WSTATE_USER64, %g7 ;\ 815 SKIP(31-25-TT_TRACE_L_INS) ;\ 816 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 817 .empty 818 819 820/* 821 * Floating point disabled. 822 */ 823#define FP_DISABLED_TRAP \ 824 TT_TRACE(trace_gen) ;\ 825 ba,pt %xcc,.fp_disabled ;\ 826 nop ;\ 827 .align 32 828 829/* 830 * Floating point exceptions. 831 */ 832#define FP_IEEE_TRAP \ 833 TT_TRACE(trace_gen) ;\ 834 ba,pt %xcc,.fp_ieee_exception ;\ 835 nop ;\ 836 .align 32 837 838#define FP_TRAP \ 839 TT_TRACE(trace_gen) ;\ 840 ba,pt %xcc,.fp_exception ;\ 841 nop ;\ 842 .align 32 843 844#if !defined(lint) 845/* 846 * asynchronous traps at level 0 and level 1 847 * 848 * The first instruction must be a membar for UltraSPARC-III 849 * to stop RED state entry if the store queue has many 850 * pending bad stores (PRM, Chapter 11). 851 */ 852#define ASYNC_TRAP(ttype, ttlabel, table_name)\ 853 .global table_name ;\ 854table_name: ;\ 855 membar #Sync ;\ 856 TT_TRACE(ttlabel) ;\ 857 ba async_err ;\ 858 mov ttype, %g5 ;\ 859 .align 32 860 861/* 862 * Defaults to BAD entry, but establishes label to be used for 863 * architecture-specific overwrite of trap table entry. 864 */ 865#define LABELED_BAD(table_name) \ 866 .global table_name ;\ 867table_name: ;\ 868 BAD 869 870#endif /* !lint */ 871 872/* 873 * illegal instruction trap 874 */ 875#define ILLTRAP_INSTR \ 876 membar #Sync ;\ 877 TT_TRACE(trace_gen) ;\ 878 or %g0, P_UTRAP4, %g2 ;\ 879 or %g0, T_UNIMP_INSTR, %g3 ;\ 880 sethi %hi(.check_v9utrap), %g4 ;\ 881 jmp %g4 + %lo(.check_v9utrap) ;\ 882 nop ;\ 883 .align 32 884 885/* 886 * tag overflow trap 887 */ 888#define TAG_OVERFLOW \ 889 TT_TRACE(trace_gen) ;\ 890 or %g0, P_UTRAP10, %g2 ;\ 891 or %g0, T_TAG_OVERFLOW, %g3 ;\ 892 sethi %hi(.check_v9utrap), %g4 ;\ 893 jmp %g4 + %lo(.check_v9utrap) ;\ 894 nop ;\ 895 .align 32 896 897/* 898 * divide by zero trap 899 */ 900#define DIV_BY_ZERO \ 901 TT_TRACE(trace_gen) ;\ 902 or %g0, P_UTRAP11, %g2 ;\ 903 or %g0, T_IDIV0, %g3 ;\ 904 sethi %hi(.check_v9utrap), %g4 ;\ 905 jmp %g4 + %lo(.check_v9utrap) ;\ 906 nop ;\ 907 .align 32 908 909/* 910 * trap instruction for V9 user trap handlers 911 */ 912#define TRAP_INSTR \ 913 TT_TRACE(trace_gen) ;\ 914 or %g0, T_SOFTWARE_TRAP, %g3 ;\ 915 sethi %hi(.check_v9utrap), %g4 ;\ 916 jmp %g4 + %lo(.check_v9utrap) ;\ 917 nop ;\ 918 .align 32 919#define TRP4 TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR 920 921/* 922 * LEVEL_INTERRUPT is for level N interrupts. 923 * VECTOR_INTERRUPT is for the vector trap. 924 */ 925#define LEVEL_INTERRUPT(level) \ 926 .global tt_pil/**/level ;\ 927tt_pil/**/level: ;\ 928 ba,pt %xcc, pil_interrupt ;\ 929 mov level, %g4 ;\ 930 .align 32 931 932#define LEVEL14_INTERRUPT \ 933 ba pil14_interrupt ;\ 934 mov PIL_14, %g4 ;\ 935 .align 32 936 937#define VECTOR_INTERRUPT \ 938 ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g1 ;\ 939 btst IRSR_BUSY, %g1 ;\ 940 bnz,pt %xcc, vec_interrupt ;\ 941 nop ;\ 942 ba,a,pt %xcc, vec_intr_spurious ;\ 943 .empty ;\ 944 .align 32 945 946/* 947 * MMU Trap Handlers. 948 */ 949#define SWITCH_GLOBALS /* mmu->alt, alt->mmu */ \ 950 rdpr %pstate, %g5 ;\ 951 wrpr %g5, PSTATE_MG | PSTATE_AG, %pstate 952 953#define IMMU_EXCEPTION \ 954 membar #Sync ;\ 955 SWITCH_GLOBALS ;\ 956 wr %g0, ASI_IMMU, %asi ;\ 957 rdpr %tpc, %g2 ;\ 958 ldxa [MMU_SFSR]%asi, %g3 ;\ 959 ba,pt %xcc, .mmu_exception_end ;\ 960 mov T_INSTR_EXCEPTION, %g1 ;\ 961 .align 32 962 963#define DMMU_EXCEPTION \ 964 SWITCH_GLOBALS ;\ 965 wr %g0, ASI_DMMU, %asi ;\ 966 ldxa [MMU_TAG_ACCESS]%asi, %g2 ;\ 967 ldxa [MMU_SFSR]%asi, %g3 ;\ 968 ba,pt %xcc, .mmu_exception_end ;\ 969 mov T_DATA_EXCEPTION, %g1 ;\ 970 .align 32 971 972#define DMMU_EXC_AG_PRIV \ 973 wr %g0, ASI_DMMU, %asi ;\ 974 ldxa [MMU_SFAR]%asi, %g2 ;\ 975 ba,pt %xcc, .mmu_priv_exception ;\ 976 ldxa [MMU_SFSR]%asi, %g3 ;\ 977 .align 32 978 979#define DMMU_EXC_AG_NOT_ALIGNED \ 980 wr %g0, ASI_DMMU, %asi ;\ 981 ldxa [MMU_SFAR]%asi, %g2 ;\ 982 ba,pt %xcc, .mmu_exception_not_aligned ;\ 983 ldxa [MMU_SFSR]%asi, %g3 ;\ 984 .align 32 985 986/* 987 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2) 988 */ 989#define DMMU_EXC_LDDF_NOT_ALIGNED \ 990 btst 1, %sp ;\ 991 bnz,pt %xcc, .lddf_exception_not_aligned ;\ 992 wr %g0, ASI_DMMU, %asi ;\ 993 ldxa [MMU_SFAR]%asi, %g2 ;\ 994 ba,pt %xcc, .mmu_exception_not_aligned ;\ 995 ldxa [MMU_SFSR]%asi, %g3 ;\ 996 .align 32 997 998#define DMMU_EXC_STDF_NOT_ALIGNED \ 999 btst 1, %sp ;\ 1000 bnz,pt %xcc, .stdf_exception_not_aligned ;\ 1001 wr %g0, ASI_DMMU, %asi ;\ 1002 ldxa [MMU_SFAR]%asi, %g2 ;\ 1003 ba,pt %xcc, .mmu_exception_not_aligned ;\ 1004 ldxa [MMU_SFSR]%asi, %g3 ;\ 1005 .align 32 1006 1007/* 1008 * Flush the TLB using either the primary, secondary, or nucleus flush 1009 * operation based on whether the ctx from the tag access register matches 1010 * the primary or secondary context (flush the nucleus if neither matches). 1011 * 1012 * Requires a membar #Sync before next ld/st. 1013 * exits with: 1014 * g2 = tag access register 1015 * g3 = ctx number 1016 */ 1017#if TAGACC_CTX_MASK != CTXREG_CTX_MASK 1018#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK" 1019#endif 1020#define DTLB_DEMAP_ENTRY \ 1021 mov MMU_TAG_ACCESS, %g1 ;\ 1022 mov MMU_PCONTEXT, %g5 ;\ 1023 ldxa [%g1]ASI_DMMU, %g2 ;\ 1024 sethi %hi(TAGACC_CTX_MASK), %g4 ;\ 1025 or %g4, %lo(TAGACC_CTX_MASK), %g4 ;\ 1026 and %g2, %g4, %g3 /* g3 = ctx */ ;\ 1027 ldxa [%g5]ASI_DMMU, %g6 /* g6 = primary ctx */ ;\ 1028 and %g6, %g4, %g6 /* &= CTXREG_CTX_MASK */ ;\ 1029 cmp %g3, %g6 ;\ 1030 be,pt %xcc, 1f ;\ 1031 andn %g2, %g4, %g1 /* ctx = primary */ ;\ 1032 mov MMU_SCONTEXT, %g5 ;\ 1033 ldxa [%g5]ASI_DMMU, %g6 /* g6 = secondary ctx */ ;\ 1034 and %g6, %g4, %g6 /* &= CTXREG_CTX_MASK */ ;\ 1035 cmp %g3, %g6 ;\ 1036 be,a,pt %xcc, 1f ;\ 1037 or %g1, DEMAP_SECOND, %g1 ;\ 1038 or %g1, DEMAP_NUCLEUS, %g1 ;\ 10391: stxa %g0, [%g1]ASI_DTLB_DEMAP /* MMU_DEMAP_PAGE */ ;\ 1040 membar #Sync 1041 1042#if defined(cscope) 1043/* 1044 * Define labels to direct cscope quickly to labels that 1045 * are generated by macro expansion of DTLB_MISS(). 1046 */ 1047 .global tt0_dtlbmiss 1048tt0_dtlbmiss: 1049 .global tt1_dtlbmiss 1050tt1_dtlbmiss: 1051 nop 1052#endif 1053 1054/* 1055 * Needs to be exactly 32 instructions 1056 * 1057 * UTLB NOTE: If we don't hit on the 8k pointer then we branch 1058 * to a special 4M tsb handler. It would be nice if that handler 1059 * could live in this file but currently it seems better to allow 1060 * it to fall thru to sfmmu_tsb_miss. 1061 */ 1062#ifdef UTSB_PHYS 1063#define DTLB_MISS(table_name) ;\ 1064 .global table_name/**/_dtlbmiss ;\ 1065table_name/**/_dtlbmiss: ;\ 1066 HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */ ;\ 1067 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1068 ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1069 ldxa [%g6]ASI_DMMU, %g2 /* g2 = tag access */ ;\ 1070 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1071 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1072 cmp %g3, INVALID_CONTEXT ;\ 1073 ble,pn %xcc, sfmmu_kdtlb_miss ;\ 1074 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1075 mov SCRATCHPAD_UTSBREG, %g3 ;\ 1076 ldxa [%g3]ASI_SCRATCHPAD, %g3 /* g3 = 2nd tsb reg */ ;\ 1077 brgez,pn %g3, sfmmu_udtlb_slowpath /* branch if 2 TSBs */ ;\ 1078 nop ;\ 1079 ldda [%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, %g5 data */;\ 1080 cmp %g4, %g7 ;\ 1081 bne,pn %xcc, sfmmu_tsb_miss_tt /* no 4M TSB, miss */ ;\ 1082 mov %g0, %g3 /* clear 4M tsbe ptr */ ;\ 1083 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1084 stxa %g5, [%g0]ASI_DTLB_IN /* trapstat expects TTE */ ;\ 1085 retry /* in %g5 */ ;\ 1086 unimp 0 ;\ 1087 unimp 0 ;\ 1088 unimp 0 ;\ 1089 unimp 0 ;\ 1090 unimp 0 ;\ 1091 unimp 0 ;\ 1092 unimp 0 ;\ 1093 unimp 0 ;\ 1094 unimp 0 ;\ 1095 .align 128 1096#else /* UTSB_PHYS */ 1097#define DTLB_MISS(table_name) ;\ 1098 .global table_name/**/_dtlbmiss ;\ 1099table_name/**/_dtlbmiss: ;\ 1100 HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */ ;\ 1101 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1102 ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1103 ldxa [%g6]ASI_DMMU, %g2 /* g2 = tag access */ ;\ 1104 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1105 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1106 cmp %g3, INVALID_CONTEXT ;\ 1107 ble,pn %xcc, sfmmu_kdtlb_miss ;\ 1108 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1109 brlz,pn %g1, sfmmu_udtlb_slowpath ;\ 1110 nop ;\ 1111 ldda [%g1]ASI_NQUAD_LD, %g4 /* g4 = tag, %g5 data */ ;\ 1112 cmp %g4, %g7 ;\ 1113 bne,pn %xcc, sfmmu_tsb_miss_tt /* no 4M TSB, miss */ ;\ 1114 mov %g0, %g3 /* clear 4M tsbe ptr */ ;\ 1115 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1116 stxa %g5, [%g0]ASI_DTLB_IN /* trapstat expects TTE */ ;\ 1117 retry /* in %g5 */ ;\ 1118 unimp 0 ;\ 1119 unimp 0 ;\ 1120 unimp 0 ;\ 1121 unimp 0 ;\ 1122 unimp 0 ;\ 1123 unimp 0 ;\ 1124 unimp 0 ;\ 1125 unimp 0 ;\ 1126 unimp 0 ;\ 1127 unimp 0 ;\ 1128 unimp 0 ;\ 1129 .align 128 1130#endif /* UTSB_PHYS */ 1131 1132#if defined(cscope) 1133/* 1134 * Define labels to direct cscope quickly to labels that 1135 * are generated by macro expansion of ITLB_MISS(). 1136 */ 1137 .global tt0_itlbmiss 1138tt0_itlbmiss: 1139 .global tt1_itlbmiss 1140tt1_itlbmiss: 1141 nop 1142#endif 1143 1144/* 1145 * Instruction miss handler. 1146 * ldda instructions will have their ASI patched 1147 * by sfmmu_patch_ktsb at runtime. 1148 * MUST be EXACTLY 32 instructions or we'll break. 1149 */ 1150#ifdef UTSB_PHYS 1151#define ITLB_MISS(table_name) \ 1152 .global table_name/**/_itlbmiss ;\ 1153table_name/**/_itlbmiss: ;\ 1154 HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */ ;\ 1155 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1156 ldxa [%g0]ASI_IMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1157 ldxa [%g6]ASI_IMMU, %g2 /* g2 = tag access */ ;\ 1158 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1159 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1160 cmp %g3, INVALID_CONTEXT ;\ 1161 ble,pn %xcc, sfmmu_kitlb_miss ;\ 1162 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1163 mov SCRATCHPAD_UTSBREG, %g3 ;\ 1164 ldxa [%g3]ASI_SCRATCHPAD, %g3 /* g3 = 2nd tsb reg */ ;\ 1165 brgez,pn %g3, sfmmu_uitlb_slowpath /* branch if 2 TSBs */ ;\ 1166 nop ;\ 1167 ldda [%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\ 1168 cmp %g4, %g7 ;\ 1169 bne,pn %xcc, sfmmu_tsb_miss_tt /* br if 8k ptr miss */ ;\ 1170 mov %g0, %g3 /* no 4M TSB */ ;\ 1171 andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\ 1172 bz,pn %icc, exec_fault ;\ 1173 nop ;\ 1174 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1175 stxa %g5, [%g0]ASI_ITLB_IN /* trapstat expects %g5 */ ;\ 1176 retry ;\ 1177 unimp 0 ;\ 1178 unimp 0 ;\ 1179 unimp 0 ;\ 1180 unimp 0 ;\ 1181 unimp 0 ;\ 1182 unimp 0 ;\ 1183 .align 128 1184#else /* UTSB_PHYS */ 1185#define ITLB_MISS(table_name) \ 1186 .global table_name/**/_itlbmiss ;\ 1187table_name/**/_itlbmiss: ;\ 1188 HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */ ;\ 1189 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1190 ldxa [%g0]ASI_IMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1191 ldxa [%g6]ASI_IMMU, %g2 /* g2 = tag access */ ;\ 1192 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1193 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1194 cmp %g3, INVALID_CONTEXT ;\ 1195 ble,pn %xcc, sfmmu_kitlb_miss ;\ 1196 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1197 brlz,pn %g1, sfmmu_uitlb_slowpath /* if >1 TSB branch */ ;\ 1198 nop ;\ 1199 ldda [%g1]ASI_NQUAD_LD, %g4 /* g4 = tag, g5 = data */ ;\ 1200 cmp %g4, %g7 ;\ 1201 bne,pn %xcc, sfmmu_tsb_miss_tt /* br if 8k ptr miss */ ;\ 1202 mov %g0, %g3 /* no 4M TSB */ ;\ 1203 andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\ 1204 bz,pn %icc, exec_fault ;\ 1205 nop ;\ 1206 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1207 stxa %g5, [%g0]ASI_ITLB_IN /* trapstat expects %g5 */ ;\ 1208 retry ;\ 1209 unimp 0 ;\ 1210 unimp 0 ;\ 1211 unimp 0 ;\ 1212 unimp 0 ;\ 1213 unimp 0 ;\ 1214 unimp 0 ;\ 1215 unimp 0 ;\ 1216 unimp 0 ;\ 1217 .align 128 1218#endif /* UTSB_PHYS */ 1219 1220 1221/* 1222 * This macro is the first level handler for fast protection faults. 1223 * It first demaps the tlb entry which generated the fault and then 1224 * attempts to set the modify bit on the hash. It needs to be 1225 * exactly 32 instructions. 1226 */ 1227#define DTLB_PROT \ 1228 DTLB_DEMAP_ENTRY /* 20 instructions */ ;\ 1229 /* ;\ 1230 * At this point: ;\ 1231 * g1 = ???? ;\ 1232 * g2 = tag access register ;\ 1233 * g3 = ctx number ;\ 1234 * g4 = ???? ;\ 1235 */ ;\ 1236 TT_TRACE(trace_dataprot) /* 2 instr ifdef TRAPTRACE */ ;\ 1237 /* clobbers g1 and g6 */ ;\ 1238 ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1239 brnz,pt %g3, sfmmu_uprot_trap /* user trap */ ;\ 1240 nop ;\ 1241 ba,a,pt %xcc, sfmmu_kprot_trap /* kernel trap */ ;\ 1242 unimp 0 ;\ 1243 unimp 0 ;\ 1244 unimp 0 ;\ 1245 unimp 0 ;\ 1246 unimp 0 ;\ 1247 unimp 0 ;\ 1248 .align 128 1249 1250#define DMMU_EXCEPTION_TL1 ;\ 1251 SWITCH_GLOBALS ;\ 1252 ba,a,pt %xcc, mmu_trap_tl1 ;\ 1253 nop ;\ 1254 .align 32 1255 1256#define MISALIGN_ADDR_TL1 ;\ 1257 ba,a,pt %xcc, mmu_trap_tl1 ;\ 1258 nop ;\ 1259 .align 32 1260 1261/* 1262 * Trace a tsb hit 1263 * g1 = tsbe pointer (in/clobbered) 1264 * g2 = tag access register (in) 1265 * g3 - g4 = scratch (clobbered) 1266 * g5 = tsbe data (in) 1267 * g6 = scratch (clobbered) 1268 * g7 = pc we jumped here from (in) 1269 * ttextra = value to OR in to trap type (%tt) (in) 1270 */ 1271#ifdef TRAPTRACE 1272#define TRACE_TSBHIT(ttextra) \ 1273 membar #Sync ;\ 1274 sethi %hi(FLUSH_ADDR), %g6 ;\ 1275 flush %g6 ;\ 1276 TRACE_PTR(%g3, %g6) ;\ 1277 GET_TRACE_TICK(%g6) ;\ 1278 stxa %g6, [%g3 + TRAP_ENT_TICK]%asi ;\ 1279 stxa %g2, [%g3 + TRAP_ENT_SP]%asi /* tag access */ ;\ 1280 stxa %g5, [%g3 + TRAP_ENT_F1]%asi /* tsb data */ ;\ 1281 rdpr %tnpc, %g6 ;\ 1282 stxa %g6, [%g3 + TRAP_ENT_F2]%asi ;\ 1283 stxa %g1, [%g3 + TRAP_ENT_F3]%asi /* tsb pointer */ ;\ 1284 stxa %g0, [%g3 + TRAP_ENT_F4]%asi ;\ 1285 rdpr %tpc, %g6 ;\ 1286 stxa %g6, [%g3 + TRAP_ENT_TPC]%asi ;\ 1287 rdpr %tl, %g6 ;\ 1288 stha %g6, [%g3 + TRAP_ENT_TL]%asi ;\ 1289 rdpr %tt, %g6 ;\ 1290 or %g6, (ttextra), %g6 ;\ 1291 stha %g6, [%g3 + TRAP_ENT_TT]%asi ;\ 1292 ldxa [%g0]ASI_IMMU, %g1 /* tag target */ ;\ 1293 ldxa [%g0]ASI_DMMU, %g4 ;\ 1294 cmp %g6, FAST_IMMU_MISS_TT ;\ 1295 movne %icc, %g4, %g1 ;\ 1296 stxa %g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tsb tag */ ;\ 1297 stxa %g0, [%g3 + TRAP_ENT_TR]%asi ;\ 1298 TRACE_NEXT(%g3, %g4, %g6) 1299#else 1300#define TRACE_TSBHIT(ttextra) 1301#endif 1302 1303#if defined(lint) 1304 1305struct scb trap_table; 1306struct scb scb; /* trap_table/scb are the same object */ 1307 1308#else /* lint */ 1309 1310/* 1311 * ======================================================================= 1312 * SPARC V9 TRAP TABLE 1313 * 1314 * The trap table is divided into two halves: the first half is used when 1315 * taking traps when TL=0; the second half is used when taking traps from 1316 * TL>0. Note that handlers in the second half of the table might not be able 1317 * to make the same assumptions as handlers in the first half of the table. 1318 * 1319 * Worst case trap nesting so far: 1320 * 1321 * at TL=0 client issues software trap requesting service 1322 * at TL=1 nucleus wants a register window 1323 * at TL=2 register window clean/spill/fill takes a TLB miss 1324 * at TL=3 processing TLB miss 1325 * at TL=4 handle asynchronous error 1326 * 1327 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode". 1328 * 1329 * ======================================================================= 1330 */ 1331 .section ".text" 1332 .align 4 1333 .global trap_table, scb, trap_table0, trap_table1, etrap_table 1334 .type trap_table, #object 1335 .type scb, #object 1336trap_table: 1337scb: 1338trap_table0: 1339 /* hardware traps */ 1340 NOT; /* 000 reserved */ 1341 RED; /* 001 power on reset */ 1342 RED; /* 002 watchdog reset */ 1343 RED; /* 003 externally initiated reset */ 1344 RED; /* 004 software initiated reset */ 1345 RED; /* 005 red mode exception */ 1346 NOT; NOT; /* 006 - 007 reserved */ 1347 IMMU_EXCEPTION; /* 008 instruction access exception */ 1348 NOT; /* 009 instruction access MMU miss */ 1349 ASYNC_TRAP(T_INSTR_ERROR, trace_gen, tt0_iae); 1350 /* 00A instruction access error */ 1351 NOT; NOT4; /* 00B - 00F reserved */ 1352 ILLTRAP_INSTR; /* 010 illegal instruction */ 1353 TRAP(T_PRIV_INSTR); /* 011 privileged opcode */ 1354 NOT; /* 012 unimplemented LDD */ 1355 NOT; /* 013 unimplemented STD */ 1356 NOT4; NOT4; NOT4; /* 014 - 01F reserved */ 1357 FP_DISABLED_TRAP; /* 020 fp disabled */ 1358 FP_IEEE_TRAP; /* 021 fp exception ieee 754 */ 1359 FP_TRAP; /* 022 fp exception other */ 1360 TAG_OVERFLOW; /* 023 tag overflow */ 1361 CLEAN_WINDOW; /* 024 - 027 clean window */ 1362 DIV_BY_ZERO; /* 028 division by zero */ 1363 NOT; /* 029 internal processor error */ 1364 NOT; NOT; NOT4; /* 02A - 02F reserved */ 1365 DMMU_EXCEPTION; /* 030 data access exception */ 1366 NOT; /* 031 data access MMU miss */ 1367 ASYNC_TRAP(T_DATA_ERROR, trace_gen, tt0_dae); 1368 /* 032 data access error */ 1369 NOT; /* 033 data access protection */ 1370 DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */ 1371 DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */ 1372 DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */ 1373 DMMU_EXC_AG_PRIV; /* 037 privileged action */ 1374 NOT; /* 038 LDQF mem address not aligned */ 1375 NOT; /* 039 STQF mem address not aligned */ 1376 NOT; NOT; NOT4; /* 03A - 03F reserved */ 1377 LABELED_BAD(tt0_asdat); /* 040 async data error */ 1378 LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */ 1379 LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */ 1380 LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */ 1381 LEVEL_INTERRUPT(4); /* 044 interrupt level 4 */ 1382 LEVEL_INTERRUPT(5); /* 045 interrupt level 5 */ 1383 LEVEL_INTERRUPT(6); /* 046 interrupt level 6 */ 1384 LEVEL_INTERRUPT(7); /* 047 interrupt level 7 */ 1385 LEVEL_INTERRUPT(8); /* 048 interrupt level 8 */ 1386 LEVEL_INTERRUPT(9); /* 049 interrupt level 9 */ 1387 LEVEL_INTERRUPT(10); /* 04A interrupt level 10 */ 1388 LEVEL_INTERRUPT(11); /* 04B interrupt level 11 */ 1389 LEVEL_INTERRUPT(12); /* 04C interrupt level 12 */ 1390 LEVEL_INTERRUPT(13); /* 04D interrupt level 13 */ 1391 LEVEL14_INTERRUPT; /* 04E interrupt level 14 */ 1392 LEVEL_INTERRUPT(15); /* 04F interrupt level 15 */ 1393 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F reserved */ 1394 VECTOR_INTERRUPT; /* 060 interrupt vector */ 1395 GOTO(kmdb_trap); /* 061 PA watchpoint */ 1396 GOTO(kmdb_trap); /* 062 VA watchpoint */ 1397 GOTO_TT(ce_err, trace_gen); /* 063 corrected ECC error */ 1398 ITLB_MISS(tt0); /* 064 instruction access MMU miss */ 1399 DTLB_MISS(tt0); /* 068 data access MMU miss */ 1400 DTLB_PROT; /* 06C data access protection */ 1401 LABELED_BAD(tt0_fecc); /* 070 fast ecache ECC error */ 1402 LABELED_BAD(tt0_dperr); /* 071 Cheetah+ dcache parity error */ 1403 LABELED_BAD(tt0_iperr); /* 072 Cheetah+ icache parity error */ 1404 NOT; /* 073 reserved */ 1405 NOT4; NOT4; NOT4; /* 074 - 07F reserved */ 1406 NOT4; /* 080 spill 0 normal */ 1407 SPILL_32bit_asi(ASI_AIUP,sn0); /* 084 spill 1 normal */ 1408 SPILL_64bit_asi(ASI_AIUP,sn0); /* 088 spill 2 normal */ 1409 SPILL_32clean(ASI_AIUP,sn0); /* 08C spill 3 normal */ 1410 SPILL_64clean(ASI_AIUP,sn0); /* 090 spill 4 normal */ 1411 SPILL_32bit(not); /* 094 spill 5 normal */ 1412 SPILL_64bit(not); /* 098 spill 6 normal */ 1413 SPILL_mixed; /* 09C spill 7 normal */ 1414 NOT4; /* 0A0 spill 0 other */ 1415 SPILL_32bit_asi(ASI_AIUS,so0); /* 0A4 spill 1 other */ 1416 SPILL_64bit_asi(ASI_AIUS,so0); /* 0A8 spill 2 other */ 1417 SPILL_32bit_asi(ASI_AIUS,so0); /* 0AC spill 3 other */ 1418 SPILL_64bit_asi(ASI_AIUS,so0); /* 0B0 spill 4 other */ 1419 NOT4; /* 0B4 spill 5 other */ 1420 NOT4; /* 0B8 spill 6 other */ 1421 NOT4; /* 0BC spill 7 other */ 1422 NOT4; /* 0C0 fill 0 normal */ 1423 FILL_32bit_asi(ASI_AIUP,fn0); /* 0C4 fill 1 normal */ 1424 FILL_64bit_asi(ASI_AIUP,fn0); /* 0C8 fill 2 normal */ 1425 FILL_32bit_asi(ASI_AIUP,fn0); /* 0CC fill 3 normal */ 1426 FILL_64bit_asi(ASI_AIUP,fn0); /* 0D0 fill 4 normal */ 1427 FILL_32bit(not); /* 0D4 fill 5 normal */ 1428 FILL_64bit(not); /* 0D8 fill 6 normal */ 1429 FILL_mixed; /* 0DC fill 7 normal */ 1430 NOT4; /* 0E0 fill 0 other */ 1431 NOT4; /* 0E4 fill 1 other */ 1432 NOT4; /* 0E8 fill 2 other */ 1433 NOT4; /* 0EC fill 3 other */ 1434 NOT4; /* 0F0 fill 4 other */ 1435 NOT4; /* 0F4 fill 5 other */ 1436 NOT4; /* 0F8 fill 6 other */ 1437 NOT4; /* 0FC fill 7 other */ 1438 /* user traps */ 1439 GOTO(syscall_trap_4x); /* 100 old system call */ 1440 TRAP(T_BREAKPOINT); /* 101 user breakpoint */ 1441 TRAP(T_DIV0); /* 102 user divide by zero */ 1442 FLUSHW(); /* 103 flush windows */ 1443 GOTO(.clean_windows); /* 104 clean windows */ 1444 BAD; /* 105 range check ?? */ 1445 GOTO(.fix_alignment); /* 106 do unaligned references */ 1446 BAD; /* 107 unused */ 1447#ifndef DEBUG 1448 SYSCALL(syscall_trap32); /* 108 ILP32 system call on LP64 */ 1449#else /* DEBUG */ 1450 GOTO(syscall_wrapper32); /* 108 ILP32 system call on LP64 */ 1451#endif /* DEBUG */ 1452 GOTO(set_trap0_addr); /* 109 set trap0 address */ 1453 BAD; BAD; BAD4; /* 10A - 10F unused */ 1454 TRP4; TRP4; TRP4; TRP4; /* 110 - 11F V9 user trap handlers */ 1455 GOTO(.getcc); /* 120 get condition codes */ 1456 GOTO(.setcc); /* 121 set condition codes */ 1457 GOTO(.getpsr); /* 122 get psr */ 1458 GOTO(.setpsr); /* 123 set psr (some fields) */ 1459 GOTO(get_timestamp); /* 124 get timestamp */ 1460 GOTO(get_virtime); /* 125 get lwp virtual time */ 1461 PRIV(self_xcall); /* 126 self xcall */ 1462 GOTO(get_hrestime); /* 127 get hrestime */ 1463 BAD; /* 128 ST_SETV9STACK */ 1464 GOTO(.getlgrp); /* 129 get lgrpid */ 1465 BAD; BAD; BAD4; /* 12A - 12F unused */ 1466 BAD4; BAD4; /* 130 - 137 unused */ 1467 DTRACE_PID; /* 138 dtrace pid tracing provider */ 1468 BAD; /* 139 unused */ 1469 DTRACE_RETURN; /* 13A dtrace pid return probe */ 1470 BAD; BAD4; /* 13B - 13F unused */ 1471#ifndef DEBUG 1472 SYSCALL(syscall_trap); /* 140 LP64 system call */ 1473#else /* DEBUG */ 1474 GOTO(syscall_wrapper); /* 140 LP64 system call */ 1475#endif /* DEBUG */ 1476 SYSCALL(nosys); /* 141 unused system call trap */ 1477#ifdef DEBUG_USER_TRAPTRACECTL 1478 GOTO(.traptrace_freeze); /* 142 freeze traptrace */ 1479 GOTO(.traptrace_unfreeze); /* 143 unfreeze traptrace */ 1480#else 1481 SYSCALL(nosys); /* 142 unused system call trap */ 1482 SYSCALL(nosys); /* 143 unused system call trap */ 1483#endif 1484 BAD4; BAD4; BAD4; /* 144 - 14F unused */ 1485 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */ 1486 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */ 1487 BAD; /* 170 - unused */ 1488 BAD; /* 171 - unused */ 1489 BAD; BAD; /* 172 - 173 unused */ 1490 BAD4; BAD4; /* 174 - 17B unused */ 1491#ifdef PTL1_PANIC_DEBUG 1492 mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic); 1493 /* 17C test ptl1_panic */ 1494#else 1495 BAD; /* 17C unused */ 1496#endif /* PTL1_PANIC_DEBUG */ 1497 PRIV(kmdb_trap); /* 17D kmdb enter (L1-A) */ 1498 PRIV(kmdb_trap); /* 17E kmdb breakpoint */ 1499 PRIV(kctx_obp_bpt); /* 17F obp breakpoint */ 1500 /* reserved */ 1501 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */ 1502 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */ 1503 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */ 1504 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */ 1505 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */ 1506 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */ 1507 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */ 1508 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */ 1509trap_table1: 1510 NOT4; NOT4; NOT; NOT; /* 000 - 009 unused */ 1511 ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen, tt1_iae); 1512 /* 00A instruction access error */ 1513 NOT; NOT4; /* 00B - 00F unused */ 1514 NOT4; NOT4; NOT4; NOT4; /* 010 - 01F unused */ 1515 NOT4; /* 020 - 023 unused */ 1516 CLEAN_WINDOW; /* 024 - 027 clean window */ 1517 NOT4; NOT4; /* 028 - 02F unused */ 1518 DMMU_EXCEPTION_TL1; /* 030 data access exception */ 1519 NOT; /* 031 unused */ 1520 ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen, tt1_dae); 1521 /* 032 data access error */ 1522 NOT; /* 033 unused */ 1523 MISALIGN_ADDR_TL1; /* 034 mem address not aligned */ 1524 NOT; NOT; NOT; NOT4; NOT4 /* 035 - 03F unused */ 1525 LABELED_BAD(tt1_asdat); /* 040 async data error */ 1526 NOT; NOT; NOT; /* 041 - 043 unused */ 1527 NOT4; NOT4; NOT4; /* 044 - 04F unused */ 1528 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F unused */ 1529 NOT; /* 060 unused */ 1530 GOTO(kmdb_trap_tl1); /* 061 PA watchpoint */ 1531 GOTO(kmdb_trap_tl1); /* 062 VA watchpoint */ 1532 GOTO_TT(ce_err_tl1, trace_gen); /* 063 corrected ECC error */ 1533 ITLB_MISS(tt1); /* 064 instruction access MMU miss */ 1534 DTLB_MISS(tt1); /* 068 data access MMU miss */ 1535 DTLB_PROT; /* 06C data access protection */ 1536 LABELED_BAD(tt1_fecc); /* 070 fast ecache ECC error */ 1537 LABELED_BAD(tt1_dperr); /* 071 Cheetah+ dcache parity error */ 1538 LABELED_BAD(tt1_iperr); /* 072 Cheetah+ icache parity error */ 1539 NOT; /* 073 reserved */ 1540 NOT4; NOT4; NOT4; /* 074 - 07F reserved */ 1541 NOT4; /* 080 spill 0 normal */ 1542 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 084 spill 1 normal */ 1543 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 088 spill 2 normal */ 1544 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 08C spill 3 normal */ 1545 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 090 spill 4 normal */ 1546 SPILL_32bit(not); /* 094 spill 5 normal */ 1547 SPILL_64bit(not); /* 098 spill 6 normal */ 1548 SPILL_mixed; /* 09C spill 7 normal */ 1549 NOT4; /* 0A0 spill 0 other */ 1550 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0A4 spill 1 other */ 1551 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0A8 spill 2 other */ 1552 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0AC spill 3 other */ 1553 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0B0 spill 4 other */ 1554 NOT4; /* 0B4 spill 5 other */ 1555 NOT4; /* 0B8 spill 6 other */ 1556 NOT4; /* 0BC spill 7 other */ 1557 NOT4; /* 0C0 fill 0 normal */ 1558 FILL_32bit_tt1(ASI_AIUP,fn1); /* 0C4 fill 1 normal */ 1559 FILL_64bit_tt1(ASI_AIUP,fn1); /* 0C8 fill 2 normal */ 1560 FILL_32bit_tt1(ASI_AIUP,fn1); /* 0CC fill 3 normal */ 1561 FILL_64bit_tt1(ASI_AIUP,fn1); /* 0D0 fill 4 normal */ 1562 FILL_32bit(not); /* 0D4 fill 5 normal */ 1563 FILL_64bit(not); /* 0D8 fill 6 normal */ 1564 FILL_mixed; /* 0DC fill 7 normal */ 1565 NOT4; NOT4; NOT4; NOT4; /* 0E0 - 0EF unused */ 1566 NOT4; NOT4; NOT4; NOT4; /* 0F0 - 0FF unused */ 1567 LABELED_BAD(tt1_swtrap0); /* 100 fast ecache ECC error (cont) */ 1568 LABELED_BAD(tt1_swtrap1); /* 101 Ch+ D$ parity error (cont) */ 1569 LABELED_BAD(tt1_swtrap2); /* 102 Ch+ I$ parity error (cont) */ 1570 NOT; /* 103 reserved */ 1571/* 1572 * We only reserve the above four special case soft traps for code running 1573 * at TL>0, so we can truncate the trap table here. 1574 */ 1575etrap_table: 1576 .size trap_table, (.-trap_table) 1577 .size scb, (.-scb) 1578 1579/* 1580 * We get to exec_fault in the case of an instruction miss and tte 1581 * has no execute bit set. We go to tl0 to handle it. 1582 * 1583 * g1 = tsbe pointer (in/clobbered) 1584 * g2 = tag access register (in) 1585 * g3 - g4 = scratch (clobbered) 1586 * g5 = tsbe data (in) 1587 * g6 = scratch (clobbered) 1588 */ 1589 ALTENTRY(exec_fault) 1590 TRACE_TSBHIT(0x200) 1591 SWITCH_GLOBALS 1592 mov MMU_TAG_ACCESS, %g4 1593 ldxa [%g4]ASI_IMMU, %g2 ! arg1 = addr 1594 mov T_INSTR_MMU_MISS, %g3 ! arg2 = traptype 1595 set trap, %g1 1596 ba,pt %xcc, sys_trap 1597 mov -1, %g4 1598 1599.mmu_exception_not_aligned: 1600 rdpr %tstate, %g1 1601 btst TSTATE_PRIV, %g1 1602 bnz,pn %icc, 2f 1603 nop 1604 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1605 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1606 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1607 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1608 brz,pt %g5, 2f 1609 nop 1610 ldn [%g5 + P_UTRAP15], %g5 ! unaligned utrap? 1611 brz,pn %g5, 2f 1612 nop 1613 btst 1, %sp 1614 bz,pt %xcc, 1f ! 32 bit user program 1615 nop 1616 ba,pt %xcc, .setup_v9utrap ! 64 bit user program 1617 nop 16181: 1619 ba,pt %xcc, .setup_utrap 1620 or %g2, %g0, %g7 16212: 1622 ba,pt %xcc, .mmu_exception_end 1623 mov T_ALIGNMENT, %g1 1624 1625.mmu_priv_exception: 1626 rdpr %tstate, %g1 1627 btst TSTATE_PRIV, %g1 1628 bnz,pn %icc, 1f 1629 nop 1630 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1631 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1632 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1633 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1634 brz,pt %g5, 1f 1635 nop 1636 ldn [%g5 + P_UTRAP16], %g5 1637 brnz,pt %g5, .setup_v9utrap 1638 nop 16391: 1640 mov T_PRIV_INSTR, %g1 1641 1642.mmu_exception_end: 1643 CPU_INDEX(%g4, %g5) 1644 set cpu_core, %g5 1645 sllx %g4, CPU_CORE_SHIFT, %g4 1646 add %g4, %g5, %g4 1647 lduh [%g4 + CPUC_DTRACE_FLAGS], %g5 1648 andcc %g5, CPU_DTRACE_NOFAULT, %g0 1649 bz %xcc, .mmu_exception_tlb_chk 1650 or %g5, CPU_DTRACE_BADADDR, %g5 1651 stuh %g5, [%g4 + CPUC_DTRACE_FLAGS] 1652 done 1653 1654.mmu_exception_tlb_chk: 1655 GET_CPU_IMPL(%g5) ! check SFSR.FT to see if this 1656 cmp %g5, PANTHER_IMPL ! is a TLB parity error. But 1657 bne 2f ! we only do this check while 1658 mov 1, %g4 ! running on Panther CPUs 1659 sllx %g4, PN_SFSR_PARITY_SHIFT, %g4 ! since US-I/II use the same 1660 andcc %g3, %g4, %g0 ! bit for something else which 1661 bz 2f ! will be handled later. 1662 nop 1663.mmu_exception_is_tlb_parity: 1664 .weak itlb_parity_trap 1665 .weak dtlb_parity_trap 1666 set itlb_parity_trap, %g4 1667 cmp %g1, T_INSTR_EXCEPTION ! branch to the itlb or 1668 be 3f ! dtlb parity handler 1669 nop ! if this trap is due 1670 set dtlb_parity_trap, %g4 1671 cmp %g1, T_DATA_EXCEPTION ! to a IMMU exception 1672 be 3f ! or DMMU exception. 1673 nop 16742: 1675 sllx %g3, 32, %g3 1676 or %g3, %g1, %g3 1677 set trap, %g1 1678 ba,pt %xcc, sys_trap 1679 sub %g0, 1, %g4 16803: 1681 jmp %g4 ! off to the appropriate 1682 nop ! TLB parity handler 1683 1684.fp_disabled: 1685 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1686 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1687#ifdef SF_ERRATA_30 /* call causes fp-disabled */ 1688 brz,a,pn %g1, 2f 1689 nop 1690#endif 1691 rdpr %tstate, %g4 1692 btst TSTATE_PRIV, %g4 1693#ifdef SF_ERRATA_30 /* call causes fp-disabled */ 1694 bnz,pn %icc, 2f 1695 nop 1696#else 1697 bnz,a,pn %icc, ptl1_panic 1698 mov PTL1_BAD_FPTRAP, %g1 1699#endif 1700 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1701 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1702 brz,a,pt %g5, 2f 1703 nop 1704 ldn [%g5 + P_UTRAP7], %g5 ! fp_disabled utrap? 1705 brz,a,pn %g5, 2f 1706 nop 1707 btst 1, %sp 1708 bz,a,pt %xcc, 1f ! 32 bit user program 1709 nop 1710 ba,a,pt %xcc, .setup_v9utrap ! 64 bit user program 1711 nop 17121: 1713 ba,pt %xcc, .setup_utrap 1714 or %g0, %g0, %g7 17152: 1716 set fp_disabled, %g1 1717 ba,pt %xcc, sys_trap 1718 sub %g0, 1, %g4 1719 1720.fp_ieee_exception: 1721 rdpr %tstate, %g1 1722 btst TSTATE_PRIV, %g1 1723 bnz,a,pn %icc, ptl1_panic 1724 mov PTL1_BAD_FPTRAP, %g1 1725 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1726 stx %fsr, [%g1 + CPU_TMP1] 1727 ldx [%g1 + CPU_TMP1], %g2 1728 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1729 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1730 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1731 brz,a,pt %g5, 1f 1732 nop 1733 ldn [%g5 + P_UTRAP8], %g5 1734 brnz,a,pt %g5, .setup_v9utrap 1735 nop 17361: 1737 set _fp_ieee_exception, %g1 1738 ba,pt %xcc, sys_trap 1739 sub %g0, 1, %g4 1740 1741/* 1742 * Register Inputs: 1743 * %g5 user trap handler 1744 * %g7 misaligned addr - for alignment traps only 1745 */ 1746.setup_utrap: 1747 set trap, %g1 ! setup in case we go 1748 mov T_FLUSH_PCB, %g3 ! through sys_trap on 1749 sub %g0, 1, %g4 ! the save instruction below 1750 1751 /* 1752 * If the DTrace pid provider is single stepping a copied-out 1753 * instruction, t->t_dtrace_step will be set. In that case we need 1754 * to abort the single-stepping (since execution of the instruction 1755 * was interrupted) and use the value of t->t_dtrace_npc as the %npc. 1756 */ 1757 save %sp, -SA(MINFRAME32), %sp ! window for trap handler 1758 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1759 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1760 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step 1761 rdpr %tnpc, %l2 ! arg1 == tnpc 1762 brz,pt %g2, 1f 1763 rdpr %tpc, %l1 ! arg0 == tpc 1764 1765 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1766 ldn [%g1 + T_DTRACE_NPC], %l2 ! arg1 = t->t_dtrace_npc (step) 1767 brz,pt %g2, 1f 1768 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1769 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 17701: 1771 mov %g7, %l3 ! arg2 == misaligned address 1772 1773 rdpr %tstate, %g1 ! cwp for trap handler 1774 rdpr %cwp, %g4 1775 bclr TSTATE_CWP_MASK, %g1 1776 wrpr %g1, %g4, %tstate 1777 wrpr %g0, %g5, %tnpc ! trap handler address 1778 FAST_TRAP_DONE 1779 /* NOTREACHED */ 1780 1781.check_v9utrap: 1782 rdpr %tstate, %g1 1783 btst TSTATE_PRIV, %g1 1784 bnz,a,pn %icc, 3f 1785 nop 1786 CPU_ADDR(%g4, %g1) ! load CPU struct addr 1787 ldn [%g4 + CPU_THREAD], %g5 ! load thread pointer 1788 ldn [%g5 + T_PROCP], %g5 ! load proc pointer 1789 ldn [%g5 + P_UTRAPS], %g5 ! are there utraps? 1790 1791 cmp %g3, T_SOFTWARE_TRAP 1792 bne,a,pt %icc, 1f 1793 nop 1794 1795 brz,pt %g5, 3f ! if p_utraps == NULL goto trap() 1796 rdpr %tt, %g3 ! delay - get actual hw trap type 1797 1798 sub %g3, 254, %g1 ! UT_TRAP_INSTRUCTION_16 = p_utraps[18] 1799 ba,pt %icc, 2f 1800 smul %g1, CPTRSIZE, %g2 18011: 1802 brz,a,pt %g5, 3f ! if p_utraps == NULL goto trap() 1803 nop 1804 1805 cmp %g3, T_UNIMP_INSTR 1806 bne,a,pt %icc, 2f 1807 nop 1808 1809 mov 1, %g1 1810 st %g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR 1811 rdpr %tpc, %g1 ! ld trapping instruction using 1812 lduwa [%g1]ASI_AIUP, %g1 ! "AS IF USER" ASI which could fault 1813 st %g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR 1814 1815 sethi %hi(0xc1c00000), %g4 ! setup mask for illtrap instruction 1816 andcc %g1, %g4, %g4 ! and instruction with mask 1817 bnz,a,pt %icc, 3f ! if %g4 == zero, %g1 is an ILLTRAP 1818 nop ! fall thru to setup 18192: 1820 ldn [%g5 + %g2], %g5 1821 brnz,a,pt %g5, .setup_v9utrap 1822 nop 18233: 1824 set trap, %g1 1825 ba,pt %xcc, sys_trap 1826 sub %g0, 1, %g4 1827 /* NOTREACHED */ 1828 1829/* 1830 * Register Inputs: 1831 * %g5 user trap handler 1832 */ 1833.setup_v9utrap: 1834 set trap, %g1 ! setup in case we go 1835 mov T_FLUSH_PCB, %g3 ! through sys_trap on 1836 sub %g0, 1, %g4 ! the save instruction below 1837 1838 /* 1839 * If the DTrace pid provider is single stepping a copied-out 1840 * instruction, t->t_dtrace_step will be set. In that case we need 1841 * to abort the single-stepping (since execution of the instruction 1842 * was interrupted) and use the value of t->t_dtrace_npc as the %npc. 1843 */ 1844 save %sp, -SA(MINFRAME64), %sp ! window for trap handler 1845 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1846 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1847 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step 1848 rdpr %tnpc, %l7 ! arg1 == tnpc 1849 brz,pt %g2, 1f 1850 rdpr %tpc, %l6 ! arg0 == tpc 1851 1852 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1853 ldn [%g1 + T_DTRACE_NPC], %l7 ! arg1 == t->t_dtrace_npc (step) 1854 brz,pt %g2, 1f 1855 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1856 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 18571: 1858 rdpr %tstate, %g2 ! cwp for trap handler 1859 rdpr %cwp, %g4 1860 bclr TSTATE_CWP_MASK, %g2 1861 wrpr %g2, %g4, %tstate 1862 1863 ldn [%g1 + T_PROCP], %g4 ! load proc pointer 1864 ldn [%g4 + P_AS], %g4 ! load as pointer 1865 ldn [%g4 + A_USERLIMIT], %g4 ! load as userlimit 1866 cmp %l7, %g4 ! check for single-step set 1867 bne,pt %xcc, 4f 1868 nop 1869 ldn [%g1 + T_LWP], %g1 ! load klwp pointer 1870 ld [%g1 + PCB_STEP], %g4 ! load single-step flag 1871 cmp %g4, STEP_ACTIVE ! step flags set in pcb? 1872 bne,pt %icc, 4f 1873 nop 1874 stn %g5, [%g1 + PCB_TRACEPC] ! save trap handler addr in pcb 1875 mov %l7, %g4 ! on entry to precise user trap 1876 add %l6, 4, %l7 ! handler, %l6 == pc, %l7 == npc 1877 ! at time of trap 1878 wrpr %g0, %g4, %tnpc ! generate FLTBOUNDS, 1879 ! %g4 == userlimit 1880 FAST_TRAP_DONE 1881 /* NOTREACHED */ 18824: 1883 wrpr %g0, %g5, %tnpc ! trap handler address 1884 FAST_TRAP_DONE_CHK_INTR 1885 /* NOTREACHED */ 1886 1887.fp_exception: 1888 CPU_ADDR(%g1, %g4) 1889 stx %fsr, [%g1 + CPU_TMP1] 1890 ldx [%g1 + CPU_TMP1], %g2 1891 1892 /* 1893 * Cheetah takes unfinished_FPop trap for certain range of operands 1894 * to the "fitos" instruction. Instead of going through the slow 1895 * software emulation path, we try to simulate the "fitos" instruction 1896 * via "fitod" and "fdtos" provided the following conditions are met: 1897 * 1898 * fpu_exists is set (if DEBUG) 1899 * not in privileged mode 1900 * ftt is unfinished_FPop 1901 * NXM IEEE trap is not enabled 1902 * instruction at %tpc is "fitos" 1903 * 1904 * Usage: 1905 * %g1 per cpu address 1906 * %g2 %fsr 1907 * %g6 user instruction 1908 * 1909 * Note that we can take a memory access related trap while trying 1910 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR 1911 * flag to catch those traps and let the SFMMU code deal with page 1912 * fault and data access exception. 1913 */ 1914#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 1915 sethi %hi(fpu_exists), %g7 1916 ld [%g7 + %lo(fpu_exists)], %g7 1917 brz,pn %g7, .fp_exception_cont 1918 nop 1919#endif 1920 rdpr %tstate, %g7 ! branch if in privileged mode 1921 btst TSTATE_PRIV, %g7 1922 bnz,pn %xcc, .fp_exception_cont 1923 srl %g2, FSR_FTT_SHIFT, %g7 ! extract ftt from %fsr 1924 and %g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7 1925 cmp %g7, FTT_UNFIN 1926 set FSR_TEM_NX, %g5 1927 bne,pn %xcc, .fp_exception_cont ! branch if NOT unfinished_FPop 1928 andcc %g2, %g5, %g0 1929 bne,pn %xcc, .fp_exception_cont ! branch if FSR_TEM_NX enabled 1930 rdpr %tpc, %g5 ! get faulting PC 1931 1932 or %g0, 1, %g7 1933 st %g7, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag 1934 lda [%g5]ASI_USER, %g6 ! get user's instruction 1935 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 1936 1937 set FITOS_INSTR_MASK, %g7 1938 and %g6, %g7, %g7 1939 set FITOS_INSTR, %g5 1940 cmp %g7, %g5 1941 bne,pn %xcc, .fp_exception_cont ! branch if not FITOS_INSTR 1942 nop 1943 1944 /* 1945 * This is unfinished FPops trap for "fitos" instruction. We 1946 * need to simulate "fitos" via "fitod" and "fdtos" instruction 1947 * sequence. 1948 * 1949 * We need a temporary FP register to do the conversion. Since 1950 * both source and destination operands for the "fitos" instruction 1951 * have to be within %f0-%f31, we use an FP register from the upper 1952 * half to guarantee that it won't collide with the source or the 1953 * dest operand. However, we do have to save and restore its value. 1954 * 1955 * We use %d62 as a temporary FP register for the conversion and 1956 * branch to appropriate instruction within the conversion tables 1957 * based upon the rs2 and rd values. 1958 */ 1959 1960 std %d62, [%g1 + CPU_TMP1] ! save original value 1961 1962 srl %g6, FITOS_RS2_SHIFT, %g7 1963 and %g7, FITOS_REG_MASK, %g7 1964 set _fitos_fitod_table, %g4 1965 sllx %g7, 2, %g7 1966 jmp %g4 + %g7 1967 ba,pt %xcc, _fitos_fitod_done 1968 .empty 1969 1970_fitos_fitod_table: 1971 fitod %f0, %d62 1972 fitod %f1, %d62 1973 fitod %f2, %d62 1974 fitod %f3, %d62 1975 fitod %f4, %d62 1976 fitod %f5, %d62 1977 fitod %f6, %d62 1978 fitod %f7, %d62 1979 fitod %f8, %d62 1980 fitod %f9, %d62 1981 fitod %f10, %d62 1982 fitod %f11, %d62 1983 fitod %f12, %d62 1984 fitod %f13, %d62 1985 fitod %f14, %d62 1986 fitod %f15, %d62 1987 fitod %f16, %d62 1988 fitod %f17, %d62 1989 fitod %f18, %d62 1990 fitod %f19, %d62 1991 fitod %f20, %d62 1992 fitod %f21, %d62 1993 fitod %f22, %d62 1994 fitod %f23, %d62 1995 fitod %f24, %d62 1996 fitod %f25, %d62 1997 fitod %f26, %d62 1998 fitod %f27, %d62 1999 fitod %f28, %d62 2000 fitod %f29, %d62 2001 fitod %f30, %d62 2002 fitod %f31, %d62 2003_fitos_fitod_done: 2004 2005 /* 2006 * Now convert data back into single precision 2007 */ 2008 srl %g6, FITOS_RD_SHIFT, %g7 2009 and %g7, FITOS_REG_MASK, %g7 2010 set _fitos_fdtos_table, %g4 2011 sllx %g7, 2, %g7 2012 jmp %g4 + %g7 2013 ba,pt %xcc, _fitos_fdtos_done 2014 .empty 2015 2016_fitos_fdtos_table: 2017 fdtos %d62, %f0 2018 fdtos %d62, %f1 2019 fdtos %d62, %f2 2020 fdtos %d62, %f3 2021 fdtos %d62, %f4 2022 fdtos %d62, %f5 2023 fdtos %d62, %f6 2024 fdtos %d62, %f7 2025 fdtos %d62, %f8 2026 fdtos %d62, %f9 2027 fdtos %d62, %f10 2028 fdtos %d62, %f11 2029 fdtos %d62, %f12 2030 fdtos %d62, %f13 2031 fdtos %d62, %f14 2032 fdtos %d62, %f15 2033 fdtos %d62, %f16 2034 fdtos %d62, %f17 2035 fdtos %d62, %f18 2036 fdtos %d62, %f19 2037 fdtos %d62, %f20 2038 fdtos %d62, %f21 2039 fdtos %d62, %f22 2040 fdtos %d62, %f23 2041 fdtos %d62, %f24 2042 fdtos %d62, %f25 2043 fdtos %d62, %f26 2044 fdtos %d62, %f27 2045 fdtos %d62, %f28 2046 fdtos %d62, %f29 2047 fdtos %d62, %f30 2048 fdtos %d62, %f31 2049_fitos_fdtos_done: 2050 2051 ldd [%g1 + CPU_TMP1], %d62 ! restore %d62 2052 2053#if DEBUG 2054 /* 2055 * Update FPop_unfinished trap kstat 2056 */ 2057 set fpustat+FPUSTAT_UNFIN_KSTAT, %g7 2058 ldx [%g7], %g5 20591: 2060 add %g5, 1, %g6 2061 2062 casxa [%g7] ASI_N, %g5, %g6 2063 cmp %g5, %g6 2064 bne,a,pn %xcc, 1b 2065 or %g0, %g6, %g5 2066 2067 /* 2068 * Update fpu_sim_fitos kstat 2069 */ 2070 set fpuinfo+FPUINFO_FITOS_KSTAT, %g7 2071 ldx [%g7], %g5 20721: 2073 add %g5, 1, %g6 2074 2075 casxa [%g7] ASI_N, %g5, %g6 2076 cmp %g5, %g6 2077 bne,a,pn %xcc, 1b 2078 or %g0, %g6, %g5 2079#endif /* DEBUG */ 2080 2081 FAST_TRAP_DONE 2082 2083.fp_exception_cont: 2084 /* 2085 * Let _fp_exception deal with simulating FPop instruction. 2086 * Note that we need to pass %fsr in %g2 (already read above). 2087 */ 2088 2089 set _fp_exception, %g1 2090 ba,pt %xcc, sys_trap 2091 sub %g0, 1, %g4 2092 2093.clean_windows: 2094 set trap, %g1 2095 mov T_FLUSH_PCB, %g3 2096 sub %g0, 1, %g4 2097 save 2098 flushw 2099 restore 2100 wrpr %g0, %g0, %cleanwin ! no clean windows 2101 2102 CPU_ADDR(%g4, %g5) 2103 ldn [%g4 + CPU_MPCB], %g4 2104 brz,a,pn %g4, 1f 2105 nop 2106 ld [%g4 + MPCB_WSTATE], %g5 2107 add %g5, WSTATE_CLEAN_OFFSET, %g5 2108 wrpr %g0, %g5, %wstate 21091: FAST_TRAP_DONE 2110 2111/* 2112 * .spill_clean: clean the previous window, restore the wstate, and 2113 * "done". 2114 * 2115 * Entry: %g7 contains new wstate 2116 */ 2117.spill_clean: 2118 sethi %hi(nwin_minus_one), %g5 2119 ld [%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1 2120 rdpr %cwp, %g6 ! %g6 = %cwp 2121 deccc %g6 ! %g6-- 2122 movneg %xcc, %g5, %g6 ! if (%g6<0) %g6 = nwin-1 2123 wrpr %g6, %cwp 2124 TT_TRACE_L(trace_win) 2125 clr %l0 2126 clr %l1 2127 clr %l2 2128 clr %l3 2129 clr %l4 2130 clr %l5 2131 clr %l6 2132 clr %l7 2133 wrpr %g0, %g7, %wstate 2134 saved 2135 retry ! restores correct %cwp 2136 2137.fix_alignment: 2138 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2139 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 2140 ldn [%g1 + T_PROCP], %g1 2141 mov 1, %g2 2142 stb %g2, [%g1 + P_FIXALIGNMENT] 2143 FAST_TRAP_DONE 2144 2145#define STDF_REG(REG, ADDR, TMP) \ 2146 sll REG, 3, REG ;\ 2147mark1: set start1, TMP ;\ 2148 jmp REG + TMP ;\ 2149 nop ;\ 2150start1: ba,pt %xcc, done1 ;\ 2151 std %f0, [ADDR + CPU_TMP1] ;\ 2152 ba,pt %xcc, done1 ;\ 2153 std %f32, [ADDR + CPU_TMP1] ;\ 2154 ba,pt %xcc, done1 ;\ 2155 std %f2, [ADDR + CPU_TMP1] ;\ 2156 ba,pt %xcc, done1 ;\ 2157 std %f34, [ADDR + CPU_TMP1] ;\ 2158 ba,pt %xcc, done1 ;\ 2159 std %f4, [ADDR + CPU_TMP1] ;\ 2160 ba,pt %xcc, done1 ;\ 2161 std %f36, [ADDR + CPU_TMP1] ;\ 2162 ba,pt %xcc, done1 ;\ 2163 std %f6, [ADDR + CPU_TMP1] ;\ 2164 ba,pt %xcc, done1 ;\ 2165 std %f38, [ADDR + CPU_TMP1] ;\ 2166 ba,pt %xcc, done1 ;\ 2167 std %f8, [ADDR + CPU_TMP1] ;\ 2168 ba,pt %xcc, done1 ;\ 2169 std %f40, [ADDR + CPU_TMP1] ;\ 2170 ba,pt %xcc, done1 ;\ 2171 std %f10, [ADDR + CPU_TMP1] ;\ 2172 ba,pt %xcc, done1 ;\ 2173 std %f42, [ADDR + CPU_TMP1] ;\ 2174 ba,pt %xcc, done1 ;\ 2175 std %f12, [ADDR + CPU_TMP1] ;\ 2176 ba,pt %xcc, done1 ;\ 2177 std %f44, [ADDR + CPU_TMP1] ;\ 2178 ba,pt %xcc, done1 ;\ 2179 std %f14, [ADDR + CPU_TMP1] ;\ 2180 ba,pt %xcc, done1 ;\ 2181 std %f46, [ADDR + CPU_TMP1] ;\ 2182 ba,pt %xcc, done1 ;\ 2183 std %f16, [ADDR + CPU_TMP1] ;\ 2184 ba,pt %xcc, done1 ;\ 2185 std %f48, [ADDR + CPU_TMP1] ;\ 2186 ba,pt %xcc, done1 ;\ 2187 std %f18, [ADDR + CPU_TMP1] ;\ 2188 ba,pt %xcc, done1 ;\ 2189 std %f50, [ADDR + CPU_TMP1] ;\ 2190 ba,pt %xcc, done1 ;\ 2191 std %f20, [ADDR + CPU_TMP1] ;\ 2192 ba,pt %xcc, done1 ;\ 2193 std %f52, [ADDR + CPU_TMP1] ;\ 2194 ba,pt %xcc, done1 ;\ 2195 std %f22, [ADDR + CPU_TMP1] ;\ 2196 ba,pt %xcc, done1 ;\ 2197 std %f54, [ADDR + CPU_TMP1] ;\ 2198 ba,pt %xcc, done1 ;\ 2199 std %f24, [ADDR + CPU_TMP1] ;\ 2200 ba,pt %xcc, done1 ;\ 2201 std %f56, [ADDR + CPU_TMP1] ;\ 2202 ba,pt %xcc, done1 ;\ 2203 std %f26, [ADDR + CPU_TMP1] ;\ 2204 ba,pt %xcc, done1 ;\ 2205 std %f58, [ADDR + CPU_TMP1] ;\ 2206 ba,pt %xcc, done1 ;\ 2207 std %f28, [ADDR + CPU_TMP1] ;\ 2208 ba,pt %xcc, done1 ;\ 2209 std %f60, [ADDR + CPU_TMP1] ;\ 2210 ba,pt %xcc, done1 ;\ 2211 std %f30, [ADDR + CPU_TMP1] ;\ 2212 ba,pt %xcc, done1 ;\ 2213 std %f62, [ADDR + CPU_TMP1] ;\ 2214done1: 2215 2216#define LDDF_REG(REG, ADDR, TMP) \ 2217 sll REG, 3, REG ;\ 2218mark2: set start2, TMP ;\ 2219 jmp REG + TMP ;\ 2220 nop ;\ 2221start2: ba,pt %xcc, done2 ;\ 2222 ldd [ADDR + CPU_TMP1], %f0 ;\ 2223 ba,pt %xcc, done2 ;\ 2224 ldd [ADDR + CPU_TMP1], %f32 ;\ 2225 ba,pt %xcc, done2 ;\ 2226 ldd [ADDR + CPU_TMP1], %f2 ;\ 2227 ba,pt %xcc, done2 ;\ 2228 ldd [ADDR + CPU_TMP1], %f34 ;\ 2229 ba,pt %xcc, done2 ;\ 2230 ldd [ADDR + CPU_TMP1], %f4 ;\ 2231 ba,pt %xcc, done2 ;\ 2232 ldd [ADDR + CPU_TMP1], %f36 ;\ 2233 ba,pt %xcc, done2 ;\ 2234 ldd [ADDR + CPU_TMP1], %f6 ;\ 2235 ba,pt %xcc, done2 ;\ 2236 ldd [ADDR + CPU_TMP1], %f38 ;\ 2237 ba,pt %xcc, done2 ;\ 2238 ldd [ADDR + CPU_TMP1], %f8 ;\ 2239 ba,pt %xcc, done2 ;\ 2240 ldd [ADDR + CPU_TMP1], %f40 ;\ 2241 ba,pt %xcc, done2 ;\ 2242 ldd [ADDR + CPU_TMP1], %f10 ;\ 2243 ba,pt %xcc, done2 ;\ 2244 ldd [ADDR + CPU_TMP1], %f42 ;\ 2245 ba,pt %xcc, done2 ;\ 2246 ldd [ADDR + CPU_TMP1], %f12 ;\ 2247 ba,pt %xcc, done2 ;\ 2248 ldd [ADDR + CPU_TMP1], %f44 ;\ 2249 ba,pt %xcc, done2 ;\ 2250 ldd [ADDR + CPU_TMP1], %f14 ;\ 2251 ba,pt %xcc, done2 ;\ 2252 ldd [ADDR + CPU_TMP1], %f46 ;\ 2253 ba,pt %xcc, done2 ;\ 2254 ldd [ADDR + CPU_TMP1], %f16 ;\ 2255 ba,pt %xcc, done2 ;\ 2256 ldd [ADDR + CPU_TMP1], %f48 ;\ 2257 ba,pt %xcc, done2 ;\ 2258 ldd [ADDR + CPU_TMP1], %f18 ;\ 2259 ba,pt %xcc, done2 ;\ 2260 ldd [ADDR + CPU_TMP1], %f50 ;\ 2261 ba,pt %xcc, done2 ;\ 2262 ldd [ADDR + CPU_TMP1], %f20 ;\ 2263 ba,pt %xcc, done2 ;\ 2264 ldd [ADDR + CPU_TMP1], %f52 ;\ 2265 ba,pt %xcc, done2 ;\ 2266 ldd [ADDR + CPU_TMP1], %f22 ;\ 2267 ba,pt %xcc, done2 ;\ 2268 ldd [ADDR + CPU_TMP1], %f54 ;\ 2269 ba,pt %xcc, done2 ;\ 2270 ldd [ADDR + CPU_TMP1], %f24 ;\ 2271 ba,pt %xcc, done2 ;\ 2272 ldd [ADDR + CPU_TMP1], %f56 ;\ 2273 ba,pt %xcc, done2 ;\ 2274 ldd [ADDR + CPU_TMP1], %f26 ;\ 2275 ba,pt %xcc, done2 ;\ 2276 ldd [ADDR + CPU_TMP1], %f58 ;\ 2277 ba,pt %xcc, done2 ;\ 2278 ldd [ADDR + CPU_TMP1], %f28 ;\ 2279 ba,pt %xcc, done2 ;\ 2280 ldd [ADDR + CPU_TMP1], %f60 ;\ 2281 ba,pt %xcc, done2 ;\ 2282 ldd [ADDR + CPU_TMP1], %f30 ;\ 2283 ba,pt %xcc, done2 ;\ 2284 ldd [ADDR + CPU_TMP1], %f62 ;\ 2285done2: 2286 2287.lddf_exception_not_aligned: 2288 /* 2289 * Cheetah overwrites SFAR on a DTLB miss, hence read it now. 2290 */ 2291 ldxa [MMU_SFAR]%asi, %g5 ! misaligned vaddr in %g5 2292 2293#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 2294 sethi %hi(fpu_exists), %g2 ! check fpu_exists 2295 ld [%g2 + %lo(fpu_exists)], %g2 2296 brz,a,pn %g2, 4f 2297 nop 2298#endif 2299 CPU_ADDR(%g1, %g4) 2300 or %g0, 1, %g4 2301 st %g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag 2302 2303 rdpr %tpc, %g2 2304 lda [%g2]ASI_AIUP, %g6 ! get the user's lddf instruction 2305 srl %g6, 23, %g1 ! using ldda or not? 2306 and %g1, 1, %g1 2307 brz,a,pt %g1, 2f ! check for ldda instruction 2308 nop 2309 srl %g6, 13, %g1 ! check immflag 2310 and %g1, 1, %g1 2311 rdpr %tstate, %g2 ! %tstate in %g2 2312 brnz,a,pn %g1, 1f 2313 srl %g2, 31, %g1 ! get asi from %tstate 2314 srl %g6, 5, %g1 ! get asi from instruction 2315 and %g1, 0xFF, %g1 ! imm_asi field 23161: 2317 cmp %g1, ASI_P ! primary address space 2318 be,a,pt %icc, 2f 2319 nop 2320 cmp %g1, ASI_PNF ! primary no fault address space 2321 be,a,pt %icc, 2f 2322 nop 2323 cmp %g1, ASI_S ! secondary address space 2324 be,a,pt %icc, 2f 2325 nop 2326 cmp %g1, ASI_SNF ! secondary no fault address space 2327 bne,a,pn %icc, 3f 2328 nop 23292: 2330 lduwa [%g5]ASI_USER, %g7 ! get first half of misaligned data 2331 add %g5, 4, %g5 ! increment misaligned data address 2332 lduwa [%g5]ASI_USER, %g5 ! get second half of misaligned data 2333 2334 sllx %g7, 32, %g7 2335 or %g5, %g7, %g5 ! combine data 2336 CPU_ADDR(%g7, %g1) ! save data on a per-cpu basis 2337 stx %g5, [%g7 + CPU_TMP1] ! save in cpu_tmp1 2338 2339 srl %g6, 25, %g3 ! %g6 has the instruction 2340 and %g3, 0x1F, %g3 ! %g3 has rd 2341 LDDF_REG(%g3, %g7, %g4) 2342 2343 CPU_ADDR(%g1, %g4) 2344 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 2345 FAST_TRAP_DONE 23463: 2347 CPU_ADDR(%g1, %g4) 2348 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 23494: 2350 set T_USER, %g3 ! trap type in %g3 2351 or %g3, T_LDDF_ALIGN, %g3 2352 mov %g5, %g2 ! misaligned vaddr in %g2 2353 set fpu_trap, %g1 ! goto C for the little and 2354 ba,pt %xcc, sys_trap ! no fault little asi's 2355 sub %g0, 1, %g4 2356 2357.stdf_exception_not_aligned: 2358 /* 2359 * Cheetah overwrites SFAR on a DTLB miss, hence read it now. 2360 */ 2361 ldxa [MMU_SFAR]%asi, %g5 ! misaligned vaddr in %g5 2362 2363#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 2364 sethi %hi(fpu_exists), %g7 ! check fpu_exists 2365 ld [%g7 + %lo(fpu_exists)], %g3 2366 brz,a,pn %g3, 4f 2367 nop 2368#endif 2369 CPU_ADDR(%g1, %g4) 2370 or %g0, 1, %g4 2371 st %g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag 2372 2373 rdpr %tpc, %g2 2374 lda [%g2]ASI_AIUP, %g6 ! get the user's stdf instruction 2375 2376 srl %g6, 23, %g1 ! using stda or not? 2377 and %g1, 1, %g1 2378 brz,a,pt %g1, 2f ! check for stda instruction 2379 nop 2380 srl %g6, 13, %g1 ! check immflag 2381 and %g1, 1, %g1 2382 rdpr %tstate, %g2 ! %tstate in %g2 2383 brnz,a,pn %g1, 1f 2384 srl %g2, 31, %g1 ! get asi from %tstate 2385 srl %g6, 5, %g1 ! get asi from instruction 2386 and %g1, 0xFF, %g1 ! imm_asi field 23871: 2388 cmp %g1, ASI_P ! primary address space 2389 be,a,pt %icc, 2f 2390 nop 2391 cmp %g1, ASI_S ! secondary address space 2392 bne,a,pn %icc, 3f 2393 nop 23942: 2395 srl %g6, 25, %g6 2396 and %g6, 0x1F, %g6 ! %g6 has rd 2397 CPU_ADDR(%g7, %g1) 2398 STDF_REG(%g6, %g7, %g4) ! STDF_REG(REG, ADDR, TMP) 2399 2400 ldx [%g7 + CPU_TMP1], %g6 2401 srlx %g6, 32, %g7 2402 stuwa %g7, [%g5]ASI_USER ! first half 2403 add %g5, 4, %g5 ! increment misaligned data address 2404 stuwa %g6, [%g5]ASI_USER ! second half 2405 2406 CPU_ADDR(%g1, %g4) 2407 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 2408 FAST_TRAP_DONE 24093: 2410 CPU_ADDR(%g1, %g4) 2411 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 24124: 2413 set T_USER, %g3 ! trap type in %g3 2414 or %g3, T_STDF_ALIGN, %g3 2415 mov %g5, %g2 ! misaligned vaddr in %g2 2416 set fpu_trap, %g1 ! goto C for the little and 2417 ba,pt %xcc, sys_trap ! nofault little asi's 2418 sub %g0, 1, %g4 2419 2420#ifdef DEBUG_USER_TRAPTRACECTL 2421 2422.traptrace_freeze: 2423 mov %l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4 2424 TT_TRACE_L(trace_win) 2425 mov %g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0 2426 set trap_freeze, %g1 2427 mov 1, %g2 2428 st %g2, [%g1] 2429 FAST_TRAP_DONE 2430 2431.traptrace_unfreeze: 2432 set trap_freeze, %g1 2433 st %g0, [%g1] 2434 mov %l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4 2435 TT_TRACE_L(trace_win) 2436 mov %g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0 2437 FAST_TRAP_DONE 2438 2439#endif /* DEBUG_USER_TRAPTRACECTL */ 2440 2441.getcc: 2442 CPU_ADDR(%g1, %g2) 2443 stx %o0, [%g1 + CPU_TMP1] ! save %o0 2444 stx %o1, [%g1 + CPU_TMP2] ! save %o1 2445 rdpr %tstate, %g3 ! get tstate 2446 srlx %g3, PSR_TSTATE_CC_SHIFT, %o0 ! shift ccr to V8 psr 2447 set PSR_ICC, %g2 2448 and %o0, %g2, %o0 ! mask out the rest 2449 srl %o0, PSR_ICC_SHIFT, %o0 ! right justify 2450 rdpr %pstate, %o1 2451 wrpr %o1, PSTATE_AG, %pstate ! get into normal globals 2452 mov %o0, %g1 ! move ccr to normal %g1 2453 wrpr %g0, %o1, %pstate ! back into alternate globals 2454 ldx [%g1 + CPU_TMP1], %o0 ! restore %o0 2455 ldx [%g1 + CPU_TMP2], %o1 ! restore %o1 2456 FAST_TRAP_DONE 2457 2458.setcc: 2459 CPU_ADDR(%g1, %g2) 2460 stx %o0, [%g1 + CPU_TMP1] ! save %o0 2461 stx %o1, [%g1 + CPU_TMP2] ! save %o1 2462 rdpr %pstate, %o0 2463 wrpr %o0, PSTATE_AG, %pstate ! get into normal globals 2464 mov %g1, %o1 2465 wrpr %g0, %o0, %pstate ! back to alternates 2466 sll %o1, PSR_ICC_SHIFT, %g2 2467 set PSR_ICC, %g3 2468 and %g2, %g3, %g2 ! mask out rest 2469 sllx %g2, PSR_TSTATE_CC_SHIFT, %g2 2470 rdpr %tstate, %g3 ! get tstate 2471 srl %g3, 0, %g3 ! clear upper word 2472 or %g3, %g2, %g3 ! or in new bits 2473 wrpr %g3, %tstate 2474 ldx [%g1 + CPU_TMP1], %o0 ! restore %o0 2475 ldx [%g1 + CPU_TMP2], %o1 ! restore %o1 2476 FAST_TRAP_DONE 2477 2478/* 2479 * getpsr(void) 2480 * Note that the xcc part of the ccr is not provided. 2481 * The V8 code shows why the V9 trap is not faster: 2482 * #define GETPSR_TRAP() \ 2483 * mov %psr, %i0; jmp %l2; rett %l2+4; nop; 2484 */ 2485 2486 .type .getpsr, #function 2487.getpsr: 2488 rdpr %tstate, %g1 ! get tstate 2489 srlx %g1, PSR_TSTATE_CC_SHIFT, %o0 ! shift ccr to V8 psr 2490 set PSR_ICC, %g2 2491 and %o0, %g2, %o0 ! mask out the rest 2492 2493 rd %fprs, %g1 ! get fprs 2494 and %g1, FPRS_FEF, %g2 ! mask out dirty upper/lower 2495 sllx %g2, PSR_FPRS_FEF_SHIFT, %g2 ! shift fef to V8 psr.ef 2496 or %o0, %g2, %o0 ! or result into psr.ef 2497 2498 set V9_PSR_IMPLVER, %g2 ! SI assigned impl/ver: 0xef 2499 or %o0, %g2, %o0 ! or psr.impl/ver 2500 FAST_TRAP_DONE 2501 SET_SIZE(.getpsr) 2502 2503/* 2504 * setpsr(newpsr) 2505 * Note that there is no support for ccr.xcc in the V9 code. 2506 */ 2507 2508 .type .setpsr, #function 2509.setpsr: 2510 rdpr %tstate, %g1 ! get tstate 2511! setx TSTATE_V8_UBITS, %g2 2512 or %g0, CCR_ICC, %g3 2513 sllx %g3, TSTATE_CCR_SHIFT, %g2 2514 2515 andn %g1, %g2, %g1 ! zero current user bits 2516 set PSR_ICC, %g2 2517 and %g2, %o0, %g2 ! clear all but psr.icc bits 2518 sllx %g2, PSR_TSTATE_CC_SHIFT, %g3 ! shift to tstate.ccr.icc 2519 wrpr %g1, %g3, %tstate ! write tstate 2520 2521 set PSR_EF, %g2 2522 and %g2, %o0, %g2 ! clear all but fp enable bit 2523 srlx %g2, PSR_FPRS_FEF_SHIFT, %g4 ! shift ef to V9 fprs.fef 2524 wr %g0, %g4, %fprs ! write fprs 2525 2526 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 2527 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2528 ldn [%g2 + T_LWP], %g3 ! load klwp pointer 2529 ldn [%g3 + LWP_FPU], %g2 ! get lwp_fpu pointer 2530 stuw %g4, [%g2 + FPU_FPRS] ! write fef value to fpu_fprs 2531 srlx %g4, 2, %g4 ! shift fef value to bit 0 2532 stub %g4, [%g2 + FPU_EN] ! write fef value to fpu_en 2533 FAST_TRAP_DONE 2534 SET_SIZE(.setpsr) 2535 2536/* 2537 * getlgrp 2538 * get home lgrpid on which the calling thread is currently executing. 2539 */ 2540 .type .getlgrp, #function 2541.getlgrp: 2542 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2543 ld [%g1 + CPU_ID], %o0 ! load cpu_id 2544 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2545 ldn [%g2 + T_LPL], %g2 ! load lpl pointer 2546 ld [%g2 + LPL_LGRPID], %g1 ! load lpl_lgrpid 2547 sra %g1, 0, %o1 2548 FAST_TRAP_DONE 2549 SET_SIZE(.getlgrp) 2550 2551/* 2552 * Entry for old 4.x trap (trap 0). 2553 */ 2554 ENTRY_NP(syscall_trap_4x) 2555 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2556 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2557 ldn [%g2 + T_LWP], %g2 ! load klwp pointer 2558 ld [%g2 + PCB_TRAP0], %g2 ! lwp->lwp_pcb.pcb_trap0addr 2559 brz,pn %g2, 1f ! has it been set? 2560 st %l0, [%g1 + CPU_TMP1] ! delay - save some locals 2561 st %l1, [%g1 + CPU_TMP2] 2562 rdpr %tnpc, %l1 ! save old tnpc 2563 wrpr %g0, %g2, %tnpc ! setup tnpc 2564 2565 rdpr %pstate, %l0 2566 wrpr %l0, PSTATE_AG, %pstate ! switch to normal globals 2567 mov %l1, %g6 ! pass tnpc to user code in %g6 2568 wrpr %l0, %g0, %pstate ! switch back to alternate globals 2569 2570 ! Note that %g1 still contains CPU struct addr 2571 ld [%g1 + CPU_TMP2], %l1 ! restore locals 2572 ld [%g1 + CPU_TMP1], %l0 2573 FAST_TRAP_DONE_CHK_INTR 25741: 2575 mov %g1, %l0 2576 st %l1, [%g1 + CPU_TMP2] 2577 rdpr %pstate, %l1 2578 wrpr %l1, PSTATE_AG, %pstate 2579 ! 2580 ! check for old syscall mmap which is the only different one which 2581 ! must be the same. Others are handled in the compatibility library. 2582 ! 2583 cmp %g1, OSYS_mmap ! compare to old 4.x mmap 2584 movz %icc, SYS_mmap, %g1 2585 wrpr %g0, %l1, %pstate 2586 ld [%l0 + CPU_TMP2], %l1 ! restore locals 2587 ld [%l0 + CPU_TMP1], %l0 2588 SYSCALL(syscall_trap32) 2589 SET_SIZE(syscall_trap_4x) 2590 2591/* 2592 * Handler for software trap 9. 2593 * Set trap0 emulation address for old 4.x system call trap. 2594 * XXX - this should be a system call. 2595 */ 2596 ENTRY_NP(set_trap0_addr) 2597 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2598 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2599 ldn [%g2 + T_LWP], %g2 ! load klwp pointer 2600 st %l0, [%g1 + CPU_TMP1] ! save some locals 2601 st %l1, [%g1 + CPU_TMP2] 2602 rdpr %pstate, %l0 2603 wrpr %l0, PSTATE_AG, %pstate 2604 mov %g1, %l1 2605 wrpr %g0, %l0, %pstate 2606 andn %l1, 3, %l1 ! force alignment 2607 st %l1, [%g2 + PCB_TRAP0] ! lwp->lwp_pcb.pcb_trap0addr 2608 ld [%g1 + CPU_TMP1], %l0 ! restore locals 2609 ld [%g1 + CPU_TMP2], %l1 2610 FAST_TRAP_DONE 2611 SET_SIZE(set_trap0_addr) 2612 2613/* 2614 * mmu_trap_tl1 2615 * trap handler for unexpected mmu traps. 2616 * simply checks if the trap was a user lddf/stdf alignment trap, in which 2617 * case we go to fpu_trap or a user trap from the window handler, in which 2618 * case we go save the state on the pcb. Otherwise, we go to ptl1_panic. 2619 */ 2620 .type mmu_trap_tl1, #function 2621mmu_trap_tl1: 2622#ifdef TRAPTRACE 2623 TRACE_PTR(%g5, %g6) 2624 GET_TRACE_TICK(%g6) 2625 stxa %g6, [%g5 + TRAP_ENT_TICK]%asi 2626 rdpr %tl, %g6 2627 stha %g6, [%g5 + TRAP_ENT_TL]%asi 2628 rdpr %tt, %g6 2629 stha %g6, [%g5 + TRAP_ENT_TT]%asi 2630 rdpr %tstate, %g6 2631 stxa %g6, [%g5 + TRAP_ENT_TSTATE]%asi 2632 stna %sp, [%g5 + TRAP_ENT_SP]%asi 2633 stna %g0, [%g5 + TRAP_ENT_TR]%asi 2634 rdpr %tpc, %g6 2635 stna %g6, [%g5 + TRAP_ENT_TPC]%asi 2636 set MMU_SFAR, %g6 2637 ldxa [%g6]ASI_DMMU, %g6 2638 stxa %g6, [%g5 + TRAP_ENT_F1]%asi 2639 CPU_PADDR(%g7, %g6); 2640 add %g7, CPU_TL1_HDLR, %g7 2641 lda [%g7]ASI_MEM, %g6 2642 stxa %g6, [%g5 + TRAP_ENT_F2]%asi 2643 set 0xdeadbeef, %g6 2644 stna %g6, [%g5 + TRAP_ENT_F3]%asi 2645 stna %g6, [%g5 + TRAP_ENT_F4]%asi 2646 TRACE_NEXT(%g5, %g6, %g7) 2647#endif /* TRAPTRACE */ 2648 2649 GET_CPU_IMPL(%g5) 2650 cmp %g5, PANTHER_IMPL 2651 bne mmu_trap_tl1_4 2652 nop 2653 rdpr %tt, %g5 2654 cmp %g5, T_DATA_EXCEPTION 2655 bne mmu_trap_tl1_4 2656 nop 2657 wr %g0, ASI_DMMU, %asi 2658 ldxa [MMU_SFSR]%asi, %g5 2659 mov 1, %g6 2660 sllx %g6, PN_SFSR_PARITY_SHIFT, %g6 2661 andcc %g5, %g6, %g0 2662 bz mmu_trap_tl1_4 2663 2664 /* 2665 * We are running on a Panther and have hit a DTLB parity error. 2666 */ 2667 ldxa [MMU_TAG_ACCESS]%asi, %g2 2668 mov %g5, %g3 2669 ba,pt %xcc, .mmu_exception_is_tlb_parity 2670 mov T_DATA_EXCEPTION, %g1 2671 2672mmu_trap_tl1_4: 2673 CPU_PADDR(%g7, %g6); 2674 add %g7, CPU_TL1_HDLR, %g7 ! %g7 = &cpu_m.tl1_hdlr (PA) 2675 /* 2676 * AM is cleared on trap, so addresses are 64 bit 2677 */ 2678 lda [%g7]ASI_MEM, %g6 2679 brz,a,pt %g6, 1f 2680 nop 2681 /* 2682 * We are going to update cpu_m.tl1_hdlr using physical address. 2683 * Flush the D$ line, so that stale data won't be accessed later. 2684 */ 2685 CPU_ADDR(%g6, %g5) 2686 add %g6, CPU_TL1_HDLR, %g6 ! %g6 = &cpu_m.tl1_hdlr (VA) 2687 GET_CPU_IMPL(%g5) 2688 cmp %g5, CHEETAH_IMPL 2689 bl,pt %icc, 3f 2690 cmp %g5, SPITFIRE_IMPL 2691 stxa %g0, [%g7]ASI_DC_INVAL 2692 membar #Sync 2693 ba,pt %xcc, 2f 2694 nop 26953: 2696 bl,pt %icc, 2f 2697 sethi %hi(dcache_line_mask), %g5 2698 ld [%g5 + %lo(dcache_line_mask)], %g5 2699 and %g6, %g5, %g5 2700 stxa %g0, [%g5]ASI_DC_TAG 2701 membar #Sync 27022: 2703 sta %g0, [%g7]ASI_MEM 2704 SWITCH_GLOBALS ! back to mmu globals 2705 ba,a,pt %xcc, sfmmu_mmu_trap ! handle page faults 27061: 2707 rdpr %tt, %g5 2708 rdpr %tl, %g7 2709 sub %g7, 1, %g6 2710 wrpr %g6, %tl 2711 rdpr %tt, %g6 2712 wrpr %g7, %tl 2713 and %g6, WTRAP_TTMASK, %g6 2714 cmp %g6, WTRAP_TYPE 2715 bne,a,pn %xcc, ptl1_panic 2716 mov PTL1_BAD_MMUTRAP, %g1 2717 rdpr %tpc, %g7 2718 /* tpc should be in the trap table */ 2719 set trap_table, %g6 2720 cmp %g7, %g6 2721 blt,a,pn %xcc, ptl1_panic 2722 mov PTL1_BAD_MMUTRAP, %g1 2723 set etrap_table, %g6 2724 cmp %g7, %g6 2725 bge,a,pn %xcc, ptl1_panic 2726 mov PTL1_BAD_MMUTRAP, %g1 2727 cmp %g5, T_ALIGNMENT 2728 move %icc, MMU_SFAR, %g6 2729 movne %icc, MMU_TAG_ACCESS, %g6 2730 ldxa [%g6]ASI_DMMU, %g6 2731 andn %g7, WTRAP_ALIGN, %g7 /* 128 byte aligned */ 2732 add %g7, WTRAP_FAULTOFF, %g7 2733 wrpr %g0, %g7, %tnpc 2734 done 2735 SET_SIZE(mmu_trap_tl1) 2736 2737/* 2738 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers. These 2739 * traps are valid only when kmdb is loaded. When the debugger is active, 2740 * the code below is rewritten to transfer control to the appropriate 2741 * debugger entry points. 2742 */ 2743 .global kmdb_trap 2744 .align 8 2745kmdb_trap: 2746 ba,a trap_table0 2747 jmp %g1 + 0 2748 nop 2749 2750 .global kmdb_trap_tl1 2751 .align 8 2752kmdb_trap_tl1: 2753 ba,a trap_table0 2754 jmp %g1 + 0 2755 nop 2756 2757/* 2758 * This entry is copied from OBP's trap table during boot. 2759 */ 2760 .global obp_bpt 2761 .align 8 2762obp_bpt: 2763 NOT 2764 2765/* 2766 * if kernel, set PCONTEXT to 0 for debuggers 2767 * if user, clear nucleus page sizes 2768 */ 2769 .global kctx_obp_bpt 2770kctx_obp_bpt: 2771 set obp_bpt, %g2 27721: 2773 mov MMU_PCONTEXT, %g1 2774 ldxa [%g1]ASI_DMMU, %g1 2775 srlx %g1, CTXREG_NEXT_SHIFT, %g3 2776 brz,pt %g3, 3f ! nucleus pgsz is 0, no problem 2777 sllx %g3, CTXREG_NEXT_SHIFT, %g3 2778 set CTXREG_CTX_MASK, %g4 ! check Pcontext 2779 btst %g4, %g1 2780 bz,a,pt %xcc, 2f 2781 clr %g3 ! kernel: PCONTEXT=0 2782 xor %g3, %g1, %g3 ! user: clr N_pgsz0/1 bits 27832: 2784 set DEMAP_ALL_TYPE, %g1 2785 stxa %g0, [%g1]ASI_DTLB_DEMAP 2786 stxa %g0, [%g1]ASI_ITLB_DEMAP 2787 mov MMU_PCONTEXT, %g1 2788 stxa %g3, [%g1]ASI_DMMU 2789 membar #Sync 2790 sethi %hi(FLUSH_ADDR), %g1 2791 flush %g1 ! flush required by immu 27923: 2793 jmp %g2 2794 nop 2795 2796 2797#ifdef TRAPTRACE 2798/* 2799 * TRAPTRACE support. 2800 * labels here are branched to with "rd %pc, %g7" in the delay slot. 2801 * Return is done by "jmp %g7 + 4". 2802 */ 2803 2804trace_gen: 2805 TRACE_PTR(%g3, %g6) 2806 GET_TRACE_TICK(%g6) 2807 stxa %g6, [%g3 + TRAP_ENT_TICK]%asi 2808 rdpr %tl, %g6 2809 stha %g6, [%g3 + TRAP_ENT_TL]%asi 2810 rdpr %tt, %g6 2811 stha %g6, [%g3 + TRAP_ENT_TT]%asi 2812 rdpr %tstate, %g6 2813 stxa %g6, [%g3 + TRAP_ENT_TSTATE]%asi 2814 stna %sp, [%g3 + TRAP_ENT_SP]%asi 2815 rdpr %tpc, %g6 2816 stna %g6, [%g3 + TRAP_ENT_TPC]%asi 2817 TRACE_NEXT(%g3, %g4, %g5) 2818 jmp %g7 + 4 2819 nop 2820 2821trace_win: 2822 TRACE_WIN_INFO(0, %l0, %l1, %l2) 2823 ! Keep the locals as clean as possible, caller cleans %l4 2824 clr %l2 2825 clr %l1 2826 jmp %l4 + 4 2827 clr %l0 2828 2829/* 2830 * Trace a tsb hit 2831 * g1 = tsbe pointer (in/clobbered) 2832 * g2 = tag access register (in) 2833 * g3 - g4 = scratch (clobbered) 2834 * g5 = tsbe data (in) 2835 * g6 = scratch (clobbered) 2836 * g7 = pc we jumped here from (in) 2837 */ 2838 2839 ! Do not disturb %g5, it will be used after the trace 2840 ALTENTRY(trace_tsbhit) 2841 TRACE_TSBHIT(0) 2842 jmp %g7 + 4 2843 nop 2844 2845/* 2846 * Trace a TSB miss 2847 * 2848 * g1 = tsb8k pointer (in) 2849 * g2 = tag access register (in) 2850 * g3 = tsb4m pointer (in) 2851 * g4 = tsbe tag (in/clobbered) 2852 * g5 - g6 = scratch (clobbered) 2853 * g7 = pc we jumped here from (in) 2854 */ 2855 .global trace_tsbmiss 2856trace_tsbmiss: 2857 membar #Sync 2858 sethi %hi(FLUSH_ADDR), %g6 2859 flush %g6 2860 TRACE_PTR(%g5, %g6) 2861 GET_TRACE_TICK(%g6) 2862 stxa %g6, [%g5 + TRAP_ENT_TICK]%asi 2863 stxa %g2, [%g5 + TRAP_ENT_SP]%asi ! tag access 2864 stxa %g4, [%g5 + TRAP_ENT_F1]%asi ! tsb tag 2865 rdpr %tnpc, %g6 2866 stxa %g6, [%g5 + TRAP_ENT_F2]%asi 2867 stna %g1, [%g5 + TRAP_ENT_F3]%asi ! tsb8k pointer 2868 srlx %g1, 32, %g6 2869 stna %g6, [%g5 + TRAP_ENT_F4]%asi ! huh? 2870 rdpr %tpc, %g6 2871 stna %g6, [%g5 + TRAP_ENT_TPC]%asi 2872 rdpr %tl, %g6 2873 stha %g6, [%g5 + TRAP_ENT_TL]%asi 2874 rdpr %tt, %g6 2875 or %g6, TT_MMU_MISS, %g4 2876 stha %g4, [%g5 + TRAP_ENT_TT]%asi 2877 cmp %g6, FAST_IMMU_MISS_TT 2878 be,a %icc, 1f 2879 ldxa [%g0]ASI_IMMU, %g6 2880 ldxa [%g0]ASI_DMMU, %g6 28811: stxa %g6, [%g5 + TRAP_ENT_TSTATE]%asi ! tag target 2882 stxa %g3, [%g5 + TRAP_ENT_TR]%asi ! tsb4m pointer 2883 TRACE_NEXT(%g5, %g4, %g6) 2884 jmp %g7 + 4 2885 nop 2886 2887/* 2888 * g2 = tag access register (in) 2889 * g3 = ctx number (in) 2890 */ 2891trace_dataprot: 2892 membar #Sync 2893 sethi %hi(FLUSH_ADDR), %g6 2894 flush %g6 2895 TRACE_PTR(%g1, %g6) 2896 GET_TRACE_TICK(%g6) 2897 stxa %g6, [%g1 + TRAP_ENT_TICK]%asi 2898 rdpr %tpc, %g6 2899 stna %g6, [%g1 + TRAP_ENT_TPC]%asi 2900 rdpr %tstate, %g6 2901 stxa %g6, [%g1 + TRAP_ENT_TSTATE]%asi 2902 stxa %g2, [%g1 + TRAP_ENT_SP]%asi ! tag access reg 2903 stxa %g0, [%g1 + TRAP_ENT_TR]%asi 2904 stxa %g0, [%g1 + TRAP_ENT_F1]%asi 2905 stxa %g0, [%g1 + TRAP_ENT_F2]%asi 2906 stxa %g0, [%g1 + TRAP_ENT_F3]%asi 2907 stxa %g0, [%g1 + TRAP_ENT_F4]%asi 2908 rdpr %tl, %g6 2909 stha %g6, [%g1 + TRAP_ENT_TL]%asi 2910 rdpr %tt, %g6 2911 stha %g6, [%g1 + TRAP_ENT_TT]%asi 2912 TRACE_NEXT(%g1, %g4, %g5) 2913 jmp %g7 + 4 2914 nop 2915 2916#endif /* TRAPTRACE */ 2917 2918/* 2919 * expects offset into tsbmiss area in %g1 and return pc in %g7 2920 */ 2921stat_mmu: 2922 CPU_INDEX(%g5, %g6) 2923 sethi %hi(tsbmiss_area), %g6 2924 sllx %g5, TSBMISS_SHIFT, %g5 2925 or %g6, %lo(tsbmiss_area), %g6 2926 add %g6, %g5, %g6 /* g6 = tsbmiss area */ 2927 ld [%g6 + %g1], %g5 2928 add %g5, 1, %g5 2929 jmp %g7 + 4 2930 st %g5, [%g6 + %g1] 2931 2932 2933/* 2934 * fast_trap_done, fast_trap_done_chk_intr: 2935 * 2936 * Due to the design of UltraSPARC pipeline, pending interrupts are not 2937 * taken immediately after a RETRY or DONE instruction which causes IE to 2938 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed 2939 * to execute first before taking any interrupts. If that instruction 2940 * results in other traps, and if the corresponding trap handler runs 2941 * entirely at TL=1 with interrupts disabled, then pending interrupts 2942 * won't be taken until after yet another instruction following the %tpc 2943 * or %tnpc. 2944 * 2945 * A malicious user program can use this feature to block out interrupts 2946 * for extended durations, which can result in send_mondo_timeout kernel 2947 * panic. 2948 * 2949 * This problem is addressed by servicing any pending interrupts via 2950 * sys_trap before returning back to the user mode from a fast trap 2951 * handler. The "done" instruction within a fast trap handler, which 2952 * runs entirely at TL=1 with interrupts disabled, is replaced with the 2953 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done 2954 * entry point. 2955 * 2956 * We check for any pending interrupts here and force a sys_trap to 2957 * service those interrupts, if any. To minimize overhead, pending 2958 * interrupts are checked if the %tpc happens to be at 16K boundary, 2959 * which allows a malicious program to execute at most 4K consecutive 2960 * instructions before we service any pending interrupts. If a worst 2961 * case fast trap handler takes about 2 usec, then interrupts will be 2962 * blocked for at most 8 msec, less than a clock tick. 2963 * 2964 * For the cases where we don't know if the %tpc will cross a 16K 2965 * boundary, we can't use the above optimization and always process 2966 * any pending interrupts via fast_frap_done_chk_intr entry point. 2967 * 2968 * Entry Conditions: 2969 * %pstate am:0 priv:1 ie:0 2970 * globals are AG (not normal globals) 2971 */ 2972 2973 .global fast_trap_done, fast_trap_done_chk_intr 2974fast_trap_done: 2975 rdpr %tpc, %g5 2976 sethi %hi(0xffffc000), %g6 ! 1's complement of 0x3fff 2977 andncc %g5, %g6, %g0 ! check lower 14 bits of %tpc 2978 bz,a,pn %icc, 1f ! branch if zero (lower 32 bits only) 2979 ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5 2980 done 2981 2982 ALTENTRY(fast_trap_done_check_interrupts) 2983fast_trap_done_chk_intr: 2984 ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5 2985 29861: rd SOFTINT, %g6 2987 and %g5, IRSR_BUSY, %g5 2988 orcc %g5, %g6, %g0 2989 bnz,pn %xcc, 2f ! branch if any pending intr 2990 nop 2991 done 2992 29932: 2994 /* 2995 * We get here if there are any pending interrupts. 2996 * Adjust %tpc/%tnpc as we'll be resuming via "retry" 2997 * instruction. 2998 */ 2999 rdpr %tnpc, %g5 3000 wrpr %g0, %g5, %tpc 3001 add %g5, 4, %g5 3002 wrpr %g0, %g5, %tnpc 3003 3004 /* 3005 * Force a dummy sys_trap call so that interrupts can be serviced. 3006 */ 3007 set fast_trap_dummy_call, %g1 3008 ba,pt %xcc, sys_trap 3009 mov -1, %g4 3010 3011fast_trap_dummy_call: 3012 retl 3013 nop 3014 3015#ifdef DEBUG 3016/* 3017 * Currently we only support syscall interposition for branded zones on 3018 * DEBUG kernels. The only brand that makes use of this functionality is 3019 * the fake Solaris 10 brand. Since this brand is only used for exercising 3020 * the framework, we don't want this overhead incurred on production 3021 * systems. 3022 */ 3023#define BRAND_CALLBACK(callback_id) \ 3024 CPU_ADDR(%g2, %g1) /* load CPU struct addr to %g2 */ ;\ 3025 ldn [%g2 + CPU_THREAD], %g3 /* load thread pointer */ ;\ 3026 ldn [%g3 + T_PROCP], %g3 /* get proc pointer */ ;\ 3027 ldn [%g3 + P_BRAND], %g3 /* get brand pointer */ ;\ 3028 brz %g3, 1f /* No brand? No callback. */ ;\ 3029 nop ;\ 3030 ldn [%g3 + B_MACHOPS], %g3 /* get machops list */ ;\ 3031 ldn [%g3 + (callback_id << 3)], %g3 ;\ 3032 brz %g3, 1f ;\ 3033 /* \ 3034 * This isn't pretty. We want a low-latency way for the callback \ 3035 * routine to decline to do anything. We just pass in an address \ 3036 * the routine can directly jmp back to, pretending that nothing \ 3037 * has happened. \ 3038 * \ 3039 * %g1: return address (where the brand handler jumps back to) \ 3040 * %g2: address of CPU structure \ 3041 * %g3: address of brand handler (where we will jump to) \ 3042 */ \ 3043 mov %pc, %g1 ;\ 3044 add %g1, 16, %g1 ;\ 3045 jmp %g3 ;\ 3046 nop ;\ 30471: 3048 3049 ENTRY_NP(syscall_wrapper32) 3050 TT_TRACE(trace_gen) 3051 BRAND_CALLBACK(BRAND_CB_SYSCALL32) 3052 SYSCALL_NOTT(syscall_trap32) 3053 SET_SIZE(syscall_wrapper32) 3054 3055 ENTRY_NP(syscall_wrapper) 3056 TT_TRACE(trace_gen) 3057 BRAND_CALLBACK(BRAND_CB_SYSCALL) 3058 SYSCALL_NOTT(syscall_trap) 3059 SET_SIZE(syscall_wrapper) 3060 3061#endif /* DEBUG */ 3062 3063#endif /* lint */ 3064