/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* !lint */
#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/sun4asi.h>
#include <sys/spitregs.h>
#include <sys/cheetahregs.h>
#include <sys/machtrap.h>
#include <sys/machthread.h>
#include <sys/machbrand.h>
#include <sys/pcb.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/machpcb.h>
#include <sys/async.h>
#include <sys/intreg.h>
#include <sys/scb.h>
#include <sys/psr_compat.h>
#include <sys/syscall.h>
#include <sys/machparam.h>
#include <sys/traptrace.h>
#include <vm/hat_sfmmu.h>
#include <sys/archsystm.h>
#include <sys/utrap.h>
#include <sys/clock.h>
#include <sys/intr.h>
#include <sys/fpu/fpu_simulator.h>
#include <vm/seg_spt.h>

/*
 * WARNING: If you add a fast trap handler which can be invoked by a
 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
 * instead of the "done" instruction to return to user mode.  See the
 * comments for the "fast_trap_done" entry point for more information.
 *
 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
 * cases where you always want to process any pending interrupts before
 * returning to user mode.
 */
#define	FAST_TRAP_DONE		\
	ba,a	fast_trap_done

#define	FAST_TRAP_DONE_CHK_INTR	\
	ba,a	fast_trap_done_chk_intr

/*
 * SPARC V9 Trap Table
 *
 * Most of the trap handlers are made from common building
 * blocks, and some are instantiated multiple times within
 * the trap table.  So, I build a bunch of macros, then
 * populate the table using only the macros.
 *
 * Many macros branch to sys_trap.  Its calling convention is:
 *	%g1		kernel trap handler
 *	%g2, %g3	args for above
 *	%g4		desired %pil
 */

#ifdef	TRAPTRACE

/*
 * Tracing macro.  Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#define	TT_TRACE_INS	2

#define	TT_TRACE_L(label)	\
	ba	label		;\
	rd	%pc, %l4	;\
	clr	%l4
#define	TT_TRACE_L_INS	3

#else

#define	TT_TRACE(label)
#define	TT_TRACE_INS	0

#define	TT_TRACE_L(label)
#define	TT_TRACE_L_INS	0

#endif

/*
 * This macro is used to update per-cpu mmu stats in perf-critical
 * paths.  It is only enabled in debug kernels or if SFMMU_STAT_GATHER
 * is defined.
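 * Like TT_TRACE, it branches off with its return %pc in %g7 so that
 * the stat_mmu code can bump the counter and jump straight back.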
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_PERCPU_DBSTAT(stat)	\
	mov	stat, %g1	;\
	ba	stat_mmu	;\
	rd	%pc, %g7
#else
#define	HAT_PERCPU_DBSTAT(stat)
#endif /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This first set is funneled to trap() with %tt as the type.
 * Trap will then either panic or send the user a signal.
 */
/*
 * NOT is used for traps that just shouldn't happen.
 * It comes in both single and quadruple flavors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	NOT			\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32
#define	NOT4	NOT; NOT; NOT; NOT
/*
 * RED is for traps that use the red mode handler.
 * We should never see these either.
 */
#define	RED	NOT
/*
 * BAD is used for trap vectors we don't have a kernel
 * handler for.
 * It also comes in single and quadruple versions.
 */
#define	BAD	NOT
#define	BAD4	NOT4

#define	DONE			\
	done;			\
	.align	32

/*
 * TRAP vectors to the trap() function.
 * Its main use is for user errors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	TRAP(arg)		\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	mov	arg, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

/*
 * SYSCALL is used for system calls on both ILP32 and LP64 kernels
 * depending on the "which" parameter (should be syscall_trap,
 * syscall_trap32, or nosys for unused system call traps).
 */
#define	SYSCALL(which)		\
	TT_TRACE(trace_gen)	;\
	set	(which), %g1	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

#define	FLUSHW()		\
	set	trap, %g1	;\
	mov	T_FLUSHW, %g3	;\
	sub	%g0, 1, %g4	;\
	save			;\
	flushw			;\
	restore			;\
	FAST_TRAP_DONE		;\
	.align	32

/*
 * GOTO just jumps to a label.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO(label)		\
	.global	label		;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * GOTO_TT just jumps to a label.
 * Correctable ECC error traps at levels 0 and 1 use this macro.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO_TT(label, ttlabel)	\
	.global	label		;\
	TT_TRACE(ttlabel)	;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * Privileged traps
 * Takes breakpoint if privileged, calls trap() if not.
 */
#define	PRIV(label)			\
	rdpr	%tstate, %g1		;\
	btst	TSTATE_PRIV, %g1	;\
	bnz	label			;\
	rdpr	%tt, %g3		;\
	set	trap, %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32


/*
 * DTrace traps.
 */
#define	DTRACE_PID			\
	.global	dtrace_pid_probe	;\
	set	dtrace_pid_probe, %g1	;\
	ba,pt	%xcc, user_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#define	DTRACE_RETURN				\
	.global	dtrace_return_probe		;\
	set	dtrace_return_probe, %g1	;\
	ba,pt	%xcc, user_trap			;\
	sub	%g0, 1, %g4			;\
	.align	32

/*
 * REGISTER WINDOW MANAGEMENT MACROS
 */

/*
 * various convenient units of padding
 */
#define	SKIP(n)	.skip 4*(n)

/*
 * CLEAN_WINDOW is the simple handler for cleaning a register window.
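 * It bumps %cleanwin, zeroes the locals and outs (the only registers
 * a freshly saved-into window exposes), and retries the trapped
 * instruction.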
 */
#define	CLEAN_WINDOW						\
	TT_TRACE_L(trace_win)					;\
	rdpr %cleanwin, %l0;  inc %l0;  wrpr %l0, %cleanwin	;\
	clr %l0; clr %l1; clr %l2; clr %l3			;\
	clr %l4; clr %l5; clr %l6; clr %l7			;\
	clr %o0; clr %o1; clr %o2; clr %o3			;\
	clr %o4; clr %o5; clr %o6; clr %o7			;\
	retry; .align 128

#if !defined(lint)

/*
 * If we get an unresolved tlb miss while in a window handler, the fault
 * handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel.  Spill handlers
 * use this to spill windows into the wbuf.
 *
 * The mixed handler works by checking %sp, and branching to the correct
 * handler.  This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames; which implies the handler order is: 32b,
 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
 */

/*
 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned even
 * though this code only needs it to be four-byte aligned.
 */
#define	SPILL_32bit(tail)					\
	srl	%sp, 0, %sp					;\
1:	st	%l0, [%sp + 0]					;\
	st	%l1, [%sp + 4]					;\
	st	%l2, [%sp + 8]					;\
	st	%l3, [%sp + 12]					;\
	st	%l4, [%sp + 16]					;\
	st	%l5, [%sp + 20]					;\
	st	%l6, [%sp + 24]					;\
	st	%l7, [%sp + 28]					;\
	st	%i0, [%sp + 32]					;\
	st	%i1, [%sp + 36]					;\
	st	%i2, [%sp + 40]					;\
	st	%i3, [%sp + 44]					;\
	st	%i4, [%sp + 48]					;\
	st	%i5, [%sp + 52]					;\
	st	%i6, [%sp + 56]					;\
	st	%i7, [%sp + 60]					;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	SPILL_32bit_asi(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
1:	sta	%l0, [%sp + %g0]asi_num				;\
	mov	4, %g1						;\
	sta	%l1, [%sp + %g1]asi_num				;\
	mov	8, %g2						;\
	sta	%l2, [%sp + %g2]asi_num				;\
	mov	12, %g3						;\
	sta	%l3, [%sp + %g3]asi_num				;\
	add	%sp, 16, %g4					;\
	sta	%l4, [%g4 + %g0]asi_num				;\
	sta	%l5, [%g4 + %g1]asi_num				;\
	sta	%l6, [%g4 + %g2]asi_num				;\
	sta	%l7, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i0, [%g4 + %g0]asi_num				;\
	sta	%i1, [%g4 + %g1]asi_num				;\
	sta	%i2, [%g4 + %g2]asi_num				;\
	sta	%i3, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i4, [%g4 + %g0]asi_num				;\
	sta	%i5, [%g4 + %g1]asi_num				;\
	sta	%i6, [%g4 + %g2]asi_num				;\
	sta	%i7, [%g4 + %g3]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned even though this code
 * only needs it to be four-byte aligned.
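 * Writing asi_num into %asi once up front lets the stores use
 * immediate offsets, so no %g registers are needed for address
 * staging the way SPILL_32bit_asi needs %g1-%g4.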
 */
#define	SPILL_32bit_tt1(asi_num, tail)				\
	mov	asi_num, %asi					;\
1:	srl	%sp, 0, %sp					;\
	sta	%l0, [%sp + 0]%asi				;\
	sta	%l1, [%sp + 4]%asi				;\
	sta	%l2, [%sp + 8]%asi				;\
	sta	%l3, [%sp + 12]%asi				;\
	sta	%l4, [%sp + 16]%asi				;\
	sta	%l5, [%sp + 20]%asi				;\
	sta	%l6, [%sp + 24]%asi				;\
	sta	%l7, [%sp + 28]%asi				;\
	sta	%i0, [%sp + 32]%asi				;\
	sta	%i1, [%sp + 36]%asi				;\
	sta	%i2, [%sp + 40]%asi				;\
	sta	%i3, [%sp + 44]%asi				;\
	sta	%i4, [%sp + 48]%asi				;\
	sta	%i5, [%sp + 52]%asi				;\
	sta	%i6, [%sp + 56]%asi				;\
	sta	%i7, [%sp + 60]%asi				;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-20-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty


/*
 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned even though this
 * code only needs it to be four-byte aligned.
 */
#define	FILL_32bit(tail)					\
	srl	%sp, 0, %sp					;\
1:	TT_TRACE_L(trace_win)					;\
	ld	[%sp + 0], %l0					;\
	ld	[%sp + 4], %l1					;\
	ld	[%sp + 8], %l2					;\
	ld	[%sp + 12], %l3					;\
	ld	[%sp + 16], %l4					;\
	ld	[%sp + 20], %l5					;\
	ld	[%sp + 24], %l6					;\
	ld	[%sp + 28], %l7					;\
	ld	[%sp + 32], %i0					;\
	ld	[%sp + 36], %i1					;\
	ld	[%sp + 40], %i2					;\
	ld	[%sp + 44], %i3					;\
	ld	[%sp + 48], %i4					;\
	ld	[%sp + 52], %i5					;\
	ld	[%sp + 56], %i6					;\
	ld	[%sp + 60], %i7					;\
	restored						;\
	retry							;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	FILL_32bit_asi(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
1:	TT_TRACE_L(trace_win)					;\
	mov	4, %g1						;\
	lda	[%sp + %g0]asi_num, %l0				;\
	mov	8, %g2						;\
	lda	[%sp + %g1]asi_num, %l1				;\
	mov	12, %g3						;\
	lda	[%sp + %g2]asi_num, %l2				;\
	lda	[%sp + %g3]asi_num, %l3				;\
	add	%sp, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %l4				;\
	lda	[%g4 + %g1]asi_num, %l5				;\
	lda	[%g4 + %g2]asi_num, %l6				;\
	lda	[%g4 + %g3]asi_num, %l7				;\
	add	%g4, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %i0				;\
	lda	[%g4 + %g1]asi_num, %i1				;\
	lda	[%g4 + %g2]asi_num, %i2				;\
	lda	[%g4 + %g3]asi_num, %i3				;\
	add	%g4, 16, %g4					;\
	lda	[%g4 + %g0]asi_num, %i4				;\
	lda	[%g4 + %g1]asi_num, %i5				;\
	lda	[%g4 + %g2]asi_num, %i6				;\
	lda	[%g4 + %g3]asi_num, %i7				;\
	restored						;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

/*
 * FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned even though this code
 * only needs it to be four-byte aligned.
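 * Note the TT_TRACE_L placement throughout: fills may trace before
 * loading, since the trace macro clobbers %l4, which the fill is about
 * to overwrite anyway; spills trace only after the locals are stored.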
 */
#define	FILL_32bit_tt1(asi_num, tail)				\
	mov	asi_num, %asi					;\
1:	srl	%sp, 0, %sp					;\
	TT_TRACE_L(trace_win)					;\
	lda	[%sp + 0]%asi, %l0				;\
	lda	[%sp + 4]%asi, %l1				;\
	lda	[%sp + 8]%asi, %l2				;\
	lda	[%sp + 12]%asi, %l3				;\
	lda	[%sp + 16]%asi, %l4				;\
	lda	[%sp + 20]%asi, %l5				;\
	lda	[%sp + 24]%asi, %l6				;\
	lda	[%sp + 28]%asi, %l7				;\
	lda	[%sp + 32]%asi, %i0				;\
	lda	[%sp + 36]%asi, %i1				;\
	lda	[%sp + 40]%asi, %i2				;\
	lda	[%sp + 44]%asi, %i3				;\
	lda	[%sp + 48]%asi, %i4				;\
	lda	[%sp + 52]%asi, %i5				;\
	lda	[%sp + 56]%asi, %i6				;\
	lda	[%sp + 60]%asi, %i7				;\
	restored						;\
	retry							;\
	SKIP(31-20-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty


/*
 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned.
 */
#define	SPILL_64bit(tail)					\
2:	stx	%l0, [%sp + V9BIAS64 + 0]			;\
	stx	%l1, [%sp + V9BIAS64 + 8]			;\
	stx	%l2, [%sp + V9BIAS64 + 16]			;\
	stx	%l3, [%sp + V9BIAS64 + 24]			;\
	stx	%l4, [%sp + V9BIAS64 + 32]			;\
	stx	%l5, [%sp + V9BIAS64 + 40]			;\
	stx	%l6, [%sp + V9BIAS64 + 48]			;\
	stx	%l7, [%sp + V9BIAS64 + 56]			;\
	stx	%i0, [%sp + V9BIAS64 + 64]			;\
	stx	%i1, [%sp + V9BIAS64 + 72]			;\
	stx	%i2, [%sp + V9BIAS64 + 80]			;\
	stx	%i3, [%sp + V9BIAS64 + 88]			;\
	stx	%i4, [%sp + V9BIAS64 + 96]			;\
	stx	%i5, [%sp + V9BIAS64 + 104]			;\
	stx	%i6, [%sp + V9BIAS64 + 112]			;\
	stx	%i7, [%sp + V9BIAS64 + 120]			;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-18-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/*
 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	SPILL_64bit_asi(asi_num, tail)				\
	mov	0 + V9BIAS64, %g1				;\
2:	stxa	%l0, [%sp + %g1]asi_num				;\
	mov	8 + V9BIAS64, %g2				;\
	stxa	%l1, [%sp + %g2]asi_num				;\
	mov	16 + V9BIAS64, %g3				;\
	stxa	%l2, [%sp + %g3]asi_num				;\
	mov	24 + V9BIAS64, %g4				;\
	stxa	%l3, [%sp + %g4]asi_num				;\
	add	%sp, 32, %g5					;\
	stxa	%l4, [%g5 + %g1]asi_num				;\
	stxa	%l5, [%g5 + %g2]asi_num				;\
	stxa	%l6, [%g5 + %g3]asi_num				;\
	stxa	%l7, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i0, [%g5 + %g1]asi_num				;\
	stxa	%i1, [%g5 + %g2]asi_num				;\
	stxa	%i2, [%g5 + %g3]asi_num				;\
	stxa	%i3, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i4, [%g5 + %g1]asi_num				;\
	stxa	%i5, [%g5 + %g2]asi_num				;\
	stxa	%i6, [%g5 + %g3]asi_num				;\
	stxa	%i7, [%g5 + %g4]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/*
 * SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned.
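 * As with the other 64-bit variants, the V9BIAS64 offsets undo the
 * 2047-byte stack bias that a 64-bit %sp carries.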
 */
#define	SPILL_64bit_tt1(asi_num, tail)				\
	mov	asi_num, %asi					;\
2:	stxa	%l0, [%sp + V9BIAS64 + 0]%asi			;\
	stxa	%l1, [%sp + V9BIAS64 + 8]%asi			;\
	stxa	%l2, [%sp + V9BIAS64 + 16]%asi			;\
	stxa	%l3, [%sp + V9BIAS64 + 24]%asi			;\
	stxa	%l4, [%sp + V9BIAS64 + 32]%asi			;\
	stxa	%l5, [%sp + V9BIAS64 + 40]%asi			;\
	stxa	%l6, [%sp + V9BIAS64 + 48]%asi			;\
	stxa	%l7, [%sp + V9BIAS64 + 56]%asi			;\
	stxa	%i0, [%sp + V9BIAS64 + 64]%asi			;\
	stxa	%i1, [%sp + V9BIAS64 + 72]%asi			;\
	stxa	%i2, [%sp + V9BIAS64 + 80]%asi			;\
	stxa	%i3, [%sp + V9BIAS64 + 88]%asi			;\
	stxa	%i4, [%sp + V9BIAS64 + 96]%asi			;\
	stxa	%i5, [%sp + V9BIAS64 + 104]%asi			;\
	stxa	%i6, [%sp + V9BIAS64 + 112]%asi			;\
	stxa	%i7, [%sp + V9BIAS64 + 120]%asi			;\
	TT_TRACE_L(trace_win)					;\
	saved							;\
	retry							;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty


/*
 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit(tail)					\
2:	TT_TRACE_L(trace_win)					;\
	ldx	[%sp + V9BIAS64 + 0], %l0			;\
	ldx	[%sp + V9BIAS64 + 8], %l1			;\
	ldx	[%sp + V9BIAS64 + 16], %l2			;\
	ldx	[%sp + V9BIAS64 + 24], %l3			;\
	ldx	[%sp + V9BIAS64 + 32], %l4			;\
	ldx	[%sp + V9BIAS64 + 40], %l5			;\
	ldx	[%sp + V9BIAS64 + 48], %l6			;\
	ldx	[%sp + V9BIAS64 + 56], %l7			;\
	ldx	[%sp + V9BIAS64 + 64], %i0			;\
	ldx	[%sp + V9BIAS64 + 72], %i1			;\
	ldx	[%sp + V9BIAS64 + 80], %i2			;\
	ldx	[%sp + V9BIAS64 + 88], %i3			;\
	ldx	[%sp + V9BIAS64 + 96], %i4			;\
	ldx	[%sp + V9BIAS64 + 104], %i5			;\
	ldx	[%sp + V9BIAS64 + 112], %i6			;\
	ldx	[%sp + V9BIAS64 + 120], %i7			;\
	restored						;\
	retry							;\
	SKIP(31-18-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/*
 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	FILL_64bit_asi(asi_num, tail)				\
	mov	V9BIAS64 + 0, %g1				;\
2:	TT_TRACE_L(trace_win)					;\
	ldxa	[%sp + %g1]asi_num, %l0				;\
	mov	V9BIAS64 + 8, %g2				;\
	ldxa	[%sp + %g2]asi_num, %l1				;\
	mov	V9BIAS64 + 16, %g3				;\
	ldxa	[%sp + %g3]asi_num, %l2				;\
	mov	V9BIAS64 + 24, %g4				;\
	ldxa	[%sp + %g4]asi_num, %l3				;\
	add	%sp, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %l4				;\
	ldxa	[%g5 + %g2]asi_num, %l5				;\
	ldxa	[%g5 + %g3]asi_num, %l6				;\
	ldxa	[%g5 + %g4]asi_num, %l7				;\
	add	%g5, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %i0				;\
	ldxa	[%g5 + %g2]asi_num, %i1				;\
	ldxa	[%g5 + %g3]asi_num, %i2				;\
	ldxa	[%g5 + %g4]asi_num, %i3				;\
	add	%g5, 32, %g5					;\
	ldxa	[%g5 + %g1]asi_num, %i4				;\
	ldxa	[%g5 + %g2]asi_num, %i5				;\
	ldxa	[%g5 + %g3]asi_num, %i6				;\
	ldxa	[%g5 + %g4]asi_num, %i7				;\
	restored						;\
	retry							;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

/*
 * FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit_tt1(asi_num, tail)				\
	mov	asi_num, %asi					;\
	TT_TRACE_L(trace_win)					;\
	ldxa	[%sp + V9BIAS64 + 0]%asi, %l0			;\
	ldxa	[%sp + V9BIAS64 + 8]%asi, %l1			;\
	ldxa	[%sp + V9BIAS64 + 16]%asi, %l2			;\
	ldxa	[%sp + V9BIAS64 + 24]%asi, %l3			;\
	ldxa	[%sp + V9BIAS64 + 32]%asi, %l4			;\
	ldxa	[%sp + V9BIAS64 + 40]%asi, %l5			;\
	ldxa	[%sp + V9BIAS64 + 48]%asi, %l6			;\
	ldxa	[%sp + V9BIAS64 + 56]%asi, %l7			;\
	ldxa	[%sp + V9BIAS64 + 64]%asi, %i0			;\
	ldxa	[%sp + V9BIAS64 + 72]%asi, %i1			;\
	ldxa	[%sp + V9BIAS64 + 80]%asi, %i2			;\
	ldxa	[%sp + V9BIAS64 + 88]%asi, %i3			;\
	ldxa	[%sp + V9BIAS64 + 96]%asi, %i4			;\
	ldxa	[%sp + V9BIAS64 + 104]%asi, %i5			;\
	ldxa	[%sp + V9BIAS64 + 112]%asi, %i6			;\
	ldxa	[%sp + V9BIAS64 + 120]%asi, %i7			;\
	restored						;\
	retry							;\
	SKIP(31-19-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty

#endif /* !lint */

/*
 * SPILL_mixed spills either size window, depending on
 * whether %sp is even or odd, to a 32-bit address space.
 * This may only be used in conjunction with SPILL_32bit/
 * SPILL_64bit.  New versions of SPILL_mixed_{tt1,asi} would be
 * needed for use with SPILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of spill handler being branched to.
 * Clear the upper 32 bits of %sp if it is odd.
 * We won't need to clear them in a 64-bit kernel.
 */
#define	SPILL_mixed						\
	btst	1, %sp						;\
	bz,a,pt	%xcc, 1b					;\
	srl	%sp, 0, %sp					;\
	ba,pt	%xcc, 2b					;\
	nop							;\
	.align	128

/*
 * FILL_mixed(ASI) fills either size window, depending on
 * whether %sp is even or odd, from a 32-bit address space.
 * This may only be used in conjunction with FILL_32bit/
 * FILL_64bit.  New versions of FILL_mixed_{tt1,asi} would be
 * needed for use with FILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of fill handler being branched to.
 * Clear the upper 32 bits of %sp if it is odd.
 * We won't need to clear them in a 64-bit kernel.
 */
#define	FILL_mixed						\
	btst	1, %sp						;\
	bz,a,pt	%xcc, 1b					;\
	srl	%sp, 0, %sp					;\
	ba,pt	%xcc, 2b					;\
	nop							;\
	.align	128


/*
 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
 * respectively, into the address space via the designated asi.  The
 * unbiased stack pointer is required to be eight-byte aligned (even in
 * the 32-bit case, though this code does not require such strict
 * alignment).
 *
 * With SPARC V9 the spill trap takes precedence over the cleanwin trap,
 * so when cansave == 0, canrestore == 6, and cleanwin == 6, the next save
 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
 * window may contain kernel data, so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap.  These handlers then
 * spill the appropriate window but also back up a window and clean the
 * window that didn't get a cleanwin trap.
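 * (That is: the save traps, the handler spills cwp + 2, and
 * .spill_clean then backs up to scrub cwp + 1, the window that was
 * never cleaned, before resuming.)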
 */
#define	SPILL_32clean(asi_num, tail)				\
	srl	%sp, 0, %sp					;\
	sta	%l0, [%sp + %g0]asi_num				;\
	mov	4, %g1						;\
	sta	%l1, [%sp + %g1]asi_num				;\
	mov	8, %g2						;\
	sta	%l2, [%sp + %g2]asi_num				;\
	mov	12, %g3						;\
	sta	%l3, [%sp + %g3]asi_num				;\
	add	%sp, 16, %g4					;\
	sta	%l4, [%g4 + %g0]asi_num				;\
	sta	%l5, [%g4 + %g1]asi_num				;\
	sta	%l6, [%g4 + %g2]asi_num				;\
	sta	%l7, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i0, [%g4 + %g0]asi_num				;\
	sta	%i1, [%g4 + %g1]asi_num				;\
	sta	%i2, [%g4 + %g2]asi_num				;\
	sta	%i3, [%g4 + %g3]asi_num				;\
	add	%g4, 16, %g4					;\
	sta	%i4, [%g4 + %g0]asi_num				;\
	sta	%i5, [%g4 + %g1]asi_num				;\
	sta	%i6, [%g4 + %g2]asi_num				;\
	sta	%i7, [%g4 + %g3]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	b	.spill_clean					;\
	mov	WSTATE_USER32, %g7				;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
	.empty

#define	SPILL_64clean(asi_num, tail)				\
	mov	0 + V9BIAS64, %g1				;\
	stxa	%l0, [%sp + %g1]asi_num				;\
	mov	8 + V9BIAS64, %g2				;\
	stxa	%l1, [%sp + %g2]asi_num				;\
	mov	16 + V9BIAS64, %g3				;\
	stxa	%l2, [%sp + %g3]asi_num				;\
	mov	24 + V9BIAS64, %g4				;\
	stxa	%l3, [%sp + %g4]asi_num				;\
	add	%sp, 32, %g5					;\
	stxa	%l4, [%g5 + %g1]asi_num				;\
	stxa	%l5, [%g5 + %g2]asi_num				;\
	stxa	%l6, [%g5 + %g3]asi_num				;\
	stxa	%l7, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i0, [%g5 + %g1]asi_num				;\
	stxa	%i1, [%g5 + %g2]asi_num				;\
	stxa	%i2, [%g5 + %g3]asi_num				;\
	stxa	%i3, [%g5 + %g4]asi_num				;\
	add	%g5, 32, %g5					;\
	stxa	%i4, [%g5 + %g1]asi_num				;\
	stxa	%i5, [%g5 + %g2]asi_num				;\
	stxa	%i6, [%g5 + %g3]asi_num				;\
	stxa	%i7, [%g5 + %g4]asi_num				;\
	TT_TRACE_L(trace_win)					;\
	b	.spill_clean					;\
	mov	WSTATE_USER64, %g7				;\
	SKIP(31-25-TT_TRACE_L_INS)				;\
	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
	.empty


/*
 * Floating point disabled.
 */
#define	FP_DISABLED_TRAP		\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_disabled	;\
	nop				;\
	.align	32

/*
 * Floating point exceptions.
 */
#define	FP_IEEE_TRAP				\
	TT_TRACE(trace_gen)			;\
	ba,pt	%xcc, .fp_ieee_exception	;\
	nop					;\
	.align	32

#define	FP_TRAP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_exception	;\
	nop				;\
	.align	32

#if !defined(lint)
/*
 * asynchronous traps at level 0 and level 1
 *
 * The first instruction must be a membar for UltraSPARC-III
 * to stop RED state entry if the store queue has many
 * pending bad stores (PRM, Chapter 11).
 */
#define	ASYNC_TRAP(ttype, ttlabel, table_name)	\
	.global	table_name			;\
table_name:					;\
	membar	#Sync				;\
	TT_TRACE(ttlabel)			;\
	ba	async_err			;\
	mov	ttype, %g5			;\
	.align	32

/*
 * Defaults to BAD entry, but establishes label to be used for
 * architecture-specific overwrite of trap table entry.
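 * (The CPU module can, for example, hot-patch its fast-ECC or cache
 * parity handlers over these slots at startup.)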
 */
#define	LABELED_BAD(table_name)		\
	.global	table_name		;\
table_name:				;\
	BAD

#endif /* !lint */

/*
 * illegal instruction trap
 */
#define	ILLTRAP_INSTR				\
	membar	#Sync				;\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP4, %g2		;\
	or	%g0, T_UNIMP_INSTR, %g3		;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * tag overflow trap
 */
#define	TAG_OVERFLOW				\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP10, %g2		;\
	or	%g0, T_TAG_OVERFLOW, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * divide by zero trap
 */
#define	DIV_BY_ZERO				\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP11, %g2		;\
	or	%g0, T_IDIV0, %g3		;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * trap instruction for V9 user trap handlers
 */
#define	TRAP_INSTR				\
	TT_TRACE(trace_gen)			;\
	or	%g0, T_SOFTWARE_TRAP, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32
#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR

/*
 * LEVEL_INTERRUPT is for level N interrupts.
 * VECTOR_INTERRUPT is for the vector trap.
 */
#define	LEVEL_INTERRUPT(level)		\
	.global	tt_pil/**/level		;\
tt_pil/**/level:			;\
	ba,pt	%xcc, pil_interrupt	;\
	mov	level, %g4		;\
	.align	32

#define	LEVEL14_INTERRUPT	\
	ba	pil14_interrupt	;\
	mov	PIL_14, %g4	;\
	.align	32

#define	VECTOR_INTERRUPT				\
	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g1	;\
	btst	IRSR_BUSY, %g1				;\
	bnz,pt	%xcc, vec_interrupt			;\
	nop						;\
	ba,a,pt	%xcc, vec_intr_spurious			;\
	.empty						;\
	.align	32

/*
 * MMU Trap Handlers.
 */
#define	SWITCH_GLOBALS	/* mmu->alt, alt->mmu */		\
	rdpr	%pstate, %g5					;\
	wrpr	%g5, PSTATE_MG | PSTATE_AG, %pstate

#define	IMMU_EXCEPTION				\
	membar	#Sync				;\
	SWITCH_GLOBALS				;\
	wr	%g0, ASI_IMMU, %asi		;\
	rdpr	%tpc, %g2			;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_INSTR_EXCEPTION, %g1		;\
	.align	32

#define	DMMU_EXCEPTION				\
	SWITCH_GLOBALS				;\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_TAG_ACCESS]%asi, %g2	;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_DATA_EXCEPTION, %g1		;\
	.align	32

#define	DMMU_EXC_AG_PRIV			\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_priv_exception	;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

#define	DMMU_EXC_AG_NOT_ALIGNED			\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_exception_not_aligned ;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

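/*
 * In both macros above, the SFSR load sits in the branch delay slot,
 * so %g3 holds the fault status by the time the common code runs.
 */
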
/*
 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
 */
#define	DMMU_EXC_LDDF_NOT_ALIGNED		\
	btst	1, %sp				;\
	bnz,pt	%xcc, .lddf_exception_not_aligned ;\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_exception_not_aligned ;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

#define	DMMU_EXC_STDF_NOT_ALIGNED		\
	btst	1, %sp				;\
	bnz,pt	%xcc, .stdf_exception_not_aligned ;\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_exception_not_aligned ;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

/*
 * Flush the TLB using either the primary, secondary, or nucleus flush
 * operation based on whether the ctx from the tag access register matches
 * the primary or secondary context (flush the nucleus if neither matches).
 *
 * Requires a membar #Sync before the next ld/st.
 * Exits with:
 *	g2 = tag access register
 *	g3 = ctx number
 */
#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
#endif
#define	DTLB_DEMAP_ENTRY					\
	mov	MMU_TAG_ACCESS, %g1				;\
	mov	MMU_PCONTEXT, %g5				;\
	ldxa	[%g1]ASI_DMMU, %g2				;\
	sethi	%hi(TAGACC_CTX_MASK), %g4			;\
	or	%g4, %lo(TAGACC_CTX_MASK), %g4			;\
	and	%g2, %g4, %g3		/* g3 = ctx */		;\
	ldxa	[%g5]ASI_DMMU, %g6	/* g6 = primary ctx */	;\
	and	%g6, %g4, %g6		/* &= CTXREG_CTX_MASK */ ;\
	cmp	%g3, %g6					;\
	be,pt	%xcc, 1f					;\
	andn	%g2, %g4, %g1		/* ctx = primary */	;\
	mov	MMU_SCONTEXT, %g5				;\
	ldxa	[%g5]ASI_DMMU, %g6	/* g6 = secondary ctx */ ;\
	and	%g6, %g4, %g6		/* &= CTXREG_CTX_MASK */ ;\
	cmp	%g3, %g6					;\
	be,a,pt	%xcc, 1f					;\
	or	%g1, DEMAP_SECOND, %g1				;\
	or	%g1, DEMAP_NUCLEUS, %g1				;\
1:	stxa	%g0, [%g1]ASI_DTLB_DEMAP /* MMU_DEMAP_PAGE */	;\
	membar	#Sync

#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 */
	.global	tt0_dtlbmiss
tt0_dtlbmiss:
	.global	tt1_dtlbmiss
tt1_dtlbmiss:
	nop
#endif

/*
 * Needs to be exactly 32 instructions
 *
 * UTLB NOTE: If we don't hit on the 8k pointer then we branch
 * to a special 4M tsb handler.  It would be nice if that handler
 * could live in this file but currently it seems better to allow
 * it to fall thru to sfmmu_tsb_miss.
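 * (The unimp 0 padding and the trailing .align 128 are what pin each
 * expansion at exactly 32 instructions / 128 bytes.)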
 */
#ifdef UTSB_PHYS
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_DMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG, %g3					;\
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	/* g3 = 2nd tsb reg */	;\
	brgez,pn %g3, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	ldda	[%g1]ASI_QUAD_LDD_PHYS, %g4	/* g4 = tag, %g5 data */ ;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* no 4M TSB, miss */	;\
	mov	%g0, %g3		/* clear 4M tsbe ptr */		;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_DTLB_IN	/* trapstat expects TTE */	;\
	retry				/* in %g5 */			;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align	128
#else /* UTSB_PHYS */
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_DMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	brlz,pn	%g1, sfmmu_udtlb_slowpath				;\
	nop								;\
	ldda	[%g1]ASI_NQUAD_LD, %g4	/* g4 = tag, %g5 data */	;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* no 4M TSB, miss */	;\
	mov	%g0, %g3		/* clear 4M tsbe ptr */		;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_DTLB_IN	/* trapstat expects TTE */	;\
	retry				/* in %g5 */			;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align	128
#endif /* UTSB_PHYS */

#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of ITLB_MISS().
 */
	.global	tt0_itlbmiss
tt0_itlbmiss:
	.global	tt1_itlbmiss
tt1_itlbmiss:
	nop
#endif

/*
 * Instruction miss handler.
 * ldda instructions will have their ASI patched
 * by sfmmu_patch_ktsb at runtime.
 * MUST be EXACTLY 32 instructions or we'll break.
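 * Unlike the data-side handler, this one also tests TTE_EXECPRM_INT
 * and bails out to exec_fault when the execute bit is clear.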
 */
#ifdef UTSB_PHYS
#define	ITLB_MISS(table_name)						\
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_IMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_IMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG, %g3					;\
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	/* g3 = 2nd tsb reg */	;\
	brgez,pn %g3, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	ldda	[%g1]ASI_QUAD_LDD_PHYS, %g4	/* g4 = tag, g5 = data */ ;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* br if 8k ptr miss */	;\
	mov	%g0, %g3			/* no 4M TSB */		;\
	andcc	%g5, TTE_EXECPRM_INT, %g0	/* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	nop								;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_ITLB_IN	/* trapstat expects %g5 */	;\
	retry								;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align	128
#else /* UTSB_PHYS */
#define	ITLB_MISS(table_name)						\
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_IMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_IMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	brlz,pn	%g1, sfmmu_uitlb_slowpath	/* if >1 TSB branch */	;\
	nop								;\
	ldda	[%g1]ASI_NQUAD_LD, %g4		/* g4 = tag, g5 = data */ ;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* br if 8k ptr miss */	;\
	mov	%g0, %g3			/* no 4M TSB */		;\
	andcc	%g5, TTE_EXECPRM_INT, %g0	/* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	nop								;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_ITLB_IN	/* trapstat expects %g5 */	;\
	retry								;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align	128
#endif /* UTSB_PHYS */


/*
 * This macro is the first level handler for fast protection faults.
 * It first demaps the tlb entry which generated the fault and then
 * attempts to set the modify bit on the hash.  It needs to be
 * exactly 32 instructions.
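 * DTLB_DEMAP_ENTRY accounts for 20 of those, leaving the tail just
 * enough room for the trace, the 8K TSB pointer reload, and the
 * user/kernel dispatch.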
 */
#define	DTLB_PROT							\
	DTLB_DEMAP_ENTRY		/* 20 instructions */		;\
	/*								;\
	 * At this point:						;\
	 *	g1 = ????						;\
	 *	g2 = tag access register				;\
	 *	g3 = ctx number						;\
	 *	g4 = ????						;\
	 */								;\
	TT_TRACE(trace_dataprot)	/* 2 instr ifdef TRAPTRACE */	;\
					/* clobbers g1 and g6 */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	brnz,pt	%g3, sfmmu_uprot_trap		/* user trap */		;\
	nop								;\
	ba,a,pt	%xcc, sfmmu_kprot_trap		/* kernel trap */	;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align	128

#define	DMMU_EXCEPTION_TL1		;\
	SWITCH_GLOBALS			;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	nop				;\
	.align	32

#define	MISALIGN_ADDR_TL1		;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	nop				;\
	.align	32

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 * ttextra = value to OR in to trap type (%tt) (in)
 */
#ifdef TRAPTRACE
#define	TRACE_TSBHIT(ttextra)					\
	membar	#Sync						;\
	sethi	%hi(FLUSH_ADDR), %g6				;\
	flush	%g6						;\
	TRACE_PTR(%g3, %g6)					;\
	GET_TRACE_TICK(%g6)					;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi			;\
	stxa	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */ ;\
	stxa	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */	;\
	rdpr	%tnpc, %g6					;\
	stxa	%g6, [%g3 + TRAP_ENT_F2]%asi			;\
	stxa	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */ ;\
	stxa	%g0, [%g3 + TRAP_ENT_F4]%asi			;\
	rdpr	%tpc, %g6					;\
	stxa	%g6, [%g3 + TRAP_ENT_TPC]%asi			;\
	rdpr	%tl, %g6					;\
	stha	%g6, [%g3 + TRAP_ENT_TL]%asi			;\
	rdpr	%tt, %g6					;\
	or	%g6, (ttextra), %g6				;\
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi			;\
	ldxa	[%g0]ASI_IMMU, %g1	/* tag target */	;\
	ldxa	[%g0]ASI_DMMU, %g4				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	movne	%icc, %g4, %g1					;\
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tsb tag */	;\
	stxa	%g0, [%g3 + TRAP_ENT_TR]%asi			;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
#define	TRACE_TSBHIT(ttextra)
#endif

#if defined(lint)

struct scb	trap_table;
struct scb	scb;		/* trap_table/scb are the same object */

#else /* lint */

/*
 * =======================================================================
 * SPARC V9 TRAP TABLE
 *
 * The trap table is divided into two halves: the first half is used when
 * taking traps when TL=0; the second half is used when taking traps from
 * TL>0.  Note that handlers in the second half of the table might not be
 * able to make the same assumptions as handlers in the first half of the
 * table.
 *
 * Worst case trap nesting so far:
 *
 *	at TL=0 client issues software trap requesting service
 *	at TL=1 nucleus wants a register window
 *	at TL=2 register window clean/spill/fill takes a TLB miss
 *	at TL=3 processing TLB miss
 *	at TL=4 handle asynchronous error
 *
 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
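 * The TL=4 handlers therefore must not trap again: the next level is
 * RED mode, whose vectors this table treats as can't-happen (RED is
 * just NOT).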
 *
 * =======================================================================
 */
	.section ".text"
	.align	4
	.global	trap_table, scb, trap_table0, trap_table1, etrap_table
	.type	trap_table, #object
	.type	scb, #object
trap_table:
scb:
trap_table0:
	/* hardware traps */
	NOT;				/* 000	reserved */
	RED;				/* 001	power on reset */
	RED;				/* 002	watchdog reset */
	RED;				/* 003	externally initiated reset */
	RED;				/* 004	software initiated reset */
	RED;				/* 005	red mode exception */
	NOT; NOT;			/* 006 - 007 reserved */
	IMMU_EXCEPTION;			/* 008	instruction access exception */
	NOT;				/* 009	instruction access MMU miss */
	ASYNC_TRAP(T_INSTR_ERROR, trace_gen, tt0_iae);
					/* 00A	instruction access error */
	NOT; NOT4;			/* 00B - 00F reserved */
	ILLTRAP_INSTR;			/* 010	illegal instruction */
	TRAP(T_PRIV_INSTR);		/* 011	privileged opcode */
	NOT;				/* 012	unimplemented LDD */
	NOT;				/* 013	unimplemented STD */
	NOT4; NOT4; NOT4;		/* 014 - 01F reserved */
	FP_DISABLED_TRAP;		/* 020	fp disabled */
	FP_IEEE_TRAP;			/* 021	fp exception ieee 754 */
	FP_TRAP;			/* 022	fp exception other */
	TAG_OVERFLOW;			/* 023	tag overflow */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	DIV_BY_ZERO;			/* 028	division by zero */
	NOT;				/* 029	internal processor error */
	NOT; NOT; NOT4;			/* 02A - 02F reserved */
	DMMU_EXCEPTION;			/* 030	data access exception */
	NOT;				/* 031	data access MMU miss */
	ASYNC_TRAP(T_DATA_ERROR, trace_gen, tt0_dae);
					/* 032	data access error */
	NOT;				/* 033	data access protection */
	DMMU_EXC_AG_NOT_ALIGNED;	/* 034	mem address not aligned */
	DMMU_EXC_LDDF_NOT_ALIGNED;	/* 035	LDDF mem address not aligned */
	DMMU_EXC_STDF_NOT_ALIGNED;	/* 036	STDF mem address not aligned */
	DMMU_EXC_AG_PRIV;		/* 037	privileged action */
	NOT;				/* 038	LDQF mem address not aligned */
	NOT;				/* 039	STQF mem address not aligned */
	NOT; NOT; NOT4;			/* 03A - 03F reserved */
	LABELED_BAD(tt0_asdat);		/* 040	async data error */
	LEVEL_INTERRUPT(1);		/* 041	interrupt level 1 */
	LEVEL_INTERRUPT(2);		/* 042	interrupt level 2 */
	LEVEL_INTERRUPT(3);		/* 043	interrupt level 3 */
	LEVEL_INTERRUPT(4);		/* 044	interrupt level 4 */
	LEVEL_INTERRUPT(5);		/* 045	interrupt level 5 */
	LEVEL_INTERRUPT(6);		/* 046	interrupt level 6 */
	LEVEL_INTERRUPT(7);		/* 047	interrupt level 7 */
	LEVEL_INTERRUPT(8);		/* 048	interrupt level 8 */
	LEVEL_INTERRUPT(9);		/* 049	interrupt level 9 */
	LEVEL_INTERRUPT(10);		/* 04A	interrupt level 10 */
	LEVEL_INTERRUPT(11);		/* 04B	interrupt level 11 */
	LEVEL_INTERRUPT(12);		/* 04C	interrupt level 12 */
	LEVEL_INTERRUPT(13);		/* 04D	interrupt level 13 */
	LEVEL14_INTERRUPT;		/* 04E	interrupt level 14 */
	LEVEL_INTERRUPT(15);		/* 04F	interrupt level 15 */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F reserved */
	VECTOR_INTERRUPT;		/* 060	interrupt vector */
	GOTO(kmdb_trap);		/* 061	PA watchpoint */
	GOTO(kmdb_trap);		/* 062	VA watchpoint */
	GOTO_TT(ce_err, trace_gen);	/* 063	corrected ECC error */
	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt0);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	LABELED_BAD(tt0_fecc);		/* 070	fast ecache ECC error */
	LABELED_BAD(tt0_dperr);		/* 071	Cheetah+ dcache parity error */
	LABELED_BAD(tt0_iperr);		/* 072	Cheetah+ icache parity error */
	NOT;				/* 073	reserved */
	NOT4; NOT4; NOT4;		/* 074 - 07F reserved */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_asi(ASI_AIUP,sn0);	/* 084	spill 1 normal */
	SPILL_64bit_asi(ASI_AIUP,sn0);	/* 088	spill 2 normal */
	SPILL_32clean(ASI_AIUP,sn0);	/* 08C	spill 3 normal */
	SPILL_64clean(ASI_AIUP,sn0);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0A4	spill 1 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0A8	spill 2 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0AC	spill 3 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0C4	fill 1 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0C8	fill 2 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0CC	fill 3 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4;				/* 0E0	fill 0 other */
	NOT4;				/* 0E4	fill 1 other */
	NOT4;				/* 0E8	fill 2 other */
	NOT4;				/* 0EC	fill 3 other */
	NOT4;				/* 0F0	fill 4 other */
	NOT4;				/* 0F4	fill 5 other */
	NOT4;				/* 0F8	fill 6 other */
	NOT4;				/* 0FC	fill 7 other */
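/*
 * Trap types 080-0FF above are the V9 window spill/fill vectors:
 * spills occupy 080-0BF (normal, then other) and fills 0C0-0FF,
 * with %wstate selecting which four-entry handler a given window
 * trap vectors through.
 */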
	/* user traps */
	GOTO(syscall_trap_4x);		/* 100	old system call */
	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
	TRAP(T_DIV0);			/* 102	user divide by zero */
	FLUSHW();			/* 103	flush windows */
	GOTO(.clean_windows);		/* 104	clean windows */
	BAD;				/* 105	range check ?? */
	GOTO(.fix_alignment);		/* 106	do unaligned references */
	BAD;				/* 107	unused */
#ifdef DEBUG
	GOTO(syscall_wrapper32)		/* 108	ILP32 system call on LP64 */
#else
	SYSCALL(syscall_trap32)		/* 108	ILP32 system call on LP64 */
#endif
	GOTO(set_trap0_addr);		/* 109	set trap0 address */
	BAD; BAD; BAD4;			/* 10A - 10F unused */
	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
	GOTO(.getcc);			/* 120	get condition codes */
	GOTO(.setcc);			/* 121	set condition codes */
	GOTO(.getpsr);			/* 122	get psr */
	GOTO(.setpsr);			/* 123	set psr (some fields) */
	GOTO(get_timestamp);		/* 124	get timestamp */
	GOTO(get_virtime);		/* 125	get lwp virtual time */
	PRIV(self_xcall);		/* 126	self xcall */
	GOTO(get_hrestime);		/* 127	get hrestime */
	BAD;				/* 128	ST_SETV9STACK */
	GOTO(.getlgrp);			/* 129	get lgrpid */
	BAD; BAD; BAD4;			/* 12A - 12F unused */
	BAD4; BAD4;			/* 130 - 137 unused */
	DTRACE_PID;			/* 138	dtrace pid tracing provider */
	BAD;				/* 139	unused */
	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
	BAD; BAD4;			/* 13B - 13F unused */
#ifdef DEBUG
	GOTO(syscall_wrapper)		/* 140	LP64 system call */
#else
	SYSCALL(syscall_trap)		/* 140	LP64 system call */
#endif
	SYSCALL(nosys);			/* 141	unused system call trap */
#ifdef DEBUG_USER_TRAPTRACECTL
	GOTO(.traptrace_freeze);	/* 142	freeze traptrace */
	GOTO(.traptrace_unfreeze);	/* 143	unfreeze traptrace */
#else
	SYSCALL(nosys);			/* 142	unused system call trap */
	SYSCALL(nosys);			/* 143	unused system call trap */
#endif
	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD;				/* 170 - unused */
	BAD;				/* 171 - unused */
	BAD; BAD;			/* 172 - 173 unused */
	BAD4; BAD4;			/* 174 - 17B unused */
#ifdef	PTL1_PANIC_DEBUG
	mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
					/* 17C	test ptl1_panic */
#else
	BAD;				/* 17C	unused */
#endif	/* PTL1_PANIC_DEBUG */
	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
	PRIV(kctx_obp_bpt);		/* 17F	obp breakpoint */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
trap_table1:
	NOT4; NOT4; NOT; NOT;		/* 000 - 009 unused */
	ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen, tt1_iae);
					/* 00A	instruction access error */
	NOT; NOT4;			/* 00B - 00F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
	NOT4;				/* 020 - 023 unused */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	NOT4; NOT4;			/* 028 - 02F unused */
	DMMU_EXCEPTION_TL1;		/* 030	data access exception */
	NOT;				/* 031	unused */
	ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen, tt1_dae);
					/* 032	data access error */
	NOT;				/* 033	unused */
	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
	LABELED_BAD(tt1_asdat);		/* 040	async data error */
	NOT; NOT; NOT;			/* 041 - 043 unused */
	NOT4; NOT4; NOT4;		/* 044 - 04F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
	NOT;				/* 060	unused */
	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
	GOTO_TT(ce_err_tl1, trace_gen);	/* 063	corrected ECC error */
	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt1);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	LABELED_BAD(tt1_fecc);		/* 070	fast ecache ECC error */
	LABELED_BAD(tt1_dperr);		/* 071	Cheetah+ dcache parity error */
	LABELED_BAD(tt1_iperr);		/* 072	Cheetah+ icache parity error */
	NOT;				/* 073	reserved */
	NOT4; NOT4; NOT4;		/* 074 - 07F reserved */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 084	spill 1 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 088	spill 2 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 08C	spill 3 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0A4	spill 1 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0A8	spill 2 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0AC	spill 3 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_tt1(ASI_AIUP,fn1);	/* 0C4	fill 1 normal */
	FILL_64bit_tt1(ASI_AIUP,fn1);	/* 0C8	fill 2 normal */
	FILL_32bit_tt1(ASI_AIUP,fn1);	/* 0CC	fill 3 normal */
	FILL_64bit_tt1(ASI_AIUP,fn1);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
	LABELED_BAD(tt1_swtrap0);	/* 100	fast ecache ECC error (cont) */
	LABELED_BAD(tt1_swtrap1);	/* 101	Ch+ D$ parity error (cont) */
	LABELED_BAD(tt1_swtrap2);	/* 102	Ch+ I$ parity error (cont) */
	NOT;				/* 103	reserved */
/*
 * We only reserve the above four special case soft traps for code running
 * at TL>0, so we can truncate the trap table here.
 */
etrap_table:
	.size	trap_table, (.-trap_table)
	.size	scb, (.-scb)

/*
 * We get to exec_fault in the case of an instruction miss where the tte
 * has no execute bit set.  We go to tl0 to handle it.
 *
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 */
	ALTENTRY(exec_fault)
	TRACE_TSBHIT(0x200)
	SWITCH_GLOBALS
	mov	MMU_TAG_ACCESS, %g4
	ldxa	[%g4]ASI_IMMU, %g2		! arg1 = addr
	mov	T_INSTR_MMU_MISS, %g3		! arg2 = traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

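/*
 * .mmu_exception_not_aligned: if the trap came from user mode and the
 * process has a V9 utrap handler installed for alignment faults
 * (P_UTRAP15), hand the trap to it (via .setup_utrap for 32-bit
 * programs, .setup_v9utrap for 64-bit ones, distinguished by bit 0 of
 * %sp); otherwise fall through to trap() with T_ALIGNMENT.
 */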
.mmu_exception_not_aligned:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5		! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp
	bz,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1

.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1

.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	%xcc, .mmu_exception_tlb_chk
	or	%g5, CPU_DTRACE_BADADDR, %g5
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done

.mmu_exception_tlb_chk:
	GET_CPU_IMPL(%g5)			! check SFSR.FT to see if this
	cmp	%g5, PANTHER_IMPL		! is a TLB parity error.  But
	bne	2f				! we only do this check while
	mov	1, %g4				! running on Panther CPUs
	sllx	%g4, PN_SFSR_PARITY_SHIFT, %g4	! since US-I/II use the same
	andcc	%g3, %g4, %g0			! bit for something else which
	bz	2f				! will be handled later.
	nop
.mmu_exception_is_tlb_parity:
	.weak itlb_parity_trap
	.weak dtlb_parity_trap
	set	itlb_parity_trap, %g4
	cmp	%g1, T_INSTR_EXCEPTION		! branch to the itlb or
	be	3f				! dtlb parity handler
	nop					! if this trap is due
	set	dtlb_parity_trap, %g4
	cmp	%g1, T_DATA_EXCEPTION		! to an IMMU exception
	be	3f				! or DMMU exception.
	nop
2:
	sllx	%g3, 32, %g3
	or	%g3, %g1, %g3
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
3:
	jmp	%g4				! off to the appropriate
	nop					! TLB parity handler

.fp_disabled:
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	brz,a,pn %g1, 2f
	nop
#endif
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	bnz,pn	%icc, 2f
	nop
#else
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
#endif
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 2f
	nop
	ldn	[%g5 + P_UTRAP7], %g5		! fp_disabled utrap?
	brz,a,pn %g5, 2f
	nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,a,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g0, %g0, %g7
2:
	set	fp_disabled, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

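/*
 * IEEE-754 floating point exception: taking this from privileged code
 * is fatal (ptl1_panic with PTL1_BAD_FPTRAP); for user code, offer the
 * trap to a P_UTRAP8 handler if one is installed, else hand %fsr to
 * _fp_ieee_exception through sys_trap.
 */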
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 1f
	nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * Register Inputs:
 *	%g5	user trap handler
 *	%g7	misaligned addr - for alignment traps only
 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set.  In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	mov	%g7, %l3			! arg2 == misaligned address

	rdpr	%tstate, %g1			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g1
	wrpr	%g1, %g4, %tstate
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE
	/* NOTREACHED */

.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f
	nop
	CPU_ADDR(%g4, %g1)			! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5		! load thread pointer
	ldn	[%g5 + T_PROCP], %g5		! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5		! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	nop

	brz,pt	%g5, 3f			! if p_utraps == NULL goto trap()
	rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	smul	%g1, CPTRSIZE, %g2
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR]	! set CPU_TL1_HDLR
	rdpr	%tpc, %g1			! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1		! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR]	! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4		! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4			! and instruction with mask
	bnz,a,pt %icc, 3f			! if %g4 == zero, %g1 is an ILLTRAP
	nop					! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
	/* NOTREACHED */

/*
 * Register Inputs:
 *	%g5	user trap handler
 */
.setup_v9utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set.  In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f
	nop
	CPU_ADDR(%g4, %g1)			! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5		! load thread pointer
	ldn	[%g5 + T_PROCP], %g5		! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5		! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	nop

	brz,pt	%g5, 3f			! if p_utraps == NULL goto trap()
	rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	smul	%g1, CPTRSIZE, %g2
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
	rdpr	%tpc, %g1		! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1	! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4	! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4		! and instruction with mask
	bnz,a,pt %icc, 3f		! if %g4 == zero, %g1 is an ILLTRAP
	nop				! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
	/* NOTREACHED */

/*
 * Register Inputs:
 *	%g5	user trap handler
 */
.setup_v9utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l7			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l6			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	rdpr	%tstate, %g2			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g2
	wrpr	%g2, %g4, %tstate

	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
	ldn	[%g4 + P_AS], %g4		! load as pointer
	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
	cmp	%l7, %g4			! check for single-step set
	bne,pt	%xcc, 4f
	nop
	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
	bne,pt	%icc, 4f
	nop
	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
	mov	%l7, %g4			! on entry to precise user trap
	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
						! at time of trap
	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
						! %g4 == userlimit
	FAST_TRAP_DONE
	/* NOTREACHED */
4:
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE_CHK_INTR
	/* NOTREACHED */
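/*
 * The single-step interlock in .setup_v9utrap above, as a loose C
 * sketch (conceptual only; "handler" is the utrap handler in %g5):
 *
 *	if (tnpc == as->a_userlimit && lwp->lwp_pcb.pcb_step == STEP_ACTIVE) {
 *		pcb->pcb_tracepc = handler;	// remember the real target
 *		tnpc = userlimit;		// next fetch faults: FLTBOUNDS
 *	} else {
 *		tnpc = handler;			// ordinary precise utrap
 *	}
 */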
.fp_exception:
	CPU_ADDR(%g1, %g4)
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2

	/*
	 * Cheetah takes the unfinished_FPop trap for a certain range of
	 * operands to the "fitos" instruction. Instead of going through
	 * the slow software emulation path, we try to simulate the "fitos"
	 * instruction via "fitod" and "fdtos" provided the following
	 * conditions are met:
	 *
	 *	fpu_exists is set (if DEBUG)
	 *	not in privileged mode
	 *	ftt is unfinished_FPop
	 *	NXM IEEE trap is not enabled
	 *	instruction at %tpc is "fitos"
	 *
	 *	Usage:
	 *		%g1	per cpu address
	 *		%g2	%fsr
	 *		%g6	user instruction
	 *
	 * Note that we can take a memory access related trap while trying
	 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
	 * flag to catch those traps and let the SFMMU code deal with page
	 * fault and data access exception.
	 */
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7
	ld	[%g7 + %lo(fpu_exists)], %g7
	brz,pn	%g7, .fp_exception_cont
	nop
#endif
	rdpr	%tstate, %g7			! branch if in privileged mode
	btst	TSTATE_PRIV, %g7
	bnz,pn	%xcc, .fp_exception_cont
	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
	cmp	%g7, FTT_UNFIN
	set	FSR_TEM_NX, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
	andcc	%g2, %g5, %g0
	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
	rdpr	%tpc, %g5			! get faulting PC

	or	%g0, 1, %g7
	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
	lda	[%g5]ASI_USER, %g6		! get user's instruction
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag

	set	FITOS_INSTR_MASK, %g7
	and	%g6, %g7, %g7
	set	FITOS_INSTR, %g5
	cmp	%g7, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
	nop

	/*
	 * This is the unfinished_FPop trap for a "fitos" instruction. We
	 * need to simulate "fitos" via the "fitod" and "fdtos" instruction
	 * sequence.
	 *
	 * We need a temporary FP register to do the conversion. Since
	 * both source and destination operands for the "fitos" instruction
	 * have to be within %f0-%f31, we use an FP register from the upper
	 * half to guarantee that it won't collide with the source or the
	 * dest operand. However, we do have to save and restore its value.
	 *
	 * We use %d62 as a temporary FP register for the conversion and
	 * branch to appropriate instruction within the conversion tables
	 * based upon the rs2 and rd values.
	 */

	std	%d62, [%g1 + CPU_TMP1]		! save original value

	srl	%g6, FITOS_RS2_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fitod_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fitod_done
	.empty

_fitos_fitod_table:
	fitod	%f0, %d62
	fitod	%f1, %d62
	fitod	%f2, %d62
	fitod	%f3, %d62
	fitod	%f4, %d62
	fitod	%f5, %d62
	fitod	%f6, %d62
	fitod	%f7, %d62
	fitod	%f8, %d62
	fitod	%f9, %d62
	fitod	%f10, %d62
	fitod	%f11, %d62
	fitod	%f12, %d62
	fitod	%f13, %d62
	fitod	%f14, %d62
	fitod	%f15, %d62
	fitod	%f16, %d62
	fitod	%f17, %d62
	fitod	%f18, %d62
	fitod	%f19, %d62
	fitod	%f20, %d62
	fitod	%f21, %d62
	fitod	%f22, %d62
	fitod	%f23, %d62
	fitod	%f24, %d62
	fitod	%f25, %d62
	fitod	%f26, %d62
	fitod	%f27, %d62
	fitod	%f28, %d62
	fitod	%f29, %d62
	fitod	%f30, %d62
	fitod	%f31, %d62
_fitos_fitod_done:

	/*
	 * Now convert data back into single precision
	 */
	srl	%g6, FITOS_RD_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fdtos_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fdtos_done
	.empty

_fitos_fdtos_table:
	fdtos	%d62, %f0
	fdtos	%d62, %f1
	fdtos	%d62, %f2
	fdtos	%d62, %f3
	fdtos	%d62, %f4
	fdtos	%d62, %f5
	fdtos	%d62, %f6
	fdtos	%d62, %f7
	fdtos	%d62, %f8
	fdtos	%d62, %f9
	fdtos	%d62, %f10
	fdtos	%d62, %f11
	fdtos	%d62, %f12
	fdtos	%d62, %f13
	fdtos	%d62, %f14
	fdtos	%d62, %f15
	fdtos	%d62, %f16
	fdtos	%d62, %f17
	fdtos	%d62, %f18
	fdtos	%d62, %f19
	fdtos	%d62, %f20
	fdtos	%d62, %f21
	fdtos	%d62, %f22
	fdtos	%d62, %f23
	fdtos	%d62, %f24
	fdtos	%d62, %f25
	fdtos	%d62, %f26
	fdtos	%d62, %f27
	fdtos	%d62, %f28
	fdtos	%d62, %f29
	fdtos	%d62, %f30
	fdtos	%d62, %f31
_fitos_fdtos_done:

	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62

#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE
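/*
 * What the two jump tables above compute, as a hedged C sketch.  The
 * FP register numbers are fields of the trapped instruction, so a table
 * of 32 single-instruction entries (indexed by rs2 * 4 or rd * 4) stands
 * in for register-indexed access to the FP file:
 *
 *	double tmp = (double)(int32_t)f[rs2];	// fitod via %d62
 *	f[rd] = (float)tmp;			// fdtos from %d62
 *
 * which is precisely fitos: f[rd] = (float)(int32_t)f[rs2].  Note the
 * jmp/ba pairs form DCTI couples: the table entry executes in the delay
 * slot of the ba, which then lands on _fitos_*_done.
 */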
.fp_exception_cont:
	/*
	 * Let _fp_exception deal with simulating FPop instruction.
	 * Note that we need to pass %fsr in %g2 (already read above).
	 */

	set	_fp_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

.clean_windows:
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	sub	%g0, 1, %g4
	save
	flushw
	restore
	wrpr	%g0, %g0, %cleanwin	! no clean windows

	CPU_ADDR(%g4, %g5)
	ldn	[%g4 + CPU_MPCB], %g4
	brz,a,pn %g4, 1f
	nop
	ld	[%g4 + MPCB_WSTATE], %g5
	add	%g5, WSTATE_CLEAN_OFFSET, %g5
	wrpr	%g0, %g5, %wstate
1:	FAST_TRAP_DONE

/*
 * .spill_clean: clean the previous window, restore the wstate, and
 * "done".
 *
 * Entry: %g7 contains new wstate
 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
	wrpr	%g6, %cwp
	TT_TRACE_L(trace_win)
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate
	saved
	retry				! restores correct %cwp

.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1
	mov	1, %g2
	stb	%g2, [%g1 + P_FIXALIGNMENT]
	FAST_TRAP_DONE

#define	STDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark1:	set	start1, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start1:	ba,pt	%xcc, done1			;\
	std	%f0, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f32, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f2, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f34, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f4, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f36, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f6, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f38, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f8, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f40, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f10, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f42, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f12, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f44, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f14, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f46, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f16, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f48, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f18, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f50, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f20, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f52, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f22, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f54, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f24, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f56, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f26, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f58, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f62, [ADDR + CPU_TMP1]		;\
done1:
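/*
 * STDF_REG above (and LDDF_REG below) are computed gotos over the FP
 * register file.  In loose C terms (conceptual only):
 *
 *	// cpu_tmp1 = fpregs[rd], honoring the V9 double-register encoding
 *	goto *(&start1 + rd * 8);	// 2 instructions == 8 bytes/entry
 *
 * Each entry stores one fixed register and branches to done1; a table
 * is required because SPARC FP register numbers must be encoded in the
 * instruction itself.
 */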
#define	LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start2:	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f62		;\
done2:
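/*
 * The two handlers below emulate misaligned lddf/stdf against user
 * memory.  A rough C model of the data movement (sketch only; load32u
 * and store32u are hypothetical stand-ins for the ASI_USER accesses):
 *
 *	// lddf: fetch two aligned 32-bit halves and combine
 *	uint64_t v = ((uint64_t)load32u(va) << 32) | load32u(va + 4);
 *	fpregs[rd] = v;				// via LDDF_REG
 *
 *	// stdf: split the register and store two 32-bit halves
 *	store32u(va, (uint32_t)(fpregs[rd] >> 32));
 *	store32u(va + 4, (uint32_t)fpregs[rd]);
 */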
.lddf_exception_not_aligned:
	/*
	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
	 */
	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2	! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
	srl	%g6, 23, %g1		! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for ldda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_PNF		! primary no fault address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_SNF		! secondary no fault address space
	bne,a,pn %icc, 3f
	nop
2:
	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
	add	%g5, 4, %g5		! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5		! combine data
	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1

	srl	%g6, 25, %g3		! %g6 has the instruction
	and	%g3, 0x1F, %g3		! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4

.stdf_exception_not_aligned:
	/*
	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
	 */
	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7	! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction

	srl	%g6, 23, %g1		! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for stda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	bne,a,pn %icc, 3f
	nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6		! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER	! first half
	add	%g5, 4, %g5		! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER	! second half

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! nofault little asi's
	sub	%g0, 1, %g4

#ifdef DEBUG_USER_TRAPTRACECTL

.traptrace_freeze:
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]
	FAST_TRAP_DONE

.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	FAST_TRAP_DONE

#endif /* DEBUG_USER_TRAPTRACECTL */
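/*
 * The fast traps below (.getcc/.setcc) exchange values with the user's
 * normal %g registers while the handler itself runs on the alternate
 * globals.  Since a two-operand wrpr writes rs1 XOR'ed with its second
 * operand, PSTATE_AG is toggled around the access; loosely, in C:
 *
 *	pstate ^= PSTATE_AG;	// user's normal globals become visible
 *	g1 = icc_bits;		// e.g. .getcc result lands in user %g1
 *	pstate ^= PSTATE_AG;	// back to the handler's alternates
 */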
.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	rdpr	%pstate, %o1
	wrpr	%o1, PSTATE_AG, %pstate		! get into normal globals
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, %o1, %pstate		! back into alternate globals
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1
	FAST_TRAP_DONE

.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
	rdpr	%pstate, %o0
	wrpr	%o0, PSTATE_AG, %pstate		! get into normal globals
	mov	%g1, %o1
	wrpr	%g0, %o0, %pstate		! back to alternates
	sll	%o1, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1
	FAST_TRAP_DONE

/*
 * getpsr(void)
 * Note that the xcc part of the ccr is not provided.
 * The V8 code shows why the V9 trap is not faster:
 * #define GETPSR_TRAP() \
 *	mov %psr, %i0; jmp %l2; rett %l2+4; nop;
 */

	.type	.getpsr, #function
.getpsr:
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)

/*
 * setpsr(newpsr)
 * Note that there is no support for ccr.xcc in the V9 code.
 */

	.type	.setpsr, #function
.setpsr:
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or	%g0, CCR_ICC, %g3
	sllx	%g3, TSTATE_CCR_SHIFT, %g2

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)
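/*
 * The V8 PSR emulation above in compact C form (hedged sketch; the
 * shift/mask names come from the headers included at the top of this
 * file):
 *
 *	// getpsr: icc from TSTATE.CCR, psr.ef from FPRS.FEF
 *	psr  = (tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
 *	psr |= (fprs & FPRS_FEF) << PSR_FPRS_FEF_SHIFT;
 *	psr |= V9_PSR_IMPLVER;
 *
 *	// setpsr: the inverse, updating TSTATE.CCR.icc and FPRS.FEF
 *	tstate = (tstate & ~((uint64_t)CCR_ICC << TSTATE_CCR_SHIFT)) |
 *	    ((uint64_t)(newpsr & PSR_ICC) << PSR_TSTATE_CC_SHIFT);
 *	fprs = (newpsr & PSR_EF) >> PSR_FPRS_FEF_SHIFT;
 */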
/*
 * getlgrp
 * get home lgrpid on which the calling thread is currently executing.
 */
	.type	.getlgrp, #function
.getlgrp:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)

/*
 * Entry for old 4.x trap (trap 0).
 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f			! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%tnpc, %l1		! save old tnpc
	wrpr	%g0, %g2, %tnpc		! setup tnpc

	rdpr	%pstate, %l0
	wrpr	%l0, PSTATE_AG, %pstate	! switch to normal globals
	mov	%l1, %g6		! pass tnpc to user code in %g6
	wrpr	%l0, %g0, %pstate	! switch back to alternate globals

	! Note that %g1 still contains CPU struct addr
	ld	[%g1 + CPU_TMP2], %l1	! restore locals
	ld	[%g1 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	mov	%g1, %l0
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%pstate, %l1
	wrpr	%l1, PSTATE_AG, %pstate
	!
	! Check for the old mmap syscall, the only one whose number must be
	! translated here.  The others are handled in the compatibility
	! library.
	!
	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1
	wrpr	%g0, %l1, %pstate
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)

/*
 * Handler for software trap 9.
 * Set trap0 emulation address for old 4.x system call trap.
 * XXX - this should be a system call.
 */
	ENTRY_NP(set_trap0_addr)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%pstate, %l0
	wrpr	%l0, PSTATE_AG, %pstate
	mov	%g1, %l1
	wrpr	%g0, %l0, %pstate
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%g1 + CPU_TMP1], %l0	! restore locals
	ld	[%g1 + CPU_TMP2], %l1
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)
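/*
 * A loose C sketch of the 4.x trap 0 redirection implemented by
 * syscall_trap_4x above (conceptual only):
 *
 *	if (lwp->lwp_pcb.pcb_trap0addr != 0) {
 *		g6 = tnpc;		// old return point, handed to user
 *		tnpc = lwp->lwp_pcb.pcb_trap0addr;	// emulation code
 *	} else {
 *		if (g1 == OSYS_mmap)	// only mmap keeps its old number
 *			g1 = SYS_mmap;
 *		goto syscall_trap32;	// normal 32-bit syscall path
 *	}
 */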
/*
 * mmu_trap_tl1
 * trap handler for unexpected mmu traps.
 * It checks whether the trap was a user lddf/stdf alignment trap (in which
 * case we go to fpu_trap) or a user trap from the window handler (in which
 * case we save the state on the pcb).  Otherwise, we go to ptl1_panic.
 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef	TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	set	MMU_SFAR, %g6
	ldxa	[%g6]ASI_DMMU, %g6
	stxa	%g6, [%g5 + TRAP_ENT_F1]%asi
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
	set	0xdeadbeef, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */

	GET_CPU_IMPL(%g5)
	cmp	%g5, PANTHER_IMPL
	bne	mmu_trap_tl1_4
	nop
	rdpr	%tt, %g5
	cmp	%g5, T_DATA_EXCEPTION
	bne	mmu_trap_tl1_4
	nop
	wr	%g0, ASI_DMMU, %asi
	ldxa	[MMU_SFSR]%asi, %g5
	mov	1, %g6
	sllx	%g6, PN_SFSR_PARITY_SHIFT, %g6
	andcc	%g5, %g6, %g0
	bz	mmu_trap_tl1_4

	/*
	 * We are running on a Panther and have hit a DTLB parity error.
	 */
	ldxa	[MMU_TAG_ACCESS]%asi, %g2
	mov	%g5, %g3
	ba,pt	%xcc, .mmu_exception_is_tlb_parity
	mov	T_DATA_EXCEPTION, %g1

mmu_trap_tl1_4:
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	/*
	 * AM is cleared on trap, so addresses are 64 bit
	 */
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f
	nop
	/*
	 * We are going to update cpu_m.tl1_hdlr using physical address.
	 * Flush the D$ line, so that stale data won't be accessed later.
	 */
	CPU_ADDR(%g6, %g5)
	add	%g6, CPU_TL1_HDLR, %g6		! %g6 = &cpu_m.tl1_hdlr (VA)
	GET_CPU_IMPL(%g5)
	cmp	%g5, CHEETAH_IMPL
	bl,pt	%icc, 3f
	cmp	%g5, SPITFIRE_IMPL
	stxa	%g0, [%g7]ASI_DC_INVAL
	membar	#Sync
	ba,pt	%xcc, 2f
	nop
3:
	bl,pt	%icc, 2f
	sethi	%hi(dcache_line_mask), %g5
	ld	[%g5 + %lo(dcache_line_mask)], %g5
	and	%g6, %g5, %g5
	stxa	%g0, [%g5]ASI_DC_TAG
	membar	#Sync
2:
	sta	%g0, [%g7]ASI_MEM
	SWITCH_GLOBALS				! back to mmu globals
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tt, %g5
	rdpr	%tl, %g7
	sub	%g7, 1, %g6
	wrpr	%g6, %tl
	rdpr	%tt, %g6
	wrpr	%g7, %tl
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blt,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bge,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	cmp	%g5, T_ALIGNMENT
	move	%icc, MMU_SFAR, %g6
	movne	%icc, MMU_TAG_ACCESS, %g6
	ldxa	[%g6]ASI_DMMU, %g6
	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7
	wrpr	%g0, %g7, %tnpc
	done
	SET_SIZE(mmu_trap_tl1)
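/*
 * The resume computation at the end of mmu_trap_tl1, as a hedged C
 * sketch: window handlers are 128-byte aligned, so the recovery code of
 * the handler that trapped sits at a fixed offset from its start:
 *
 *	if ((tt_at_prev_tl & WTRAP_TTMASK) != WTRAP_TYPE)
 *		ptl1_panic(PTL1_BAD_MMUTRAP);	// not a window trap
 *	tnpc = (tpc & ~WTRAP_ALIGN) + WTRAP_FAULTOFF;
 */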
/*
 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
 * traps are valid only when kmdb is loaded.  When the debugger is active,
 * the code below is rewritten to transfer control to the appropriate
 * debugger entry points.
 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

/*
 * This entry is copied from OBP's trap table during boot.
 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT

/*
 * if kernel, set PCONTEXT to 0 for debuggers
 * if user, clear nucleus page sizes
 */
	.global	kctx_obp_bpt
kctx_obp_bpt:
	set	obp_bpt, %g2
1:
	mov	MMU_PCONTEXT, %g1
	ldxa	[%g1]ASI_DMMU, %g1
	srlx	%g1, CTXREG_NEXT_SHIFT, %g3
	brz,pt	%g3, 3f			! nucleus pgsz is 0, no problem
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3
	set	CTXREG_CTX_MASK, %g4	! check Pcontext
	btst	%g4, %g1
	bz,a,pt	%xcc, 2f
	clr	%g3			! kernel:  PCONTEXT=0
	xor	%g3, %g1, %g3		! user:	clr N_pgsz0/1 bits
2:
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	mov	MMU_PCONTEXT, %g1
	stxa	%g3, [%g1]ASI_DMMU
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g1
	flush	%g1			! flush required by immu
3:
	jmp	%g2
	nop


#ifdef TRAPTRACE
/*
 * TRAPTRACE support.
 * labels here are branched to with "rd %pc, %g7" in the delay slot.
 * Return is done by "jmp %g7 + 4".
 */

trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g3 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	clr	%l0

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */

	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)
	jmp	%g7 + 4
	nop

/*
 * Trace a TSB miss
 *
 * g1 = tsb8k pointer (in)
 * g2 = tag access register (in)
 * g3 = tsb4m pointer (in)
 * g4 = tsbe tag (in/clobbered)
 * g5 - g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	.global	trace_tsbmiss
trace_tsbmiss:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	stxa	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
	stxa	%g4, [%g5 + TRAP_ENT_F1]%asi		! tsb tag
	rdpr	%tnpc, %g6
	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
	srlx	%g1, 32, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! huh?
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a	%icc, 1f
	ldxa	[%g0]ASI_IMMU, %g6
	ldxa	[%g0]ASI_DMMU, %g6
1:	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
	stxa	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop
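/*
 * The fields the trace routines above record, summarized as a hedged C
 * sketch (the struct is illustrative, not the real TRAP_ENT layout):
 *
 *	struct trace_rec {
 *		uint64_t	tick, tstate;	// TRAP_ENT_TICK/_TSTATE
 *		uint16_t	tl, tt;		// TRAP_ENT_TL/_TT
 *		uintptr_t	sp, tpc;	// TRAP_ENT_SP/_TPC
 *		uint64_t	f1, f2, f3, f4;	// handler-specific extras
 *	};
 */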
/*
 * g2 = tag access register (in)
 * g3 = ctx number (in)
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stxa	%g2, [%g1 + TRAP_ENT_SP]%asi	! tag access reg
	stxa	%g0, [%g1 + TRAP_ENT_TR]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F1]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F2]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F3]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F4]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g1 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop

#endif /* TRAPTRACE */

/*
 * expects offset into tsbmiss area in %g1 and return pc in %g7
 */
stat_mmu:
	CPU_INDEX(%g5, %g6)
	sethi	%hi(tsbmiss_area), %g6
	sllx	%g5, TSBMISS_SHIFT, %g5
	or	%g6, %lo(tsbmiss_area), %g6
	add	%g6, %g5, %g6		/* g6 = tsbmiss area */
	ld	[%g6 + %g1], %g5
	add	%g5, 1, %g5
	jmp	%g7 + 4
	st	%g5, [%g6 + %g1]


/*
 * fast_trap_done, fast_trap_done_chk_intr:
 *
 * Due to the design of the UltraSPARC pipeline, pending interrupts are
 * not taken immediately after a RETRY or DONE instruction which causes
 * IE to go from 0 to 1. Instead, the instruction at %tpc or %tnpc is
 * allowed to execute first before taking any interrupts. If that
 * instruction results in other traps, and if the corresponding trap
 * handler runs entirely at TL=1 with interrupts disabled, then pending
 * interrupts won't be taken until after yet another instruction
 * following the %tpc or %tnpc.
 *
 * A malicious user program can use this feature to block out interrupts
 * for extended durations, which can result in a send_mondo_timeout
 * kernel panic.
 *
 * This problem is addressed by servicing any pending interrupts via
 * sys_trap before returning to user mode from a fast trap handler. The
 * "done" instruction within a fast trap handler, which runs entirely at
 * TL=1 with interrupts disabled, is replaced with the FAST_TRAP_DONE
 * macro, which branches control to this fast_trap_done entry point.
 *
 * We check for any pending interrupts here and force a sys_trap to
 * service those interrupts, if any. To minimize overhead, pending
 * interrupts are checked only if the %tpc happens to be at a 16K
 * boundary, which allows a malicious program to execute at most 4K
 * consecutive instructions before we service any pending interrupts.
 * If a worst case fast trap handler takes about 2 usec, then interrupts
 * will be blocked for at most 8 msec, less than a clock tick.
 *
 * For the cases where we don't know if the %tpc will cross a 16K
 * boundary, we can't use the above optimization and always process
 * any pending interrupts via the fast_trap_done_chk_intr entry point.
 *
 * Entry Conditions:
 *	%pstate		am:0 priv:1 ie:0
 *			globals are AG (not normal globals)
 */

	.global	fast_trap_done, fast_trap_done_chk_intr
fast_trap_done:
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,a,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g5
	done

	ALTENTRY(fast_trap_done_check_interrupts)
fast_trap_done_chk_intr:
	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g5

1:	rd	SOFTINT, %g6
	and	%g5, IRSR_BUSY, %g5
	orcc	%g5, %g6, %g0
	bnz,pn	%xcc, 2f		! branch if any pending intr
	nop
	done
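/*
 * The boundary test in fast_trap_done above, in C (sketch):
 *
 *	if ((tpc & 0x3fff) == 0)	// %tpc on a 16K boundary?
 *		check_pending_intr();	// 16K / 4 bytes == 4K instructions
 *
 * so at most 4K instructions can retire between interrupt checks.
 */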
2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

fast_trap_dummy_call:
	retl
	nop

#ifdef DEBUG
/*
 * Currently we only support syscall interposition for branded zones on
 * DEBUG kernels. The only brand that makes use of this functionality is
 * the fake Solaris 10 brand. Since this brand is only used for exercising
 * the framework, we don't want this overhead incurred on production
 * systems.
 */
#define	BRAND_CALLBACK(callback_id)					    \
	CPU_ADDR(%g1, %g2)		/* load CPU struct addr to %g1 */  ;\
	ldn	[%g1 + CPU_THREAD], %g2	/* load thread pointer */	   ;\
	ldn	[%g2 + T_PROCP], %g2	/* get proc pointer */		   ;\
	ldn	[%g2 + P_BRAND], %g2	/* get brand pointer */		   ;\
	brz	%g2, 1f			/* No brand? No callback. */	   ;\
	nop								   ;\
	ldn	[%g2 + B_MACHOPS], %g2	/* get machops list */		   ;\
	ldn	[%g2 + (callback_id << 3)], %g2				   ;\
	brz	%g2, 1f							   ;\
	/*								    \
	 * This isn't pretty.  We want a low-latency way for the callback  \
	 * routine to decline to do anything.  We just pass in an address  \
	 * the routine can directly jmp back to, pretending that nothing   \
	 * has happened.						    \
	 */								    \
	mov	%pc, %g1						   ;\
	add	%g1, 16, %g1						   ;\
	jmp	%g2							   ;\
	nop								   ;\
1:

	ENTRY_NP(syscall_wrapper32)
	BRAND_CALLBACK(BRAND_CB_SYSCALL32)
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_wrapper32)

	ENTRY_NP(syscall_wrapper)
	BRAND_CALLBACK(BRAND_CB_SYSCALL)
	SYSCALL(syscall_trap)
	SET_SIZE(syscall_wrapper)

#endif	/* DEBUG */

#endif	/* lint */