1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26#pragma ident "%Z%%M% %I% %E% SMI" 27 28#if !defined(lint) 29#include "assym.h" 30#endif /* !lint */ 31#include <sys/asm_linkage.h> 32#include <sys/privregs.h> 33#include <sys/sun4asi.h> 34#include <sys/spitregs.h> 35#include <sys/cheetahregs.h> 36#include <sys/machtrap.h> 37#include <sys/machthread.h> 38#include <sys/machbrand.h> 39#include <sys/pcb.h> 40#include <sys/pte.h> 41#include <sys/mmu.h> 42#include <sys/machpcb.h> 43#include <sys/async.h> 44#include <sys/intreg.h> 45#include <sys/scb.h> 46#include <sys/psr_compat.h> 47#include <sys/syscall.h> 48#include <sys/machparam.h> 49#include <sys/traptrace.h> 50#include <vm/hat_sfmmu.h> 51#include <sys/archsystm.h> 52#include <sys/utrap.h> 53#include <sys/clock.h> 54#include <sys/intr.h> 55#include <sys/fpu/fpu_simulator.h> 56#include <vm/seg_spt.h> 57 58/* 59 * WARNING: If you add a fast trap handler which can be invoked by a 60 * non-privileged user, you may have to use the FAST_TRAP_DONE macro 61 * instead of "done" instruction to return back to the user mode. See 62 * comments for the "fast_trap_done" entry point for more information. 63 * 64 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the 65 * cases where you always want to process any pending interrupts before 66 * returning back to the user mode. 67 */ 68#define FAST_TRAP_DONE \ 69 ba,a fast_trap_done 70 71#define FAST_TRAP_DONE_CHK_INTR \ 72 ba,a fast_trap_done_chk_intr 73 74/* 75 * SPARC V9 Trap Table 76 * 77 * Most of the trap handlers are made from common building 78 * blocks, and some are instantiated multiple times within 79 * the trap table. So, I build a bunch of macros, then 80 * populate the table using only the macros. 81 * 82 * Many macros branch to sys_trap. Its calling convention is: 83 * %g1 kernel trap handler 84 * %g2, %g3 args for above 85 * %g4 desire %pil 86 */ 87 88#ifdef TRAPTRACE 89 90/* 91 * Tracing macro. Adds two instructions if TRAPTRACE is defined. 92 */ 93#define TT_TRACE(label) \ 94 ba label ;\ 95 rd %pc, %g7 96#define TT_TRACE_INS 2 97 98#define TT_TRACE_L(label) \ 99 ba label ;\ 100 rd %pc, %l4 ;\ 101 clr %l4 102#define TT_TRACE_L_INS 3 103 104#else 105 106#define TT_TRACE(label) 107#define TT_TRACE_INS 0 108 109#define TT_TRACE_L(label) 110#define TT_TRACE_L_INS 0 111 112#endif 113 114/* 115 * This first set are funneled to trap() with %tt as the type. 116 * Trap will then either panic or send the user a signal. 117 */ 118/* 119 * NOT is used for traps that just shouldn't happen. 120 * It comes in both single and quadruple flavors. 121 */ 122#if !defined(lint) 123 .global trap 124#endif /* !lint */ 125#define NOT \ 126 TT_TRACE(trace_gen) ;\ 127 set trap, %g1 ;\ 128 rdpr %tt, %g3 ;\ 129 ba,pt %xcc, sys_trap ;\ 130 sub %g0, 1, %g4 ;\ 131 .align 32 132#define NOT4 NOT; NOT; NOT; NOT 133/* 134 * RED is for traps that use the red mode handler. 135 * We should never see these either. 136 */ 137#define RED NOT 138/* 139 * BAD is used for trap vectors we don't have a kernel 140 * handler for. 141 * It also comes in single and quadruple versions. 142 */ 143#define BAD NOT 144#define BAD4 NOT4 145 146#define DONE \ 147 done; \ 148 .align 32 149 150/* 151 * TRAP vectors to the trap() function. 152 * It's main use is for user errors. 153 */ 154#if !defined(lint) 155 .global trap 156#endif /* !lint */ 157#define TRAP(arg) \ 158 TT_TRACE(trace_gen) ;\ 159 set trap, %g1 ;\ 160 mov arg, %g3 ;\ 161 ba,pt %xcc, sys_trap ;\ 162 sub %g0, 1, %g4 ;\ 163 .align 32 164 165/* 166 * SYSCALL is used for unsupported syscall interfaces (with 'which' 167 * set to 'nosys') and legacy support of old SunOS 4.x syscalls (with 168 * 'which' set to 'syscall_trap32'). 169 * 170 * The SYSCALL_TRAP* macros are used for syscall entry points. 171 * SYSCALL_TRAP is used to support LP64 syscalls and SYSCALL_TRAP32 172 * is used to support ILP32. Each macro can only be used once 173 * since they each define a symbol. The symbols are used as hot patch 174 * points by the brand infrastructure to dynamically enable and disable 175 * brand syscall interposition. See the comments around BRAND_CALLBACK 176 * and brand_plat_interposition_enable() for more information. 177 */ 178#define SYSCALL_NOTT(which) \ 179 set (which), %g1 ;\ 180 ba,pt %xcc, sys_trap ;\ 181 sub %g0, 1, %g4 ;\ 182 .align 32 183 184#define SYSCALL(which) \ 185 TT_TRACE(trace_gen) ;\ 186 SYSCALL_NOTT(which) 187 188#define SYSCALL_TRAP32 \ 189 TT_TRACE(trace_gen) ;\ 190 ALTENTRY(syscall_trap32_patch_point) \ 191 SYSCALL_NOTT(syscall_trap32) 192 193#define SYSCALL_TRAP \ 194 TT_TRACE(trace_gen) ;\ 195 ALTENTRY(syscall_trap_patch_point) \ 196 SYSCALL_NOTT(syscall_trap) 197 198#define FLUSHW(h_name) \ 199 .global h_name ;\ 200h_name: ;\ 201 set trap, %g1 ;\ 202 mov T_FLUSHW, %g3 ;\ 203 sub %g0, 1, %g4 ;\ 204 save ;\ 205 flushw ;\ 206 restore ;\ 207 FAST_TRAP_DONE ;\ 208 .align 32 209 210/* 211 * GOTO just jumps to a label. 212 * It's used for things that can be fixed without going thru sys_trap. 213 */ 214#define GOTO(label) \ 215 .global label ;\ 216 ba,a label ;\ 217 .empty ;\ 218 .align 32 219 220/* 221 * GOTO_TT just jumps to a label. 222 * correctable ECC error traps at level 0 and 1 will use this macro. 223 * It's used for things that can be fixed without going thru sys_trap. 224 */ 225#define GOTO_TT(label, ttlabel) \ 226 .global label ;\ 227 TT_TRACE(ttlabel) ;\ 228 ba,a label ;\ 229 .empty ;\ 230 .align 32 231 232/* 233 * Privileged traps 234 * Takes breakpoint if privileged, calls trap() if not. 235 */ 236#define PRIV(label) \ 237 rdpr %tstate, %g1 ;\ 238 btst TSTATE_PRIV, %g1 ;\ 239 bnz label ;\ 240 rdpr %tt, %g3 ;\ 241 set trap, %g1 ;\ 242 ba,pt %xcc, sys_trap ;\ 243 sub %g0, 1, %g4 ;\ 244 .align 32 245 246 247/* 248 * DTrace traps. 249 */ 250#define DTRACE_PID \ 251 .global dtrace_pid_probe ;\ 252 set dtrace_pid_probe, %g1 ;\ 253 ba,pt %xcc, user_trap ;\ 254 sub %g0, 1, %g4 ;\ 255 .align 32 256 257#define DTRACE_RETURN \ 258 .global dtrace_return_probe ;\ 259 set dtrace_return_probe, %g1 ;\ 260 ba,pt %xcc, user_trap ;\ 261 sub %g0, 1, %g4 ;\ 262 .align 32 263 264/* 265 * REGISTER WINDOW MANAGEMENT MACROS 266 */ 267 268/* 269 * various convenient units of padding 270 */ 271#define SKIP(n) .skip 4*(n) 272 273/* 274 * CLEAN_WINDOW is the simple handler for cleaning a register window. 275 */ 276#define CLEAN_WINDOW \ 277 TT_TRACE_L(trace_win) ;\ 278 rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin ;\ 279 clr %l0; clr %l1; clr %l2; clr %l3 ;\ 280 clr %l4; clr %l5; clr %l6; clr %l7 ;\ 281 clr %o0; clr %o1; clr %o2; clr %o3 ;\ 282 clr %o4; clr %o5; clr %o6; clr %o7 ;\ 283 retry; .align 128 284 285#if !defined(lint) 286 287/* 288 * If we get an unresolved tlb miss while in a window handler, the fault 289 * handler will resume execution at the last instruction of the window 290 * hander, instead of delivering the fault to the kernel. Spill handlers 291 * use this to spill windows into the wbuf. 292 * 293 * The mixed handler works by checking %sp, and branching to the correct 294 * handler. This is done by branching back to label 1: for 32b frames, 295 * or label 2: for 64b frames; which implies the handler order is: 32b, 296 * 64b, mixed. The 1: and 2: labels are offset into the routines to 297 * allow the branchs' delay slots to contain useful instructions. 298 */ 299 300/* 301 * SPILL_32bit spills a 32-bit-wide kernel register window. It 302 * assumes that the kernel context and the nucleus context are the 303 * same. The stack pointer is required to be eight-byte aligned even 304 * though this code only needs it to be four-byte aligned. 305 */ 306#define SPILL_32bit(tail) \ 307 srl %sp, 0, %sp ;\ 3081: st %l0, [%sp + 0] ;\ 309 st %l1, [%sp + 4] ;\ 310 st %l2, [%sp + 8] ;\ 311 st %l3, [%sp + 12] ;\ 312 st %l4, [%sp + 16] ;\ 313 st %l5, [%sp + 20] ;\ 314 st %l6, [%sp + 24] ;\ 315 st %l7, [%sp + 28] ;\ 316 st %i0, [%sp + 32] ;\ 317 st %i1, [%sp + 36] ;\ 318 st %i2, [%sp + 40] ;\ 319 st %i3, [%sp + 44] ;\ 320 st %i4, [%sp + 48] ;\ 321 st %i5, [%sp + 52] ;\ 322 st %i6, [%sp + 56] ;\ 323 st %i7, [%sp + 60] ;\ 324 TT_TRACE_L(trace_win) ;\ 325 saved ;\ 326 retry ;\ 327 SKIP(31-19-TT_TRACE_L_INS) ;\ 328 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 329 .empty 330 331/* 332 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit 333 * wide address space via the designated asi. It is used to spill 334 * non-kernel windows. The stack pointer is required to be eight-byte 335 * aligned even though this code only needs it to be four-byte 336 * aligned. 337 */ 338#define SPILL_32bit_asi(asi_num, tail) \ 339 srl %sp, 0, %sp ;\ 3401: sta %l0, [%sp + %g0]asi_num ;\ 341 mov 4, %g1 ;\ 342 sta %l1, [%sp + %g1]asi_num ;\ 343 mov 8, %g2 ;\ 344 sta %l2, [%sp + %g2]asi_num ;\ 345 mov 12, %g3 ;\ 346 sta %l3, [%sp + %g3]asi_num ;\ 347 add %sp, 16, %g4 ;\ 348 sta %l4, [%g4 + %g0]asi_num ;\ 349 sta %l5, [%g4 + %g1]asi_num ;\ 350 sta %l6, [%g4 + %g2]asi_num ;\ 351 sta %l7, [%g4 + %g3]asi_num ;\ 352 add %g4, 16, %g4 ;\ 353 sta %i0, [%g4 + %g0]asi_num ;\ 354 sta %i1, [%g4 + %g1]asi_num ;\ 355 sta %i2, [%g4 + %g2]asi_num ;\ 356 sta %i3, [%g4 + %g3]asi_num ;\ 357 add %g4, 16, %g4 ;\ 358 sta %i4, [%g4 + %g0]asi_num ;\ 359 sta %i5, [%g4 + %g1]asi_num ;\ 360 sta %i6, [%g4 + %g2]asi_num ;\ 361 sta %i7, [%g4 + %g3]asi_num ;\ 362 TT_TRACE_L(trace_win) ;\ 363 saved ;\ 364 retry ;\ 365 SKIP(31-25-TT_TRACE_L_INS) ;\ 366 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 367 .empty 368 369/* 370 * SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit 371 * wide address space via the designated asi. It is used to spill 372 * windows at tl>1 where performance isn't the primary concern and 373 * where we don't want to use unnecessary registers. The stack 374 * pointer is required to be eight-byte aligned even though this code 375 * only needs it to be four-byte aligned. 376 */ 377#define SPILL_32bit_tt1(asi_num, tail) \ 378 mov asi_num, %asi ;\ 3791: srl %sp, 0, %sp ;\ 380 sta %l0, [%sp + 0]%asi ;\ 381 sta %l1, [%sp + 4]%asi ;\ 382 sta %l2, [%sp + 8]%asi ;\ 383 sta %l3, [%sp + 12]%asi ;\ 384 sta %l4, [%sp + 16]%asi ;\ 385 sta %l5, [%sp + 20]%asi ;\ 386 sta %l6, [%sp + 24]%asi ;\ 387 sta %l7, [%sp + 28]%asi ;\ 388 sta %i0, [%sp + 32]%asi ;\ 389 sta %i1, [%sp + 36]%asi ;\ 390 sta %i2, [%sp + 40]%asi ;\ 391 sta %i3, [%sp + 44]%asi ;\ 392 sta %i4, [%sp + 48]%asi ;\ 393 sta %i5, [%sp + 52]%asi ;\ 394 sta %i6, [%sp + 56]%asi ;\ 395 sta %i7, [%sp + 60]%asi ;\ 396 TT_TRACE_L(trace_win) ;\ 397 saved ;\ 398 retry ;\ 399 SKIP(31-20-TT_TRACE_L_INS) ;\ 400 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 401 .empty 402 403 404/* 405 * FILL_32bit fills a 32-bit-wide kernel register window. It assumes 406 * that the kernel context and the nucleus context are the same. The 407 * stack pointer is required to be eight-byte aligned even though this 408 * code only needs it to be four-byte aligned. 409 */ 410#define FILL_32bit(tail) \ 411 srl %sp, 0, %sp ;\ 4121: TT_TRACE_L(trace_win) ;\ 413 ld [%sp + 0], %l0 ;\ 414 ld [%sp + 4], %l1 ;\ 415 ld [%sp + 8], %l2 ;\ 416 ld [%sp + 12], %l3 ;\ 417 ld [%sp + 16], %l4 ;\ 418 ld [%sp + 20], %l5 ;\ 419 ld [%sp + 24], %l6 ;\ 420 ld [%sp + 28], %l7 ;\ 421 ld [%sp + 32], %i0 ;\ 422 ld [%sp + 36], %i1 ;\ 423 ld [%sp + 40], %i2 ;\ 424 ld [%sp + 44], %i3 ;\ 425 ld [%sp + 48], %i4 ;\ 426 ld [%sp + 52], %i5 ;\ 427 ld [%sp + 56], %i6 ;\ 428 ld [%sp + 60], %i7 ;\ 429 restored ;\ 430 retry ;\ 431 SKIP(31-19-TT_TRACE_L_INS) ;\ 432 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 433 .empty 434 435/* 436 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit 437 * wide address space via the designated asi. It is used to fill 438 * non-kernel windows. The stack pointer is required to be eight-byte 439 * aligned even though this code only needs it to be four-byte 440 * aligned. 441 */ 442#define FILL_32bit_asi(asi_num, tail) \ 443 srl %sp, 0, %sp ;\ 4441: TT_TRACE_L(trace_win) ;\ 445 mov 4, %g1 ;\ 446 lda [%sp + %g0]asi_num, %l0 ;\ 447 mov 8, %g2 ;\ 448 lda [%sp + %g1]asi_num, %l1 ;\ 449 mov 12, %g3 ;\ 450 lda [%sp + %g2]asi_num, %l2 ;\ 451 lda [%sp + %g3]asi_num, %l3 ;\ 452 add %sp, 16, %g4 ;\ 453 lda [%g4 + %g0]asi_num, %l4 ;\ 454 lda [%g4 + %g1]asi_num, %l5 ;\ 455 lda [%g4 + %g2]asi_num, %l6 ;\ 456 lda [%g4 + %g3]asi_num, %l7 ;\ 457 add %g4, 16, %g4 ;\ 458 lda [%g4 + %g0]asi_num, %i0 ;\ 459 lda [%g4 + %g1]asi_num, %i1 ;\ 460 lda [%g4 + %g2]asi_num, %i2 ;\ 461 lda [%g4 + %g3]asi_num, %i3 ;\ 462 add %g4, 16, %g4 ;\ 463 lda [%g4 + %g0]asi_num, %i4 ;\ 464 lda [%g4 + %g1]asi_num, %i5 ;\ 465 lda [%g4 + %g2]asi_num, %i6 ;\ 466 lda [%g4 + %g3]asi_num, %i7 ;\ 467 restored ;\ 468 retry ;\ 469 SKIP(31-25-TT_TRACE_L_INS) ;\ 470 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 471 .empty 472 473/* 474 * FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit 475 * wide address space via the designated asi. It is used to fill 476 * windows at tl>1 where performance isn't the primary concern and 477 * where we don't want to use unnecessary registers. The stack 478 * pointer is required to be eight-byte aligned even though this code 479 * only needs it to be four-byte aligned. 480 */ 481#define FILL_32bit_tt1(asi_num, tail) \ 482 mov asi_num, %asi ;\ 4831: srl %sp, 0, %sp ;\ 484 TT_TRACE_L(trace_win) ;\ 485 lda [%sp + 0]%asi, %l0 ;\ 486 lda [%sp + 4]%asi, %l1 ;\ 487 lda [%sp + 8]%asi, %l2 ;\ 488 lda [%sp + 12]%asi, %l3 ;\ 489 lda [%sp + 16]%asi, %l4 ;\ 490 lda [%sp + 20]%asi, %l5 ;\ 491 lda [%sp + 24]%asi, %l6 ;\ 492 lda [%sp + 28]%asi, %l7 ;\ 493 lda [%sp + 32]%asi, %i0 ;\ 494 lda [%sp + 36]%asi, %i1 ;\ 495 lda [%sp + 40]%asi, %i2 ;\ 496 lda [%sp + 44]%asi, %i3 ;\ 497 lda [%sp + 48]%asi, %i4 ;\ 498 lda [%sp + 52]%asi, %i5 ;\ 499 lda [%sp + 56]%asi, %i6 ;\ 500 lda [%sp + 60]%asi, %i7 ;\ 501 restored ;\ 502 retry ;\ 503 SKIP(31-20-TT_TRACE_L_INS) ;\ 504 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 505 .empty 506 507 508/* 509 * SPILL_64bit spills a 64-bit-wide kernel register window. It 510 * assumes that the kernel context and the nucleus context are the 511 * same. The stack pointer is required to be eight-byte aligned. 512 */ 513#define SPILL_64bit(tail) \ 5142: stx %l0, [%sp + V9BIAS64 + 0] ;\ 515 stx %l1, [%sp + V9BIAS64 + 8] ;\ 516 stx %l2, [%sp + V9BIAS64 + 16] ;\ 517 stx %l3, [%sp + V9BIAS64 + 24] ;\ 518 stx %l4, [%sp + V9BIAS64 + 32] ;\ 519 stx %l5, [%sp + V9BIAS64 + 40] ;\ 520 stx %l6, [%sp + V9BIAS64 + 48] ;\ 521 stx %l7, [%sp + V9BIAS64 + 56] ;\ 522 stx %i0, [%sp + V9BIAS64 + 64] ;\ 523 stx %i1, [%sp + V9BIAS64 + 72] ;\ 524 stx %i2, [%sp + V9BIAS64 + 80] ;\ 525 stx %i3, [%sp + V9BIAS64 + 88] ;\ 526 stx %i4, [%sp + V9BIAS64 + 96] ;\ 527 stx %i5, [%sp + V9BIAS64 + 104] ;\ 528 stx %i6, [%sp + V9BIAS64 + 112] ;\ 529 stx %i7, [%sp + V9BIAS64 + 120] ;\ 530 TT_TRACE_L(trace_win) ;\ 531 saved ;\ 532 retry ;\ 533 SKIP(31-18-TT_TRACE_L_INS) ;\ 534 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 535 .empty 536 537/* 538 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit 539 * wide address space via the designated asi. It is used to spill 540 * non-kernel windows. The stack pointer is required to be eight-byte 541 * aligned. 542 */ 543#define SPILL_64bit_asi(asi_num, tail) \ 544 mov 0 + V9BIAS64, %g1 ;\ 5452: stxa %l0, [%sp + %g1]asi_num ;\ 546 mov 8 + V9BIAS64, %g2 ;\ 547 stxa %l1, [%sp + %g2]asi_num ;\ 548 mov 16 + V9BIAS64, %g3 ;\ 549 stxa %l2, [%sp + %g3]asi_num ;\ 550 mov 24 + V9BIAS64, %g4 ;\ 551 stxa %l3, [%sp + %g4]asi_num ;\ 552 add %sp, 32, %g5 ;\ 553 stxa %l4, [%g5 + %g1]asi_num ;\ 554 stxa %l5, [%g5 + %g2]asi_num ;\ 555 stxa %l6, [%g5 + %g3]asi_num ;\ 556 stxa %l7, [%g5 + %g4]asi_num ;\ 557 add %g5, 32, %g5 ;\ 558 stxa %i0, [%g5 + %g1]asi_num ;\ 559 stxa %i1, [%g5 + %g2]asi_num ;\ 560 stxa %i2, [%g5 + %g3]asi_num ;\ 561 stxa %i3, [%g5 + %g4]asi_num ;\ 562 add %g5, 32, %g5 ;\ 563 stxa %i4, [%g5 + %g1]asi_num ;\ 564 stxa %i5, [%g5 + %g2]asi_num ;\ 565 stxa %i6, [%g5 + %g3]asi_num ;\ 566 stxa %i7, [%g5 + %g4]asi_num ;\ 567 TT_TRACE_L(trace_win) ;\ 568 saved ;\ 569 retry ;\ 570 SKIP(31-25-TT_TRACE_L_INS) ;\ 571 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 572 .empty 573 574/* 575 * SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit 576 * wide address space via the designated asi. It is used to spill 577 * windows at tl>1 where performance isn't the primary concern and 578 * where we don't want to use unnecessary registers. The stack 579 * pointer is required to be eight-byte aligned. 580 */ 581#define SPILL_64bit_tt1(asi_num, tail) \ 582 mov asi_num, %asi ;\ 5832: stxa %l0, [%sp + V9BIAS64 + 0]%asi ;\ 584 stxa %l1, [%sp + V9BIAS64 + 8]%asi ;\ 585 stxa %l2, [%sp + V9BIAS64 + 16]%asi ;\ 586 stxa %l3, [%sp + V9BIAS64 + 24]%asi ;\ 587 stxa %l4, [%sp + V9BIAS64 + 32]%asi ;\ 588 stxa %l5, [%sp + V9BIAS64 + 40]%asi ;\ 589 stxa %l6, [%sp + V9BIAS64 + 48]%asi ;\ 590 stxa %l7, [%sp + V9BIAS64 + 56]%asi ;\ 591 stxa %i0, [%sp + V9BIAS64 + 64]%asi ;\ 592 stxa %i1, [%sp + V9BIAS64 + 72]%asi ;\ 593 stxa %i2, [%sp + V9BIAS64 + 80]%asi ;\ 594 stxa %i3, [%sp + V9BIAS64 + 88]%asi ;\ 595 stxa %i4, [%sp + V9BIAS64 + 96]%asi ;\ 596 stxa %i5, [%sp + V9BIAS64 + 104]%asi ;\ 597 stxa %i6, [%sp + V9BIAS64 + 112]%asi ;\ 598 stxa %i7, [%sp + V9BIAS64 + 120]%asi ;\ 599 TT_TRACE_L(trace_win) ;\ 600 saved ;\ 601 retry ;\ 602 SKIP(31-19-TT_TRACE_L_INS) ;\ 603 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 604 .empty 605 606 607/* 608 * FILL_64bit fills a 64-bit-wide kernel register window. It assumes 609 * that the kernel context and the nucleus context are the same. The 610 * stack pointer is required to be eight-byte aligned. 611 */ 612#define FILL_64bit(tail) \ 6132: TT_TRACE_L(trace_win) ;\ 614 ldx [%sp + V9BIAS64 + 0], %l0 ;\ 615 ldx [%sp + V9BIAS64 + 8], %l1 ;\ 616 ldx [%sp + V9BIAS64 + 16], %l2 ;\ 617 ldx [%sp + V9BIAS64 + 24], %l3 ;\ 618 ldx [%sp + V9BIAS64 + 32], %l4 ;\ 619 ldx [%sp + V9BIAS64 + 40], %l5 ;\ 620 ldx [%sp + V9BIAS64 + 48], %l6 ;\ 621 ldx [%sp + V9BIAS64 + 56], %l7 ;\ 622 ldx [%sp + V9BIAS64 + 64], %i0 ;\ 623 ldx [%sp + V9BIAS64 + 72], %i1 ;\ 624 ldx [%sp + V9BIAS64 + 80], %i2 ;\ 625 ldx [%sp + V9BIAS64 + 88], %i3 ;\ 626 ldx [%sp + V9BIAS64 + 96], %i4 ;\ 627 ldx [%sp + V9BIAS64 + 104], %i5 ;\ 628 ldx [%sp + V9BIAS64 + 112], %i6 ;\ 629 ldx [%sp + V9BIAS64 + 120], %i7 ;\ 630 restored ;\ 631 retry ;\ 632 SKIP(31-18-TT_TRACE_L_INS) ;\ 633 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 634 .empty 635 636/* 637 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit 638 * wide address space via the designated asi. It is used to fill 639 * non-kernel windows. The stack pointer is required to be eight-byte 640 * aligned. 641 */ 642#define FILL_64bit_asi(asi_num, tail) \ 643 mov V9BIAS64 + 0, %g1 ;\ 6442: TT_TRACE_L(trace_win) ;\ 645 ldxa [%sp + %g1]asi_num, %l0 ;\ 646 mov V9BIAS64 + 8, %g2 ;\ 647 ldxa [%sp + %g2]asi_num, %l1 ;\ 648 mov V9BIAS64 + 16, %g3 ;\ 649 ldxa [%sp + %g3]asi_num, %l2 ;\ 650 mov V9BIAS64 + 24, %g4 ;\ 651 ldxa [%sp + %g4]asi_num, %l3 ;\ 652 add %sp, 32, %g5 ;\ 653 ldxa [%g5 + %g1]asi_num, %l4 ;\ 654 ldxa [%g5 + %g2]asi_num, %l5 ;\ 655 ldxa [%g5 + %g3]asi_num, %l6 ;\ 656 ldxa [%g5 + %g4]asi_num, %l7 ;\ 657 add %g5, 32, %g5 ;\ 658 ldxa [%g5 + %g1]asi_num, %i0 ;\ 659 ldxa [%g5 + %g2]asi_num, %i1 ;\ 660 ldxa [%g5 + %g3]asi_num, %i2 ;\ 661 ldxa [%g5 + %g4]asi_num, %i3 ;\ 662 add %g5, 32, %g5 ;\ 663 ldxa [%g5 + %g1]asi_num, %i4 ;\ 664 ldxa [%g5 + %g2]asi_num, %i5 ;\ 665 ldxa [%g5 + %g3]asi_num, %i6 ;\ 666 ldxa [%g5 + %g4]asi_num, %i7 ;\ 667 restored ;\ 668 retry ;\ 669 SKIP(31-25-TT_TRACE_L_INS) ;\ 670 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 671 .empty 672 673/* 674 * FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit 675 * wide address space via the designated asi. It is used to fill 676 * windows at tl>1 where performance isn't the primary concern and 677 * where we don't want to use unnecessary registers. The stack 678 * pointer is required to be eight-byte aligned. 679 */ 680#define FILL_64bit_tt1(asi_num, tail) \ 681 mov asi_num, %asi ;\ 682 TT_TRACE_L(trace_win) ;\ 683 ldxa [%sp + V9BIAS64 + 0]%asi, %l0 ;\ 684 ldxa [%sp + V9BIAS64 + 8]%asi, %l1 ;\ 685 ldxa [%sp + V9BIAS64 + 16]%asi, %l2 ;\ 686 ldxa [%sp + V9BIAS64 + 24]%asi, %l3 ;\ 687 ldxa [%sp + V9BIAS64 + 32]%asi, %l4 ;\ 688 ldxa [%sp + V9BIAS64 + 40]%asi, %l5 ;\ 689 ldxa [%sp + V9BIAS64 + 48]%asi, %l6 ;\ 690 ldxa [%sp + V9BIAS64 + 56]%asi, %l7 ;\ 691 ldxa [%sp + V9BIAS64 + 64]%asi, %i0 ;\ 692 ldxa [%sp + V9BIAS64 + 72]%asi, %i1 ;\ 693 ldxa [%sp + V9BIAS64 + 80]%asi, %i2 ;\ 694 ldxa [%sp + V9BIAS64 + 88]%asi, %i3 ;\ 695 ldxa [%sp + V9BIAS64 + 96]%asi, %i4 ;\ 696 ldxa [%sp + V9BIAS64 + 104]%asi, %i5 ;\ 697 ldxa [%sp + V9BIAS64 + 112]%asi, %i6 ;\ 698 ldxa [%sp + V9BIAS64 + 120]%asi, %i7 ;\ 699 restored ;\ 700 retry ;\ 701 SKIP(31-19-TT_TRACE_L_INS) ;\ 702 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 703 .empty 704 705#endif /* !lint */ 706 707/* 708 * SPILL_mixed spills either size window, depending on 709 * whether %sp is even or odd, to a 32-bit address space. 710 * This may only be used in conjunction with SPILL_32bit/ 711 * SPILL_64bit. New versions of SPILL_mixed_{tt1,asi} would be 712 * needed for use with SPILL_{32,64}bit_{tt1,asi}. Particular 713 * attention should be paid to the instructions that belong 714 * in the delay slots of the branches depending on the type 715 * of spill handler being branched to. 716 * Clear upper 32 bits of %sp if it is odd. 717 * We won't need to clear them in 64 bit kernel. 718 */ 719#define SPILL_mixed \ 720 btst 1, %sp ;\ 721 bz,a,pt %xcc, 1b ;\ 722 srl %sp, 0, %sp ;\ 723 ba,pt %xcc, 2b ;\ 724 nop ;\ 725 .align 128 726 727/* 728 * FILL_mixed(ASI) fills either size window, depending on 729 * whether %sp is even or odd, from a 32-bit address space. 730 * This may only be used in conjunction with FILL_32bit/ 731 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be 732 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular 733 * attention should be paid to the instructions that belong 734 * in the delay slots of the branches depending on the type 735 * of fill handler being branched to. 736 * Clear upper 32 bits of %sp if it is odd. 737 * We won't need to clear them in 64 bit kernel. 738 */ 739#define FILL_mixed \ 740 btst 1, %sp ;\ 741 bz,a,pt %xcc, 1b ;\ 742 srl %sp, 0, %sp ;\ 743 ba,pt %xcc, 2b ;\ 744 nop ;\ 745 .align 128 746 747 748/* 749 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows, 750 * respectively, into the address space via the designated asi. The 751 * unbiased stack pointer is required to be eight-byte aligned (even for 752 * the 32-bit case even though this code does not require such strict 753 * alignment). 754 * 755 * With SPARC v9 the spill trap takes precedence over the cleanwin trap 756 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save 757 * will cause cwp + 2 to be spilled but will not clean cwp + 1. That 758 * window may contain kernel data so in user_rtt we set wstate to call 759 * these spill handlers on the first user spill trap. These handler then 760 * spill the appropriate window but also back up a window and clean the 761 * window that didn't get a cleanwin trap. 762 */ 763#define SPILL_32clean(asi_num, tail) \ 764 srl %sp, 0, %sp ;\ 765 sta %l0, [%sp + %g0]asi_num ;\ 766 mov 4, %g1 ;\ 767 sta %l1, [%sp + %g1]asi_num ;\ 768 mov 8, %g2 ;\ 769 sta %l2, [%sp + %g2]asi_num ;\ 770 mov 12, %g3 ;\ 771 sta %l3, [%sp + %g3]asi_num ;\ 772 add %sp, 16, %g4 ;\ 773 sta %l4, [%g4 + %g0]asi_num ;\ 774 sta %l5, [%g4 + %g1]asi_num ;\ 775 sta %l6, [%g4 + %g2]asi_num ;\ 776 sta %l7, [%g4 + %g3]asi_num ;\ 777 add %g4, 16, %g4 ;\ 778 sta %i0, [%g4 + %g0]asi_num ;\ 779 sta %i1, [%g4 + %g1]asi_num ;\ 780 sta %i2, [%g4 + %g2]asi_num ;\ 781 sta %i3, [%g4 + %g3]asi_num ;\ 782 add %g4, 16, %g4 ;\ 783 sta %i4, [%g4 + %g0]asi_num ;\ 784 sta %i5, [%g4 + %g1]asi_num ;\ 785 sta %i6, [%g4 + %g2]asi_num ;\ 786 sta %i7, [%g4 + %g3]asi_num ;\ 787 TT_TRACE_L(trace_win) ;\ 788 b .spill_clean ;\ 789 mov WSTATE_USER32, %g7 ;\ 790 SKIP(31-25-TT_TRACE_L_INS) ;\ 791 ba,a,pt %xcc, fault_32bit_/**/tail ;\ 792 .empty 793 794#define SPILL_64clean(asi_num, tail) \ 795 mov 0 + V9BIAS64, %g1 ;\ 796 stxa %l0, [%sp + %g1]asi_num ;\ 797 mov 8 + V9BIAS64, %g2 ;\ 798 stxa %l1, [%sp + %g2]asi_num ;\ 799 mov 16 + V9BIAS64, %g3 ;\ 800 stxa %l2, [%sp + %g3]asi_num ;\ 801 mov 24 + V9BIAS64, %g4 ;\ 802 stxa %l3, [%sp + %g4]asi_num ;\ 803 add %sp, 32, %g5 ;\ 804 stxa %l4, [%g5 + %g1]asi_num ;\ 805 stxa %l5, [%g5 + %g2]asi_num ;\ 806 stxa %l6, [%g5 + %g3]asi_num ;\ 807 stxa %l7, [%g5 + %g4]asi_num ;\ 808 add %g5, 32, %g5 ;\ 809 stxa %i0, [%g5 + %g1]asi_num ;\ 810 stxa %i1, [%g5 + %g2]asi_num ;\ 811 stxa %i2, [%g5 + %g3]asi_num ;\ 812 stxa %i3, [%g5 + %g4]asi_num ;\ 813 add %g5, 32, %g5 ;\ 814 stxa %i4, [%g5 + %g1]asi_num ;\ 815 stxa %i5, [%g5 + %g2]asi_num ;\ 816 stxa %i6, [%g5 + %g3]asi_num ;\ 817 stxa %i7, [%g5 + %g4]asi_num ;\ 818 TT_TRACE_L(trace_win) ;\ 819 b .spill_clean ;\ 820 mov WSTATE_USER64, %g7 ;\ 821 SKIP(31-25-TT_TRACE_L_INS) ;\ 822 ba,a,pt %xcc, fault_64bit_/**/tail ;\ 823 .empty 824 825 826/* 827 * Floating point disabled. 828 */ 829#define FP_DISABLED_TRAP \ 830 TT_TRACE(trace_gen) ;\ 831 ba,pt %xcc,.fp_disabled ;\ 832 nop ;\ 833 .align 32 834 835/* 836 * Floating point exceptions. 837 */ 838#define FP_IEEE_TRAP \ 839 TT_TRACE(trace_gen) ;\ 840 ba,pt %xcc,.fp_ieee_exception ;\ 841 nop ;\ 842 .align 32 843 844#define FP_TRAP \ 845 TT_TRACE(trace_gen) ;\ 846 ba,pt %xcc,.fp_exception ;\ 847 nop ;\ 848 .align 32 849 850#if !defined(lint) 851/* 852 * asynchronous traps at level 0 and level 1 853 * 854 * The first instruction must be a membar for UltraSPARC-III 855 * to stop RED state entry if the store queue has many 856 * pending bad stores (PRM, Chapter 11). 857 */ 858#define ASYNC_TRAP(ttype, ttlabel, table_name)\ 859 .global table_name ;\ 860table_name: ;\ 861 membar #Sync ;\ 862 TT_TRACE(ttlabel) ;\ 863 ba async_err ;\ 864 mov ttype, %g5 ;\ 865 .align 32 866 867/* 868 * Defaults to BAD entry, but establishes label to be used for 869 * architecture-specific overwrite of trap table entry. 870 */ 871#define LABELED_BAD(table_name) \ 872 .global table_name ;\ 873table_name: ;\ 874 BAD 875 876#endif /* !lint */ 877 878/* 879 * illegal instruction trap 880 */ 881#define ILLTRAP_INSTR \ 882 membar #Sync ;\ 883 TT_TRACE(trace_gen) ;\ 884 or %g0, P_UTRAP4, %g2 ;\ 885 or %g0, T_UNIMP_INSTR, %g3 ;\ 886 sethi %hi(.check_v9utrap), %g4 ;\ 887 jmp %g4 + %lo(.check_v9utrap) ;\ 888 nop ;\ 889 .align 32 890 891/* 892 * tag overflow trap 893 */ 894#define TAG_OVERFLOW \ 895 TT_TRACE(trace_gen) ;\ 896 or %g0, P_UTRAP10, %g2 ;\ 897 or %g0, T_TAG_OVERFLOW, %g3 ;\ 898 sethi %hi(.check_v9utrap), %g4 ;\ 899 jmp %g4 + %lo(.check_v9utrap) ;\ 900 nop ;\ 901 .align 32 902 903/* 904 * divide by zero trap 905 */ 906#define DIV_BY_ZERO \ 907 TT_TRACE(trace_gen) ;\ 908 or %g0, P_UTRAP11, %g2 ;\ 909 or %g0, T_IDIV0, %g3 ;\ 910 sethi %hi(.check_v9utrap), %g4 ;\ 911 jmp %g4 + %lo(.check_v9utrap) ;\ 912 nop ;\ 913 .align 32 914 915/* 916 * trap instruction for V9 user trap handlers 917 */ 918#define TRAP_INSTR \ 919 TT_TRACE(trace_gen) ;\ 920 or %g0, T_SOFTWARE_TRAP, %g3 ;\ 921 sethi %hi(.check_v9utrap), %g4 ;\ 922 jmp %g4 + %lo(.check_v9utrap) ;\ 923 nop ;\ 924 .align 32 925#define TRP4 TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR 926 927/* 928 * LEVEL_INTERRUPT is for level N interrupts. 929 * VECTOR_INTERRUPT is for the vector trap. 930 */ 931#define LEVEL_INTERRUPT(level) \ 932 .global tt_pil/**/level ;\ 933tt_pil/**/level: ;\ 934 ba,pt %xcc, pil_interrupt ;\ 935 mov level, %g4 ;\ 936 .align 32 937 938#define LEVEL14_INTERRUPT \ 939 ba pil14_interrupt ;\ 940 mov PIL_14, %g4 ;\ 941 .align 32 942 943#define VECTOR_INTERRUPT \ 944 ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g1 ;\ 945 btst IRSR_BUSY, %g1 ;\ 946 bnz,pt %xcc, vec_interrupt ;\ 947 nop ;\ 948 ba,a,pt %xcc, vec_intr_spurious ;\ 949 .empty ;\ 950 .align 32 951 952/* 953 * MMU Trap Handlers. 954 */ 955#define SWITCH_GLOBALS /* mmu->alt, alt->mmu */ \ 956 rdpr %pstate, %g5 ;\ 957 wrpr %g5, PSTATE_MG | PSTATE_AG, %pstate 958 959#define IMMU_EXCEPTION \ 960 membar #Sync ;\ 961 SWITCH_GLOBALS ;\ 962 wr %g0, ASI_IMMU, %asi ;\ 963 rdpr %tpc, %g2 ;\ 964 ldxa [MMU_SFSR]%asi, %g3 ;\ 965 ba,pt %xcc, .mmu_exception_end ;\ 966 mov T_INSTR_EXCEPTION, %g1 ;\ 967 .align 32 968 969#define DMMU_EXCEPTION \ 970 SWITCH_GLOBALS ;\ 971 wr %g0, ASI_DMMU, %asi ;\ 972 ldxa [MMU_TAG_ACCESS]%asi, %g2 ;\ 973 ldxa [MMU_SFSR]%asi, %g3 ;\ 974 ba,pt %xcc, .mmu_exception_end ;\ 975 mov T_DATA_EXCEPTION, %g1 ;\ 976 .align 32 977 978#define DMMU_EXC_AG_PRIV \ 979 wr %g0, ASI_DMMU, %asi ;\ 980 ldxa [MMU_SFAR]%asi, %g2 ;\ 981 ba,pt %xcc, .mmu_priv_exception ;\ 982 ldxa [MMU_SFSR]%asi, %g3 ;\ 983 .align 32 984 985#define DMMU_EXC_AG_NOT_ALIGNED \ 986 wr %g0, ASI_DMMU, %asi ;\ 987 ldxa [MMU_SFAR]%asi, %g2 ;\ 988 ba,pt %xcc, .mmu_exception_not_aligned ;\ 989 ldxa [MMU_SFSR]%asi, %g3 ;\ 990 .align 32 991 992/* 993 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2) 994 */ 995#define DMMU_EXC_LDDF_NOT_ALIGNED \ 996 btst 1, %sp ;\ 997 bnz,pt %xcc, .lddf_exception_not_aligned ;\ 998 wr %g0, ASI_DMMU, %asi ;\ 999 ldxa [MMU_SFAR]%asi, %g2 ;\ 1000 ba,pt %xcc, .mmu_exception_not_aligned ;\ 1001 ldxa [MMU_SFSR]%asi, %g3 ;\ 1002 .align 32 1003 1004#define DMMU_EXC_STDF_NOT_ALIGNED \ 1005 btst 1, %sp ;\ 1006 bnz,pt %xcc, .stdf_exception_not_aligned ;\ 1007 wr %g0, ASI_DMMU, %asi ;\ 1008 ldxa [MMU_SFAR]%asi, %g2 ;\ 1009 ba,pt %xcc, .mmu_exception_not_aligned ;\ 1010 ldxa [MMU_SFSR]%asi, %g3 ;\ 1011 .align 32 1012 1013/* 1014 * Flush the TLB using either the primary, secondary, or nucleus flush 1015 * operation based on whether the ctx from the tag access register matches 1016 * the primary or secondary context (flush the nucleus if neither matches). 1017 * 1018 * Requires a membar #Sync before next ld/st. 1019 * exits with: 1020 * g2 = tag access register 1021 * g3 = ctx number 1022 */ 1023#if TAGACC_CTX_MASK != CTXREG_CTX_MASK 1024#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK" 1025#endif 1026#define DTLB_DEMAP_ENTRY \ 1027 mov MMU_TAG_ACCESS, %g1 ;\ 1028 mov MMU_PCONTEXT, %g5 ;\ 1029 ldxa [%g1]ASI_DMMU, %g2 ;\ 1030 sethi %hi(TAGACC_CTX_MASK), %g4 ;\ 1031 or %g4, %lo(TAGACC_CTX_MASK), %g4 ;\ 1032 and %g2, %g4, %g3 /* g3 = ctx */ ;\ 1033 ldxa [%g5]ASI_DMMU, %g6 /* g6 = primary ctx */ ;\ 1034 and %g6, %g4, %g6 /* &= CTXREG_CTX_MASK */ ;\ 1035 cmp %g3, %g6 ;\ 1036 be,pt %xcc, 1f ;\ 1037 andn %g2, %g4, %g1 /* ctx = primary */ ;\ 1038 mov MMU_SCONTEXT, %g5 ;\ 1039 ldxa [%g5]ASI_DMMU, %g6 /* g6 = secondary ctx */ ;\ 1040 and %g6, %g4, %g6 /* &= CTXREG_CTX_MASK */ ;\ 1041 cmp %g3, %g6 ;\ 1042 be,a,pt %xcc, 1f ;\ 1043 or %g1, DEMAP_SECOND, %g1 ;\ 1044 or %g1, DEMAP_NUCLEUS, %g1 ;\ 10451: stxa %g0, [%g1]ASI_DTLB_DEMAP /* MMU_DEMAP_PAGE */ ;\ 1046 membar #Sync 1047 1048#if defined(cscope) 1049/* 1050 * Define labels to direct cscope quickly to labels that 1051 * are generated by macro expansion of DTLB_MISS(). 1052 */ 1053 .global tt0_dtlbmiss 1054tt0_dtlbmiss: 1055 .global tt1_dtlbmiss 1056tt1_dtlbmiss: 1057 nop 1058#endif 1059 1060/* 1061 * Needs to be exactly 32 instructions 1062 * 1063 * UTLB NOTE: If we don't hit on the 8k pointer then we branch 1064 * to a special 4M tsb handler. It would be nice if that handler 1065 * could live in this file but currently it seems better to allow 1066 * it to fall thru to sfmmu_tsb_miss. 1067 */ 1068#ifdef UTSB_PHYS 1069#define DTLB_MISS(table_name) ;\ 1070 .global table_name/**/_dtlbmiss ;\ 1071table_name/**/_dtlbmiss: ;\ 1072 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1073 ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1074 ldxa [%g6]ASI_DMMU, %g2 /* g2 = tag access */ ;\ 1075 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1076 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1077 cmp %g3, INVALID_CONTEXT ;\ 1078 ble,pn %xcc, sfmmu_kdtlb_miss ;\ 1079 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1080 mov SCRATCHPAD_UTSBREG, %g3 ;\ 1081 ldxa [%g3]ASI_SCRATCHPAD, %g3 /* g3 = 2nd tsb reg */ ;\ 1082 brgez,pn %g3, sfmmu_udtlb_slowpath /* branch if 2 TSBs */ ;\ 1083 nop ;\ 1084 ldda [%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, %g5 data */;\ 1085 cmp %g4, %g7 ;\ 1086 bne,pn %xcc, sfmmu_tsb_miss_tt /* no 4M TSB, miss */ ;\ 1087 mov -1, %g3 /* set 4M tsbe ptr to -1 */ ;\ 1088 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1089 stxa %g5, [%g0]ASI_DTLB_IN /* trapstat expects TTE */ ;\ 1090 retry /* in %g5 */ ;\ 1091 unimp 0 ;\ 1092 unimp 0 ;\ 1093 unimp 0 ;\ 1094 unimp 0 ;\ 1095 unimp 0 ;\ 1096 unimp 0 ;\ 1097 unimp 0 ;\ 1098 unimp 0 ;\ 1099 unimp 0 ;\ 1100 unimp 0 ;\ 1101 unimp 0 ;\ 1102 unimp 0 ;\ 1103 .align 128 1104#else /* UTSB_PHYS */ 1105#define DTLB_MISS(table_name) ;\ 1106 .global table_name/**/_dtlbmiss ;\ 1107table_name/**/_dtlbmiss: ;\ 1108 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1109 ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1110 ldxa [%g6]ASI_DMMU, %g2 /* g2 = tag access */ ;\ 1111 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1112 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1113 cmp %g3, INVALID_CONTEXT ;\ 1114 ble,pn %xcc, sfmmu_kdtlb_miss ;\ 1115 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1116 brlz,pn %g1, sfmmu_udtlb_slowpath ;\ 1117 nop ;\ 1118 ldda [%g1]ASI_NQUAD_LD, %g4 /* g4 = tag, %g5 data */ ;\ 1119 cmp %g4, %g7 ;\ 1120 bne,pn %xcc, sfmmu_tsb_miss_tt /* no 4M TSB, miss */ ;\ 1121 mov -1, %g3 /* set 4M tsbe ptr to -1 */ ;\ 1122 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1123 stxa %g5, [%g0]ASI_DTLB_IN /* trapstat expects TTE */ ;\ 1124 retry /* in %g5 */ ;\ 1125 unimp 0 ;\ 1126 unimp 0 ;\ 1127 unimp 0 ;\ 1128 unimp 0 ;\ 1129 unimp 0 ;\ 1130 unimp 0 ;\ 1131 unimp 0 ;\ 1132 unimp 0 ;\ 1133 unimp 0 ;\ 1134 unimp 0 ;\ 1135 unimp 0 ;\ 1136 unimp 0 ;\ 1137 unimp 0 ;\ 1138 unimp 0 ;\ 1139 .align 128 1140#endif /* UTSB_PHYS */ 1141 1142#if defined(cscope) 1143/* 1144 * Define labels to direct cscope quickly to labels that 1145 * are generated by macro expansion of ITLB_MISS(). 1146 */ 1147 .global tt0_itlbmiss 1148tt0_itlbmiss: 1149 .global tt1_itlbmiss 1150tt1_itlbmiss: 1151 nop 1152#endif 1153 1154/* 1155 * Instruction miss handler. 1156 * ldda instructions will have their ASI patched 1157 * by sfmmu_patch_ktsb at runtime. 1158 * MUST be EXACTLY 32 instructions or we'll break. 1159 */ 1160#ifdef UTSB_PHYS 1161#define ITLB_MISS(table_name) \ 1162 .global table_name/**/_itlbmiss ;\ 1163table_name/**/_itlbmiss: ;\ 1164 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1165 ldxa [%g0]ASI_IMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1166 ldxa [%g6]ASI_IMMU, %g2 /* g2 = tag access */ ;\ 1167 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1168 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1169 cmp %g3, INVALID_CONTEXT ;\ 1170 ble,pn %xcc, sfmmu_kitlb_miss ;\ 1171 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1172 mov SCRATCHPAD_UTSBREG, %g3 ;\ 1173 ldxa [%g3]ASI_SCRATCHPAD, %g3 /* g3 = 2nd tsb reg */ ;\ 1174 brgez,pn %g3, sfmmu_uitlb_slowpath /* branch if 2 TSBs */ ;\ 1175 nop ;\ 1176 ldda [%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\ 1177 cmp %g4, %g7 ;\ 1178 bne,pn %xcc, sfmmu_tsb_miss_tt /* br if 8k ptr miss */ ;\ 1179 mov -1, %g3 /* set 4M TSB ptr to -1 */ ;\ 1180 andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\ 1181 bz,pn %icc, exec_fault ;\ 1182 nop ;\ 1183 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1184 stxa %g5, [%g0]ASI_ITLB_IN /* trapstat expects %g5 */ ;\ 1185 retry ;\ 1186 unimp 0 ;\ 1187 unimp 0 ;\ 1188 unimp 0 ;\ 1189 unimp 0 ;\ 1190 unimp 0 ;\ 1191 unimp 0 ;\ 1192 unimp 0 ;\ 1193 unimp 0 ;\ 1194 unimp 0 ;\ 1195 .align 128 1196#else /* UTSB_PHYS */ 1197#define ITLB_MISS(table_name) \ 1198 .global table_name/**/_itlbmiss ;\ 1199table_name/**/_itlbmiss: ;\ 1200 mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\ 1201 ldxa [%g0]ASI_IMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1202 ldxa [%g6]ASI_IMMU, %g2 /* g2 = tag access */ ;\ 1203 sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\ 1204 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\ 1205 cmp %g3, INVALID_CONTEXT ;\ 1206 ble,pn %xcc, sfmmu_kitlb_miss ;\ 1207 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\ 1208 brlz,pn %g1, sfmmu_uitlb_slowpath /* if >1 TSB branch */ ;\ 1209 nop ;\ 1210 ldda [%g1]ASI_NQUAD_LD, %g4 /* g4 = tag, g5 = data */ ;\ 1211 cmp %g4, %g7 ;\ 1212 bne,pn %xcc, sfmmu_tsb_miss_tt /* br if 8k ptr miss */ ;\ 1213 mov -1, %g3 /* set 4M TSB ptr to -1 */ ;\ 1214 andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\ 1215 bz,pn %icc, exec_fault ;\ 1216 nop ;\ 1217 TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\ 1218 stxa %g5, [%g0]ASI_ITLB_IN /* trapstat expects %g5 */ ;\ 1219 retry ;\ 1220 unimp 0 ;\ 1221 unimp 0 ;\ 1222 unimp 0 ;\ 1223 unimp 0 ;\ 1224 unimp 0 ;\ 1225 unimp 0 ;\ 1226 unimp 0 ;\ 1227 unimp 0 ;\ 1228 unimp 0 ;\ 1229 unimp 0 ;\ 1230 unimp 0 ;\ 1231 .align 128 1232#endif /* UTSB_PHYS */ 1233 1234 1235/* 1236 * This macro is the first level handler for fast protection faults. 1237 * It first demaps the tlb entry which generated the fault and then 1238 * attempts to set the modify bit on the hash. It needs to be 1239 * exactly 32 instructions. 1240 */ 1241#define DTLB_PROT \ 1242 DTLB_DEMAP_ENTRY /* 20 instructions */ ;\ 1243 /* ;\ 1244 * At this point: ;\ 1245 * g1 = ???? ;\ 1246 * g2 = tag access register ;\ 1247 * g3 = ctx number ;\ 1248 * g4 = ???? ;\ 1249 */ ;\ 1250 TT_TRACE(trace_dataprot) /* 2 instr ifdef TRAPTRACE */ ;\ 1251 /* clobbers g1 and g6 */ ;\ 1252 ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\ 1253 brnz,pt %g3, sfmmu_uprot_trap /* user trap */ ;\ 1254 nop ;\ 1255 ba,a,pt %xcc, sfmmu_kprot_trap /* kernel trap */ ;\ 1256 unimp 0 ;\ 1257 unimp 0 ;\ 1258 unimp 0 ;\ 1259 unimp 0 ;\ 1260 unimp 0 ;\ 1261 unimp 0 ;\ 1262 .align 128 1263 1264#define DMMU_EXCEPTION_TL1 ;\ 1265 SWITCH_GLOBALS ;\ 1266 ba,a,pt %xcc, mmu_trap_tl1 ;\ 1267 nop ;\ 1268 .align 32 1269 1270#define MISALIGN_ADDR_TL1 ;\ 1271 ba,a,pt %xcc, mmu_trap_tl1 ;\ 1272 nop ;\ 1273 .align 32 1274 1275/* 1276 * Trace a tsb hit 1277 * g1 = tsbe pointer (in/clobbered) 1278 * g2 = tag access register (in) 1279 * g3 - g4 = scratch (clobbered) 1280 * g5 = tsbe data (in) 1281 * g6 = scratch (clobbered) 1282 * g7 = pc we jumped here from (in) 1283 * ttextra = value to OR in to trap type (%tt) (in) 1284 */ 1285#ifdef TRAPTRACE 1286#define TRACE_TSBHIT(ttextra) \ 1287 membar #Sync ;\ 1288 sethi %hi(FLUSH_ADDR), %g6 ;\ 1289 flush %g6 ;\ 1290 TRACE_PTR(%g3, %g6) ;\ 1291 GET_TRACE_TICK(%g6) ;\ 1292 stxa %g6, [%g3 + TRAP_ENT_TICK]%asi ;\ 1293 stxa %g2, [%g3 + TRAP_ENT_SP]%asi /* tag access */ ;\ 1294 stxa %g5, [%g3 + TRAP_ENT_F1]%asi /* tsb data */ ;\ 1295 rdpr %tnpc, %g6 ;\ 1296 stxa %g6, [%g3 + TRAP_ENT_F2]%asi ;\ 1297 stxa %g1, [%g3 + TRAP_ENT_F3]%asi /* tsb pointer */ ;\ 1298 stxa %g0, [%g3 + TRAP_ENT_F4]%asi ;\ 1299 rdpr %tpc, %g6 ;\ 1300 stxa %g6, [%g3 + TRAP_ENT_TPC]%asi ;\ 1301 rdpr %tl, %g6 ;\ 1302 stha %g6, [%g3 + TRAP_ENT_TL]%asi ;\ 1303 rdpr %tt, %g6 ;\ 1304 or %g6, (ttextra), %g6 ;\ 1305 stha %g6, [%g3 + TRAP_ENT_TT]%asi ;\ 1306 ldxa [%g0]ASI_IMMU, %g1 /* tag target */ ;\ 1307 ldxa [%g0]ASI_DMMU, %g4 ;\ 1308 cmp %g6, FAST_IMMU_MISS_TT ;\ 1309 movne %icc, %g4, %g1 ;\ 1310 stxa %g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tsb tag */ ;\ 1311 stxa %g0, [%g3 + TRAP_ENT_TR]%asi ;\ 1312 TRACE_NEXT(%g3, %g4, %g6) 1313#else 1314#define TRACE_TSBHIT(ttextra) 1315#endif 1316 1317#if defined(lint) 1318 1319struct scb trap_table; 1320struct scb scb; /* trap_table/scb are the same object */ 1321 1322#else /* lint */ 1323 1324/* 1325 * ======================================================================= 1326 * SPARC V9 TRAP TABLE 1327 * 1328 * The trap table is divided into two halves: the first half is used when 1329 * taking traps when TL=0; the second half is used when taking traps from 1330 * TL>0. Note that handlers in the second half of the table might not be able 1331 * to make the same assumptions as handlers in the first half of the table. 1332 * 1333 * Worst case trap nesting so far: 1334 * 1335 * at TL=0 client issues software trap requesting service 1336 * at TL=1 nucleus wants a register window 1337 * at TL=2 register window clean/spill/fill takes a TLB miss 1338 * at TL=3 processing TLB miss 1339 * at TL=4 handle asynchronous error 1340 * 1341 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode". 1342 * 1343 * ======================================================================= 1344 */ 1345 .section ".text" 1346 .align 4 1347 .global trap_table, scb, trap_table0, trap_table1, etrap_table 1348 .type trap_table, #object 1349 .type scb, #object 1350trap_table: 1351scb: 1352trap_table0: 1353 /* hardware traps */ 1354 NOT; /* 000 reserved */ 1355 RED; /* 001 power on reset */ 1356 RED; /* 002 watchdog reset */ 1357 RED; /* 003 externally initiated reset */ 1358 RED; /* 004 software initiated reset */ 1359 RED; /* 005 red mode exception */ 1360 NOT; NOT; /* 006 - 007 reserved */ 1361 IMMU_EXCEPTION; /* 008 instruction access exception */ 1362 NOT; /* 009 instruction access MMU miss */ 1363 ASYNC_TRAP(T_INSTR_ERROR, trace_gen, tt0_iae); 1364 /* 00A instruction access error */ 1365 NOT; NOT4; /* 00B - 00F reserved */ 1366 ILLTRAP_INSTR; /* 010 illegal instruction */ 1367 TRAP(T_PRIV_INSTR); /* 011 privileged opcode */ 1368 NOT; /* 012 unimplemented LDD */ 1369 NOT; /* 013 unimplemented STD */ 1370 NOT4; NOT4; NOT4; /* 014 - 01F reserved */ 1371 FP_DISABLED_TRAP; /* 020 fp disabled */ 1372 FP_IEEE_TRAP; /* 021 fp exception ieee 754 */ 1373 FP_TRAP; /* 022 fp exception other */ 1374 TAG_OVERFLOW; /* 023 tag overflow */ 1375 CLEAN_WINDOW; /* 024 - 027 clean window */ 1376 DIV_BY_ZERO; /* 028 division by zero */ 1377 NOT; /* 029 internal processor error */ 1378 NOT; NOT; NOT4; /* 02A - 02F reserved */ 1379 DMMU_EXCEPTION; /* 030 data access exception */ 1380 NOT; /* 031 data access MMU miss */ 1381 ASYNC_TRAP(T_DATA_ERROR, trace_gen, tt0_dae); 1382 /* 032 data access error */ 1383 NOT; /* 033 data access protection */ 1384 DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */ 1385 DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */ 1386 DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */ 1387 DMMU_EXC_AG_PRIV; /* 037 privileged action */ 1388 NOT; /* 038 LDQF mem address not aligned */ 1389 NOT; /* 039 STQF mem address not aligned */ 1390 NOT; NOT; NOT4; /* 03A - 03F reserved */ 1391 LABELED_BAD(tt0_asdat); /* 040 async data error */ 1392 LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */ 1393 LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */ 1394 LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */ 1395 LEVEL_INTERRUPT(4); /* 044 interrupt level 4 */ 1396 LEVEL_INTERRUPT(5); /* 045 interrupt level 5 */ 1397 LEVEL_INTERRUPT(6); /* 046 interrupt level 6 */ 1398 LEVEL_INTERRUPT(7); /* 047 interrupt level 7 */ 1399 LEVEL_INTERRUPT(8); /* 048 interrupt level 8 */ 1400 LEVEL_INTERRUPT(9); /* 049 interrupt level 9 */ 1401 LEVEL_INTERRUPT(10); /* 04A interrupt level 10 */ 1402 LEVEL_INTERRUPT(11); /* 04B interrupt level 11 */ 1403 LEVEL_INTERRUPT(12); /* 04C interrupt level 12 */ 1404 LEVEL_INTERRUPT(13); /* 04D interrupt level 13 */ 1405 LEVEL14_INTERRUPT; /* 04E interrupt level 14 */ 1406 LEVEL_INTERRUPT(15); /* 04F interrupt level 15 */ 1407 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F reserved */ 1408 VECTOR_INTERRUPT; /* 060 interrupt vector */ 1409 GOTO(kmdb_trap); /* 061 PA watchpoint */ 1410 GOTO(kmdb_trap); /* 062 VA watchpoint */ 1411 GOTO_TT(ce_err, trace_gen); /* 063 corrected ECC error */ 1412 ITLB_MISS(tt0); /* 064 instruction access MMU miss */ 1413 DTLB_MISS(tt0); /* 068 data access MMU miss */ 1414 DTLB_PROT; /* 06C data access protection */ 1415 LABELED_BAD(tt0_fecc); /* 070 fast ecache ECC error */ 1416 LABELED_BAD(tt0_dperr); /* 071 Cheetah+ dcache parity error */ 1417 LABELED_BAD(tt0_iperr); /* 072 Cheetah+ icache parity error */ 1418 NOT; /* 073 reserved */ 1419 NOT4; NOT4; NOT4; /* 074 - 07F reserved */ 1420 NOT4; /* 080 spill 0 normal */ 1421 SPILL_32bit_asi(ASI_AIUP,sn0); /* 084 spill 1 normal */ 1422 SPILL_64bit_asi(ASI_AIUP,sn0); /* 088 spill 2 normal */ 1423 SPILL_32clean(ASI_AIUP,sn0); /* 08C spill 3 normal */ 1424 SPILL_64clean(ASI_AIUP,sn0); /* 090 spill 4 normal */ 1425 SPILL_32bit(not); /* 094 spill 5 normal */ 1426 SPILL_64bit(not); /* 098 spill 6 normal */ 1427 SPILL_mixed; /* 09C spill 7 normal */ 1428 NOT4; /* 0A0 spill 0 other */ 1429 SPILL_32bit_asi(ASI_AIUS,so0); /* 0A4 spill 1 other */ 1430 SPILL_64bit_asi(ASI_AIUS,so0); /* 0A8 spill 2 other */ 1431 SPILL_32bit_asi(ASI_AIUS,so0); /* 0AC spill 3 other */ 1432 SPILL_64bit_asi(ASI_AIUS,so0); /* 0B0 spill 4 other */ 1433 NOT4; /* 0B4 spill 5 other */ 1434 NOT4; /* 0B8 spill 6 other */ 1435 NOT4; /* 0BC spill 7 other */ 1436 NOT4; /* 0C0 fill 0 normal */ 1437 FILL_32bit_asi(ASI_AIUP,fn0); /* 0C4 fill 1 normal */ 1438 FILL_64bit_asi(ASI_AIUP,fn0); /* 0C8 fill 2 normal */ 1439 FILL_32bit_asi(ASI_AIUP,fn0); /* 0CC fill 3 normal */ 1440 FILL_64bit_asi(ASI_AIUP,fn0); /* 0D0 fill 4 normal */ 1441 FILL_32bit(not); /* 0D4 fill 5 normal */ 1442 FILL_64bit(not); /* 0D8 fill 6 normal */ 1443 FILL_mixed; /* 0DC fill 7 normal */ 1444 NOT4; /* 0E0 fill 0 other */ 1445 NOT4; /* 0E4 fill 1 other */ 1446 NOT4; /* 0E8 fill 2 other */ 1447 NOT4; /* 0EC fill 3 other */ 1448 NOT4; /* 0F0 fill 4 other */ 1449 NOT4; /* 0F4 fill 5 other */ 1450 NOT4; /* 0F8 fill 6 other */ 1451 NOT4; /* 0FC fill 7 other */ 1452 /* user traps */ 1453 GOTO(syscall_trap_4x); /* 100 old system call */ 1454 TRAP(T_BREAKPOINT); /* 101 user breakpoint */ 1455 TRAP(T_DIV0); /* 102 user divide by zero */ 1456 FLUSHW(tt0_flushw); /* 103 flush windows */ 1457 GOTO(.clean_windows); /* 104 clean windows */ 1458 BAD; /* 105 range check ?? */ 1459 GOTO(.fix_alignment); /* 106 do unaligned references */ 1460 BAD; /* 107 unused */ 1461 SYSCALL_TRAP32; /* 108 ILP32 system call on LP64 */ 1462 GOTO(set_trap0_addr); /* 109 set trap0 address */ 1463 BAD; BAD; BAD4; /* 10A - 10F unused */ 1464 TRP4; TRP4; TRP4; TRP4; /* 110 - 11F V9 user trap handlers */ 1465 GOTO(.getcc); /* 120 get condition codes */ 1466 GOTO(.setcc); /* 121 set condition codes */ 1467 GOTO(.getpsr); /* 122 get psr */ 1468 GOTO(.setpsr); /* 123 set psr (some fields) */ 1469 GOTO(get_timestamp); /* 124 get timestamp */ 1470 GOTO(get_virtime); /* 125 get lwp virtual time */ 1471 PRIV(self_xcall); /* 126 self xcall */ 1472 GOTO(get_hrestime); /* 127 get hrestime */ 1473 BAD; /* 128 ST_SETV9STACK */ 1474 GOTO(.getlgrp); /* 129 get lgrpid */ 1475 BAD; BAD; BAD4; /* 12A - 12F unused */ 1476 BAD4; BAD4; /* 130 - 137 unused */ 1477 DTRACE_PID; /* 138 dtrace pid tracing provider */ 1478 BAD; /* 139 unused */ 1479 DTRACE_RETURN; /* 13A dtrace pid return probe */ 1480 BAD; BAD4; /* 13B - 13F unused */ 1481 SYSCALL_TRAP; /* 140 LP64 system call */ 1482 SYSCALL(nosys); /* 141 unused system call trap */ 1483#ifdef DEBUG_USER_TRAPTRACECTL 1484 GOTO(.traptrace_freeze); /* 142 freeze traptrace */ 1485 GOTO(.traptrace_unfreeze); /* 143 unfreeze traptrace */ 1486#else 1487 SYSCALL(nosys); /* 142 unused system call trap */ 1488 SYSCALL(nosys); /* 143 unused system call trap */ 1489#endif 1490 BAD4; BAD4; BAD4; /* 144 - 14F unused */ 1491 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */ 1492 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */ 1493 BAD; /* 170 - unused */ 1494 BAD; /* 171 - unused */ 1495 BAD; BAD; /* 172 - 173 unused */ 1496 BAD4; BAD4; /* 174 - 17B unused */ 1497#ifdef PTL1_PANIC_DEBUG 1498 mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic); 1499 /* 17C test ptl1_panic */ 1500#else 1501 BAD; /* 17C unused */ 1502#endif /* PTL1_PANIC_DEBUG */ 1503 PRIV(kmdb_trap); /* 17D kmdb enter (L1-A) */ 1504 PRIV(kmdb_trap); /* 17E kmdb breakpoint */ 1505 PRIV(kctx_obp_bpt); /* 17F obp breakpoint */ 1506 /* reserved */ 1507 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */ 1508 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */ 1509 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */ 1510 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */ 1511 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */ 1512 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */ 1513 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */ 1514 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */ 1515trap_table1: 1516 NOT4; NOT4; NOT; NOT; /* 000 - 009 unused */ 1517 ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen, tt1_iae); 1518 /* 00A instruction access error */ 1519 NOT; NOT4; /* 00B - 00F unused */ 1520 NOT4; NOT4; NOT4; NOT4; /* 010 - 01F unused */ 1521 NOT4; /* 020 - 023 unused */ 1522 CLEAN_WINDOW; /* 024 - 027 clean window */ 1523 NOT4; NOT4; /* 028 - 02F unused */ 1524 DMMU_EXCEPTION_TL1; /* 030 data access exception */ 1525 NOT; /* 031 unused */ 1526 ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen, tt1_dae); 1527 /* 032 data access error */ 1528 NOT; /* 033 unused */ 1529 MISALIGN_ADDR_TL1; /* 034 mem address not aligned */ 1530 NOT; NOT; NOT; NOT4; NOT4 /* 035 - 03F unused */ 1531 LABELED_BAD(tt1_asdat); /* 040 async data error */ 1532 NOT; NOT; NOT; /* 041 - 043 unused */ 1533 NOT4; NOT4; NOT4; /* 044 - 04F unused */ 1534 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F unused */ 1535 NOT; /* 060 unused */ 1536 GOTO(kmdb_trap_tl1); /* 061 PA watchpoint */ 1537 GOTO(kmdb_trap_tl1); /* 062 VA watchpoint */ 1538 GOTO_TT(ce_err_tl1, trace_gen); /* 063 corrected ECC error */ 1539 ITLB_MISS(tt1); /* 064 instruction access MMU miss */ 1540 DTLB_MISS(tt1); /* 068 data access MMU miss */ 1541 DTLB_PROT; /* 06C data access protection */ 1542 LABELED_BAD(tt1_fecc); /* 070 fast ecache ECC error */ 1543 LABELED_BAD(tt1_dperr); /* 071 Cheetah+ dcache parity error */ 1544 LABELED_BAD(tt1_iperr); /* 072 Cheetah+ icache parity error */ 1545 NOT; /* 073 reserved */ 1546 NOT4; NOT4; NOT4; /* 074 - 07F reserved */ 1547 NOT4; /* 080 spill 0 normal */ 1548 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 084 spill 1 normal */ 1549 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 088 spill 2 normal */ 1550 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 08C spill 3 normal */ 1551 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 090 spill 4 normal */ 1552 SPILL_32bit(not); /* 094 spill 5 normal */ 1553 SPILL_64bit(not); /* 098 spill 6 normal */ 1554 SPILL_mixed; /* 09C spill 7 normal */ 1555 NOT4; /* 0A0 spill 0 other */ 1556 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0A4 spill 1 other */ 1557 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0A8 spill 2 other */ 1558 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0AC spill 3 other */ 1559 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0B0 spill 4 other */ 1560 NOT4; /* 0B4 spill 5 other */ 1561 NOT4; /* 0B8 spill 6 other */ 1562 NOT4; /* 0BC spill 7 other */ 1563 NOT4; /* 0C0 fill 0 normal */ 1564 FILL_32bit_tt1(ASI_AIUP,fn1); /* 0C4 fill 1 normal */ 1565 FILL_64bit_tt1(ASI_AIUP,fn1); /* 0C8 fill 2 normal */ 1566 FILL_32bit_tt1(ASI_AIUP,fn1); /* 0CC fill 3 normal */ 1567 FILL_64bit_tt1(ASI_AIUP,fn1); /* 0D0 fill 4 normal */ 1568 FILL_32bit(not); /* 0D4 fill 5 normal */ 1569 FILL_64bit(not); /* 0D8 fill 6 normal */ 1570 FILL_mixed; /* 0DC fill 7 normal */ 1571 NOT4; NOT4; NOT4; NOT4; /* 0E0 - 0EF unused */ 1572 NOT4; NOT4; NOT4; NOT4; /* 0F0 - 0FF unused */ 1573 LABELED_BAD(tt1_swtrap0); /* 100 fast ecache ECC error (cont) */ 1574 LABELED_BAD(tt1_swtrap1); /* 101 Ch+ D$ parity error (cont) */ 1575 LABELED_BAD(tt1_swtrap2); /* 102 Ch+ I$ parity error (cont) */ 1576 NOT; /* 103 reserved */ 1577/* 1578 * We only reserve the above four special case soft traps for code running 1579 * at TL>0, so we can truncate the trap table here. 1580 */ 1581etrap_table: 1582 .size trap_table, (.-trap_table) 1583 .size scb, (.-scb) 1584 1585/* 1586 * We get to exec_fault in the case of an instruction miss and tte 1587 * has no execute bit set. We go to tl0 to handle it. 1588 * 1589 * g1 = tsbe pointer (in/clobbered) 1590 * g2 = tag access register (in) 1591 * g3 - g4 = scratch (clobbered) 1592 * g5 = tsbe data (in) 1593 * g6 = scratch (clobbered) 1594 */ 1595 ALTENTRY(exec_fault) 1596 TRACE_TSBHIT(0x200) 1597 SWITCH_GLOBALS 1598 mov MMU_TAG_ACCESS, %g4 1599 ldxa [%g4]ASI_IMMU, %g2 ! arg1 = addr 1600 mov T_INSTR_MMU_MISS, %g3 ! arg2 = traptype 1601 set trap, %g1 1602 ba,pt %xcc, sys_trap 1603 mov -1, %g4 1604 1605.mmu_exception_not_aligned: 1606 rdpr %tstate, %g1 1607 btst TSTATE_PRIV, %g1 1608 bnz,pn %icc, 2f 1609 nop 1610 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1611 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1612 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1613 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1614 brz,pt %g5, 2f 1615 nop 1616 ldn [%g5 + P_UTRAP15], %g5 ! unaligned utrap? 1617 brz,pn %g5, 2f 1618 nop 1619 btst 1, %sp 1620 bz,pt %xcc, 1f ! 32 bit user program 1621 nop 1622 ba,pt %xcc, .setup_v9utrap ! 64 bit user program 1623 nop 16241: 1625 ba,pt %xcc, .setup_utrap 1626 or %g2, %g0, %g7 16272: 1628 ba,pt %xcc, .mmu_exception_end 1629 mov T_ALIGNMENT, %g1 1630 1631.mmu_priv_exception: 1632 rdpr %tstate, %g1 1633 btst TSTATE_PRIV, %g1 1634 bnz,pn %icc, 1f 1635 nop 1636 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1637 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1638 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1639 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1640 brz,pt %g5, 1f 1641 nop 1642 ldn [%g5 + P_UTRAP16], %g5 1643 brnz,pt %g5, .setup_v9utrap 1644 nop 16451: 1646 mov T_PRIV_INSTR, %g1 1647 1648.mmu_exception_end: 1649 CPU_INDEX(%g4, %g5) 1650 set cpu_core, %g5 1651 sllx %g4, CPU_CORE_SHIFT, %g4 1652 add %g4, %g5, %g4 1653 lduh [%g4 + CPUC_DTRACE_FLAGS], %g5 1654 andcc %g5, CPU_DTRACE_NOFAULT, %g0 1655 bz %xcc, .mmu_exception_tlb_chk 1656 or %g5, CPU_DTRACE_BADADDR, %g5 1657 stuh %g5, [%g4 + CPUC_DTRACE_FLAGS] 1658 done 1659 1660.mmu_exception_tlb_chk: 1661 GET_CPU_IMPL(%g5) ! check SFSR.FT to see if this 1662 cmp %g5, PANTHER_IMPL ! is a TLB parity error. But 1663 bne 2f ! we only do this check while 1664 mov 1, %g4 ! running on Panther CPUs 1665 sllx %g4, PN_SFSR_PARITY_SHIFT, %g4 ! since US-I/II use the same 1666 andcc %g3, %g4, %g0 ! bit for something else which 1667 bz 2f ! will be handled later. 1668 nop 1669.mmu_exception_is_tlb_parity: 1670 .weak itlb_parity_trap 1671 .weak dtlb_parity_trap 1672 set itlb_parity_trap, %g4 1673 cmp %g1, T_INSTR_EXCEPTION ! branch to the itlb or 1674 be 3f ! dtlb parity handler 1675 nop ! if this trap is due 1676 set dtlb_parity_trap, %g4 1677 cmp %g1, T_DATA_EXCEPTION ! to a IMMU exception 1678 be 3f ! or DMMU exception. 1679 nop 16802: 1681 sllx %g3, 32, %g3 1682 or %g3, %g1, %g3 1683 set trap, %g1 1684 ba,pt %xcc, sys_trap 1685 sub %g0, 1, %g4 16863: 1687 jmp %g4 ! off to the appropriate 1688 nop ! TLB parity handler 1689 1690.fp_disabled: 1691 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1692 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1693#ifdef SF_ERRATA_30 /* call causes fp-disabled */ 1694 brz,a,pn %g1, 2f 1695 nop 1696#endif 1697 rdpr %tstate, %g4 1698 btst TSTATE_PRIV, %g4 1699#ifdef SF_ERRATA_30 /* call causes fp-disabled */ 1700 bnz,pn %icc, 2f 1701 nop 1702#else 1703 bnz,a,pn %icc, ptl1_panic 1704 mov PTL1_BAD_FPTRAP, %g1 1705#endif 1706 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1707 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1708 brz,a,pt %g5, 2f 1709 nop 1710 ldn [%g5 + P_UTRAP7], %g5 ! fp_disabled utrap? 1711 brz,a,pn %g5, 2f 1712 nop 1713 btst 1, %sp 1714 bz,a,pt %xcc, 1f ! 32 bit user program 1715 nop 1716 ba,a,pt %xcc, .setup_v9utrap ! 64 bit user program 1717 nop 17181: 1719 ba,pt %xcc, .setup_utrap 1720 or %g0, %g0, %g7 17212: 1722 set fp_disabled, %g1 1723 ba,pt %xcc, sys_trap 1724 sub %g0, 1, %g4 1725 1726.fp_ieee_exception: 1727 rdpr %tstate, %g1 1728 btst TSTATE_PRIV, %g1 1729 bnz,a,pn %icc, ptl1_panic 1730 mov PTL1_BAD_FPTRAP, %g1 1731 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1732 stx %fsr, [%g1 + CPU_TMP1] 1733 ldx [%g1 + CPU_TMP1], %g2 1734 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1735 ldn [%g1 + T_PROCP], %g1 ! load proc pointer 1736 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps? 1737 brz,a,pt %g5, 1f 1738 nop 1739 ldn [%g5 + P_UTRAP8], %g5 1740 brnz,a,pt %g5, .setup_v9utrap 1741 nop 17421: 1743 set _fp_ieee_exception, %g1 1744 ba,pt %xcc, sys_trap 1745 sub %g0, 1, %g4 1746 1747/* 1748 * Register Inputs: 1749 * %g5 user trap handler 1750 * %g7 misaligned addr - for alignment traps only 1751 */ 1752.setup_utrap: 1753 set trap, %g1 ! setup in case we go 1754 mov T_FLUSH_PCB, %g3 ! through sys_trap on 1755 sub %g0, 1, %g4 ! the save instruction below 1756 1757 /* 1758 * If the DTrace pid provider is single stepping a copied-out 1759 * instruction, t->t_dtrace_step will be set. In that case we need 1760 * to abort the single-stepping (since execution of the instruction 1761 * was interrupted) and use the value of t->t_dtrace_npc as the %npc. 1762 */ 1763 save %sp, -SA(MINFRAME32), %sp ! window for trap handler 1764 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1765 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1766 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step 1767 rdpr %tnpc, %l2 ! arg1 == tnpc 1768 brz,pt %g2, 1f 1769 rdpr %tpc, %l1 ! arg0 == tpc 1770 1771 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1772 ldn [%g1 + T_DTRACE_NPC], %l2 ! arg1 = t->t_dtrace_npc (step) 1773 brz,pt %g2, 1f 1774 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1775 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 17761: 1777 mov %g7, %l3 ! arg2 == misaligned address 1778 1779 rdpr %tstate, %g1 ! cwp for trap handler 1780 rdpr %cwp, %g4 1781 bclr TSTATE_CWP_MASK, %g1 1782 wrpr %g1, %g4, %tstate 1783 wrpr %g0, %g5, %tnpc ! trap handler address 1784 FAST_TRAP_DONE 1785 /* NOTREACHED */ 1786 1787.check_v9utrap: 1788 rdpr %tstate, %g1 1789 btst TSTATE_PRIV, %g1 1790 bnz,a,pn %icc, 3f 1791 nop 1792 CPU_ADDR(%g4, %g1) ! load CPU struct addr 1793 ldn [%g4 + CPU_THREAD], %g5 ! load thread pointer 1794 ldn [%g5 + T_PROCP], %g5 ! load proc pointer 1795 ldn [%g5 + P_UTRAPS], %g5 ! are there utraps? 1796 1797 cmp %g3, T_SOFTWARE_TRAP 1798 bne,a,pt %icc, 1f 1799 nop 1800 1801 brz,pt %g5, 3f ! if p_utraps == NULL goto trap() 1802 rdpr %tt, %g3 ! delay - get actual hw trap type 1803 1804 sub %g3, 254, %g1 ! UT_TRAP_INSTRUCTION_16 = p_utraps[18] 1805 ba,pt %icc, 2f 1806 smul %g1, CPTRSIZE, %g2 18071: 1808 brz,a,pt %g5, 3f ! if p_utraps == NULL goto trap() 1809 nop 1810 1811 cmp %g3, T_UNIMP_INSTR 1812 bne,a,pt %icc, 2f 1813 nop 1814 1815 mov 1, %g1 1816 st %g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR 1817 rdpr %tpc, %g1 ! ld trapping instruction using 1818 lduwa [%g1]ASI_AIUP, %g1 ! "AS IF USER" ASI which could fault 1819 st %g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR 1820 1821 sethi %hi(0xc1c00000), %g4 ! setup mask for illtrap instruction 1822 andcc %g1, %g4, %g4 ! and instruction with mask 1823 bnz,a,pt %icc, 3f ! if %g4 == zero, %g1 is an ILLTRAP 1824 nop ! fall thru to setup 18252: 1826 ldn [%g5 + %g2], %g5 1827 brnz,a,pt %g5, .setup_v9utrap 1828 nop 18293: 1830 set trap, %g1 1831 ba,pt %xcc, sys_trap 1832 sub %g0, 1, %g4 1833 /* NOTREACHED */ 1834 1835/* 1836 * Register Inputs: 1837 * %g5 user trap handler 1838 */ 1839.setup_v9utrap: 1840 set trap, %g1 ! setup in case we go 1841 mov T_FLUSH_PCB, %g3 ! through sys_trap on 1842 sub %g0, 1, %g4 ! the save instruction below 1843 1844 /* 1845 * If the DTrace pid provider is single stepping a copied-out 1846 * instruction, t->t_dtrace_step will be set. In that case we need 1847 * to abort the single-stepping (since execution of the instruction 1848 * was interrupted) and use the value of t->t_dtrace_npc as the %npc. 1849 */ 1850 save %sp, -SA(MINFRAME64), %sp ! window for trap handler 1851 CPU_ADDR(%g1, %g4) ! load CPU struct addr 1852 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 1853 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step 1854 rdpr %tnpc, %l7 ! arg1 == tnpc 1855 brz,pt %g2, 1f 1856 rdpr %tpc, %l6 ! arg0 == tpc 1857 1858 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast 1859 ldn [%g1 + T_DTRACE_NPC], %l7 ! arg1 == t->t_dtrace_npc (step) 1860 brz,pt %g2, 1f 1861 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags 1862 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast 18631: 1864 rdpr %tstate, %g2 ! cwp for trap handler 1865 rdpr %cwp, %g4 1866 bclr TSTATE_CWP_MASK, %g2 1867 wrpr %g2, %g4, %tstate 1868 1869 ldn [%g1 + T_PROCP], %g4 ! load proc pointer 1870 ldn [%g4 + P_AS], %g4 ! load as pointer 1871 ldn [%g4 + A_USERLIMIT], %g4 ! load as userlimit 1872 cmp %l7, %g4 ! check for single-step set 1873 bne,pt %xcc, 4f 1874 nop 1875 ldn [%g1 + T_LWP], %g1 ! load klwp pointer 1876 ld [%g1 + PCB_STEP], %g4 ! load single-step flag 1877 cmp %g4, STEP_ACTIVE ! step flags set in pcb? 1878 bne,pt %icc, 4f 1879 nop 1880 stn %g5, [%g1 + PCB_TRACEPC] ! save trap handler addr in pcb 1881 mov %l7, %g4 ! on entry to precise user trap 1882 add %l6, 4, %l7 ! handler, %l6 == pc, %l7 == npc 1883 ! at time of trap 1884 wrpr %g0, %g4, %tnpc ! generate FLTBOUNDS, 1885 ! %g4 == userlimit 1886 FAST_TRAP_DONE 1887 /* NOTREACHED */ 18884: 1889 wrpr %g0, %g5, %tnpc ! trap handler address 1890 FAST_TRAP_DONE_CHK_INTR 1891 /* NOTREACHED */ 1892 1893.fp_exception: 1894 CPU_ADDR(%g1, %g4) 1895 stx %fsr, [%g1 + CPU_TMP1] 1896 ldx [%g1 + CPU_TMP1], %g2 1897 1898 /* 1899 * Cheetah takes unfinished_FPop trap for certain range of operands 1900 * to the "fitos" instruction. Instead of going through the slow 1901 * software emulation path, we try to simulate the "fitos" instruction 1902 * via "fitod" and "fdtos" provided the following conditions are met: 1903 * 1904 * fpu_exists is set (if DEBUG) 1905 * not in privileged mode 1906 * ftt is unfinished_FPop 1907 * NXM IEEE trap is not enabled 1908 * instruction at %tpc is "fitos" 1909 * 1910 * Usage: 1911 * %g1 per cpu address 1912 * %g2 %fsr 1913 * %g6 user instruction 1914 * 1915 * Note that we can take a memory access related trap while trying 1916 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR 1917 * flag to catch those traps and let the SFMMU code deal with page 1918 * fault and data access exception. 1919 */ 1920#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 1921 sethi %hi(fpu_exists), %g7 1922 ld [%g7 + %lo(fpu_exists)], %g7 1923 brz,pn %g7, .fp_exception_cont 1924 nop 1925#endif 1926 rdpr %tstate, %g7 ! branch if in privileged mode 1927 btst TSTATE_PRIV, %g7 1928 bnz,pn %xcc, .fp_exception_cont 1929 srl %g2, FSR_FTT_SHIFT, %g7 ! extract ftt from %fsr 1930 and %g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7 1931 cmp %g7, FTT_UNFIN 1932 set FSR_TEM_NX, %g5 1933 bne,pn %xcc, .fp_exception_cont ! branch if NOT unfinished_FPop 1934 andcc %g2, %g5, %g0 1935 bne,pn %xcc, .fp_exception_cont ! branch if FSR_TEM_NX enabled 1936 rdpr %tpc, %g5 ! get faulting PC 1937 1938 or %g0, 1, %g7 1939 st %g7, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag 1940 lda [%g5]ASI_USER, %g6 ! get user's instruction 1941 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 1942 1943 set FITOS_INSTR_MASK, %g7 1944 and %g6, %g7, %g7 1945 set FITOS_INSTR, %g5 1946 cmp %g7, %g5 1947 bne,pn %xcc, .fp_exception_cont ! branch if not FITOS_INSTR 1948 nop 1949 1950 /* 1951 * This is unfinished FPops trap for "fitos" instruction. We 1952 * need to simulate "fitos" via "fitod" and "fdtos" instruction 1953 * sequence. 1954 * 1955 * We need a temporary FP register to do the conversion. Since 1956 * both source and destination operands for the "fitos" instruction 1957 * have to be within %f0-%f31, we use an FP register from the upper 1958 * half to guarantee that it won't collide with the source or the 1959 * dest operand. However, we do have to save and restore its value. 1960 * 1961 * We use %d62 as a temporary FP register for the conversion and 1962 * branch to appropriate instruction within the conversion tables 1963 * based upon the rs2 and rd values. 1964 */ 1965 1966 std %d62, [%g1 + CPU_TMP1] ! save original value 1967 1968 srl %g6, FITOS_RS2_SHIFT, %g7 1969 and %g7, FITOS_REG_MASK, %g7 1970 set _fitos_fitod_table, %g4 1971 sllx %g7, 2, %g7 1972 jmp %g4 + %g7 1973 ba,pt %xcc, _fitos_fitod_done 1974 .empty 1975 1976_fitos_fitod_table: 1977 fitod %f0, %d62 1978 fitod %f1, %d62 1979 fitod %f2, %d62 1980 fitod %f3, %d62 1981 fitod %f4, %d62 1982 fitod %f5, %d62 1983 fitod %f6, %d62 1984 fitod %f7, %d62 1985 fitod %f8, %d62 1986 fitod %f9, %d62 1987 fitod %f10, %d62 1988 fitod %f11, %d62 1989 fitod %f12, %d62 1990 fitod %f13, %d62 1991 fitod %f14, %d62 1992 fitod %f15, %d62 1993 fitod %f16, %d62 1994 fitod %f17, %d62 1995 fitod %f18, %d62 1996 fitod %f19, %d62 1997 fitod %f20, %d62 1998 fitod %f21, %d62 1999 fitod %f22, %d62 2000 fitod %f23, %d62 2001 fitod %f24, %d62 2002 fitod %f25, %d62 2003 fitod %f26, %d62 2004 fitod %f27, %d62 2005 fitod %f28, %d62 2006 fitod %f29, %d62 2007 fitod %f30, %d62 2008 fitod %f31, %d62 2009_fitos_fitod_done: 2010 2011 /* 2012 * Now convert data back into single precision 2013 */ 2014 srl %g6, FITOS_RD_SHIFT, %g7 2015 and %g7, FITOS_REG_MASK, %g7 2016 set _fitos_fdtos_table, %g4 2017 sllx %g7, 2, %g7 2018 jmp %g4 + %g7 2019 ba,pt %xcc, _fitos_fdtos_done 2020 .empty 2021 2022_fitos_fdtos_table: 2023 fdtos %d62, %f0 2024 fdtos %d62, %f1 2025 fdtos %d62, %f2 2026 fdtos %d62, %f3 2027 fdtos %d62, %f4 2028 fdtos %d62, %f5 2029 fdtos %d62, %f6 2030 fdtos %d62, %f7 2031 fdtos %d62, %f8 2032 fdtos %d62, %f9 2033 fdtos %d62, %f10 2034 fdtos %d62, %f11 2035 fdtos %d62, %f12 2036 fdtos %d62, %f13 2037 fdtos %d62, %f14 2038 fdtos %d62, %f15 2039 fdtos %d62, %f16 2040 fdtos %d62, %f17 2041 fdtos %d62, %f18 2042 fdtos %d62, %f19 2043 fdtos %d62, %f20 2044 fdtos %d62, %f21 2045 fdtos %d62, %f22 2046 fdtos %d62, %f23 2047 fdtos %d62, %f24 2048 fdtos %d62, %f25 2049 fdtos %d62, %f26 2050 fdtos %d62, %f27 2051 fdtos %d62, %f28 2052 fdtos %d62, %f29 2053 fdtos %d62, %f30 2054 fdtos %d62, %f31 2055_fitos_fdtos_done: 2056 2057 ldd [%g1 + CPU_TMP1], %d62 ! restore %d62 2058 2059#if DEBUG 2060 /* 2061 * Update FPop_unfinished trap kstat 2062 */ 2063 set fpustat+FPUSTAT_UNFIN_KSTAT, %g7 2064 ldx [%g7], %g5 20651: 2066 add %g5, 1, %g6 2067 2068 casxa [%g7] ASI_N, %g5, %g6 2069 cmp %g5, %g6 2070 bne,a,pn %xcc, 1b 2071 or %g0, %g6, %g5 2072 2073 /* 2074 * Update fpu_sim_fitos kstat 2075 */ 2076 set fpuinfo+FPUINFO_FITOS_KSTAT, %g7 2077 ldx [%g7], %g5 20781: 2079 add %g5, 1, %g6 2080 2081 casxa [%g7] ASI_N, %g5, %g6 2082 cmp %g5, %g6 2083 bne,a,pn %xcc, 1b 2084 or %g0, %g6, %g5 2085#endif /* DEBUG */ 2086 2087 FAST_TRAP_DONE 2088 2089.fp_exception_cont: 2090 /* 2091 * Let _fp_exception deal with simulating FPop instruction. 2092 * Note that we need to pass %fsr in %g2 (already read above). 2093 */ 2094 2095 set _fp_exception, %g1 2096 ba,pt %xcc, sys_trap 2097 sub %g0, 1, %g4 2098 2099 .global opl_cleanw_patch 2100opl_cleanw_patch: 2101.clean_windows: 2102 set trap, %g1 2103 mov T_FLUSH_PCB, %g3 2104 sub %g0, 1, %g4 2105 save 2106 flushw 2107 restore 2108 wrpr %g0, %g0, %cleanwin ! no clean windows 2109 2110 CPU_ADDR(%g4, %g5) 2111 ldn [%g4 + CPU_MPCB], %g4 2112 brz,a,pn %g4, 1f 2113 nop 2114 ld [%g4 + MPCB_WSTATE], %g5 2115 add %g5, WSTATE_CLEAN_OFFSET, %g5 2116 wrpr %g0, %g5, %wstate 21171: FAST_TRAP_DONE 2118 2119/* 2120 * .spill_clean: clean the previous window, restore the wstate, and 2121 * "done". 2122 * 2123 * Entry: %g7 contains new wstate 2124 */ 2125.spill_clean: 2126 sethi %hi(nwin_minus_one), %g5 2127 ld [%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1 2128 rdpr %cwp, %g6 ! %g6 = %cwp 2129 deccc %g6 ! %g6-- 2130 movneg %xcc, %g5, %g6 ! if (%g6<0) %g6 = nwin-1 2131 wrpr %g6, %cwp 2132 TT_TRACE_L(trace_win) 2133 clr %l0 2134 clr %l1 2135 clr %l2 2136 clr %l3 2137 clr %l4 2138 clr %l5 2139 clr %l6 2140 clr %l7 2141 wrpr %g0, %g7, %wstate 2142 saved 2143 retry ! restores correct %cwp 2144 2145.fix_alignment: 2146 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2147 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer 2148 ldn [%g1 + T_PROCP], %g1 2149 mov 1, %g2 2150 stb %g2, [%g1 + P_FIXALIGNMENT] 2151 FAST_TRAP_DONE 2152 2153#define STDF_REG(REG, ADDR, TMP) \ 2154 sll REG, 3, REG ;\ 2155mark1: set start1, TMP ;\ 2156 jmp REG + TMP ;\ 2157 nop ;\ 2158start1: ba,pt %xcc, done1 ;\ 2159 std %f0, [ADDR + CPU_TMP1] ;\ 2160 ba,pt %xcc, done1 ;\ 2161 std %f32, [ADDR + CPU_TMP1] ;\ 2162 ba,pt %xcc, done1 ;\ 2163 std %f2, [ADDR + CPU_TMP1] ;\ 2164 ba,pt %xcc, done1 ;\ 2165 std %f34, [ADDR + CPU_TMP1] ;\ 2166 ba,pt %xcc, done1 ;\ 2167 std %f4, [ADDR + CPU_TMP1] ;\ 2168 ba,pt %xcc, done1 ;\ 2169 std %f36, [ADDR + CPU_TMP1] ;\ 2170 ba,pt %xcc, done1 ;\ 2171 std %f6, [ADDR + CPU_TMP1] ;\ 2172 ba,pt %xcc, done1 ;\ 2173 std %f38, [ADDR + CPU_TMP1] ;\ 2174 ba,pt %xcc, done1 ;\ 2175 std %f8, [ADDR + CPU_TMP1] ;\ 2176 ba,pt %xcc, done1 ;\ 2177 std %f40, [ADDR + CPU_TMP1] ;\ 2178 ba,pt %xcc, done1 ;\ 2179 std %f10, [ADDR + CPU_TMP1] ;\ 2180 ba,pt %xcc, done1 ;\ 2181 std %f42, [ADDR + CPU_TMP1] ;\ 2182 ba,pt %xcc, done1 ;\ 2183 std %f12, [ADDR + CPU_TMP1] ;\ 2184 ba,pt %xcc, done1 ;\ 2185 std %f44, [ADDR + CPU_TMP1] ;\ 2186 ba,pt %xcc, done1 ;\ 2187 std %f14, [ADDR + CPU_TMP1] ;\ 2188 ba,pt %xcc, done1 ;\ 2189 std %f46, [ADDR + CPU_TMP1] ;\ 2190 ba,pt %xcc, done1 ;\ 2191 std %f16, [ADDR + CPU_TMP1] ;\ 2192 ba,pt %xcc, done1 ;\ 2193 std %f48, [ADDR + CPU_TMP1] ;\ 2194 ba,pt %xcc, done1 ;\ 2195 std %f18, [ADDR + CPU_TMP1] ;\ 2196 ba,pt %xcc, done1 ;\ 2197 std %f50, [ADDR + CPU_TMP1] ;\ 2198 ba,pt %xcc, done1 ;\ 2199 std %f20, [ADDR + CPU_TMP1] ;\ 2200 ba,pt %xcc, done1 ;\ 2201 std %f52, [ADDR + CPU_TMP1] ;\ 2202 ba,pt %xcc, done1 ;\ 2203 std %f22, [ADDR + CPU_TMP1] ;\ 2204 ba,pt %xcc, done1 ;\ 2205 std %f54, [ADDR + CPU_TMP1] ;\ 2206 ba,pt %xcc, done1 ;\ 2207 std %f24, [ADDR + CPU_TMP1] ;\ 2208 ba,pt %xcc, done1 ;\ 2209 std %f56, [ADDR + CPU_TMP1] ;\ 2210 ba,pt %xcc, done1 ;\ 2211 std %f26, [ADDR + CPU_TMP1] ;\ 2212 ba,pt %xcc, done1 ;\ 2213 std %f58, [ADDR + CPU_TMP1] ;\ 2214 ba,pt %xcc, done1 ;\ 2215 std %f28, [ADDR + CPU_TMP1] ;\ 2216 ba,pt %xcc, done1 ;\ 2217 std %f60, [ADDR + CPU_TMP1] ;\ 2218 ba,pt %xcc, done1 ;\ 2219 std %f30, [ADDR + CPU_TMP1] ;\ 2220 ba,pt %xcc, done1 ;\ 2221 std %f62, [ADDR + CPU_TMP1] ;\ 2222done1: 2223 2224#define LDDF_REG(REG, ADDR, TMP) \ 2225 sll REG, 3, REG ;\ 2226mark2: set start2, TMP ;\ 2227 jmp REG + TMP ;\ 2228 nop ;\ 2229start2: ba,pt %xcc, done2 ;\ 2230 ldd [ADDR + CPU_TMP1], %f0 ;\ 2231 ba,pt %xcc, done2 ;\ 2232 ldd [ADDR + CPU_TMP1], %f32 ;\ 2233 ba,pt %xcc, done2 ;\ 2234 ldd [ADDR + CPU_TMP1], %f2 ;\ 2235 ba,pt %xcc, done2 ;\ 2236 ldd [ADDR + CPU_TMP1], %f34 ;\ 2237 ba,pt %xcc, done2 ;\ 2238 ldd [ADDR + CPU_TMP1], %f4 ;\ 2239 ba,pt %xcc, done2 ;\ 2240 ldd [ADDR + CPU_TMP1], %f36 ;\ 2241 ba,pt %xcc, done2 ;\ 2242 ldd [ADDR + CPU_TMP1], %f6 ;\ 2243 ba,pt %xcc, done2 ;\ 2244 ldd [ADDR + CPU_TMP1], %f38 ;\ 2245 ba,pt %xcc, done2 ;\ 2246 ldd [ADDR + CPU_TMP1], %f8 ;\ 2247 ba,pt %xcc, done2 ;\ 2248 ldd [ADDR + CPU_TMP1], %f40 ;\ 2249 ba,pt %xcc, done2 ;\ 2250 ldd [ADDR + CPU_TMP1], %f10 ;\ 2251 ba,pt %xcc, done2 ;\ 2252 ldd [ADDR + CPU_TMP1], %f42 ;\ 2253 ba,pt %xcc, done2 ;\ 2254 ldd [ADDR + CPU_TMP1], %f12 ;\ 2255 ba,pt %xcc, done2 ;\ 2256 ldd [ADDR + CPU_TMP1], %f44 ;\ 2257 ba,pt %xcc, done2 ;\ 2258 ldd [ADDR + CPU_TMP1], %f14 ;\ 2259 ba,pt %xcc, done2 ;\ 2260 ldd [ADDR + CPU_TMP1], %f46 ;\ 2261 ba,pt %xcc, done2 ;\ 2262 ldd [ADDR + CPU_TMP1], %f16 ;\ 2263 ba,pt %xcc, done2 ;\ 2264 ldd [ADDR + CPU_TMP1], %f48 ;\ 2265 ba,pt %xcc, done2 ;\ 2266 ldd [ADDR + CPU_TMP1], %f18 ;\ 2267 ba,pt %xcc, done2 ;\ 2268 ldd [ADDR + CPU_TMP1], %f50 ;\ 2269 ba,pt %xcc, done2 ;\ 2270 ldd [ADDR + CPU_TMP1], %f20 ;\ 2271 ba,pt %xcc, done2 ;\ 2272 ldd [ADDR + CPU_TMP1], %f52 ;\ 2273 ba,pt %xcc, done2 ;\ 2274 ldd [ADDR + CPU_TMP1], %f22 ;\ 2275 ba,pt %xcc, done2 ;\ 2276 ldd [ADDR + CPU_TMP1], %f54 ;\ 2277 ba,pt %xcc, done2 ;\ 2278 ldd [ADDR + CPU_TMP1], %f24 ;\ 2279 ba,pt %xcc, done2 ;\ 2280 ldd [ADDR + CPU_TMP1], %f56 ;\ 2281 ba,pt %xcc, done2 ;\ 2282 ldd [ADDR + CPU_TMP1], %f26 ;\ 2283 ba,pt %xcc, done2 ;\ 2284 ldd [ADDR + CPU_TMP1], %f58 ;\ 2285 ba,pt %xcc, done2 ;\ 2286 ldd [ADDR + CPU_TMP1], %f28 ;\ 2287 ba,pt %xcc, done2 ;\ 2288 ldd [ADDR + CPU_TMP1], %f60 ;\ 2289 ba,pt %xcc, done2 ;\ 2290 ldd [ADDR + CPU_TMP1], %f30 ;\ 2291 ba,pt %xcc, done2 ;\ 2292 ldd [ADDR + CPU_TMP1], %f62 ;\ 2293done2: 2294 2295.lddf_exception_not_aligned: 2296 /* 2297 * Cheetah overwrites SFAR on a DTLB miss, hence read it now. 2298 */ 2299 ldxa [MMU_SFAR]%asi, %g5 ! misaligned vaddr in %g5 2300 2301#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 2302 sethi %hi(fpu_exists), %g2 ! check fpu_exists 2303 ld [%g2 + %lo(fpu_exists)], %g2 2304 brz,a,pn %g2, 4f 2305 nop 2306#endif 2307 CPU_ADDR(%g1, %g4) 2308 or %g0, 1, %g4 2309 st %g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag 2310 2311 rdpr %tpc, %g2 2312 lda [%g2]ASI_AIUP, %g6 ! get the user's lddf instruction 2313 srl %g6, 23, %g1 ! using ldda or not? 2314 and %g1, 1, %g1 2315 brz,a,pt %g1, 2f ! check for ldda instruction 2316 nop 2317 srl %g6, 13, %g1 ! check immflag 2318 and %g1, 1, %g1 2319 rdpr %tstate, %g2 ! %tstate in %g2 2320 brnz,a,pn %g1, 1f 2321 srl %g2, 31, %g1 ! get asi from %tstate 2322 srl %g6, 5, %g1 ! get asi from instruction 2323 and %g1, 0xFF, %g1 ! imm_asi field 23241: 2325 cmp %g1, ASI_P ! primary address space 2326 be,a,pt %icc, 2f 2327 nop 2328 cmp %g1, ASI_PNF ! primary no fault address space 2329 be,a,pt %icc, 2f 2330 nop 2331 cmp %g1, ASI_S ! secondary address space 2332 be,a,pt %icc, 2f 2333 nop 2334 cmp %g1, ASI_SNF ! secondary no fault address space 2335 bne,a,pn %icc, 3f 2336 nop 23372: 2338 lduwa [%g5]ASI_USER, %g7 ! get first half of misaligned data 2339 add %g5, 4, %g5 ! increment misaligned data address 2340 lduwa [%g5]ASI_USER, %g5 ! get second half of misaligned data 2341 2342 sllx %g7, 32, %g7 2343 or %g5, %g7, %g5 ! combine data 2344 CPU_ADDR(%g7, %g1) ! save data on a per-cpu basis 2345 stx %g5, [%g7 + CPU_TMP1] ! save in cpu_tmp1 2346 2347 srl %g6, 25, %g3 ! %g6 has the instruction 2348 and %g3, 0x1F, %g3 ! %g3 has rd 2349 LDDF_REG(%g3, %g7, %g4) 2350 2351 CPU_ADDR(%g1, %g4) 2352 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 2353 FAST_TRAP_DONE 23543: 2355 CPU_ADDR(%g1, %g4) 2356 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 23574: 2358 set T_USER, %g3 ! trap type in %g3 2359 or %g3, T_LDDF_ALIGN, %g3 2360 mov %g5, %g2 ! misaligned vaddr in %g2 2361 set fpu_trap, %g1 ! goto C for the little and 2362 ba,pt %xcc, sys_trap ! no fault little asi's 2363 sub %g0, 1, %g4 2364 2365.stdf_exception_not_aligned: 2366 /* 2367 * Cheetah overwrites SFAR on a DTLB miss, hence read it now. 2368 */ 2369 ldxa [MMU_SFAR]%asi, %g5 ! misaligned vaddr in %g5 2370 2371#if defined(DEBUG) || defined(NEED_FPU_EXISTS) 2372 sethi %hi(fpu_exists), %g7 ! check fpu_exists 2373 ld [%g7 + %lo(fpu_exists)], %g3 2374 brz,a,pn %g3, 4f 2375 nop 2376#endif 2377 CPU_ADDR(%g1, %g4) 2378 or %g0, 1, %g4 2379 st %g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag 2380 2381 rdpr %tpc, %g2 2382 lda [%g2]ASI_AIUP, %g6 ! get the user's stdf instruction 2383 2384 srl %g6, 23, %g1 ! using stda or not? 2385 and %g1, 1, %g1 2386 brz,a,pt %g1, 2f ! check for stda instruction 2387 nop 2388 srl %g6, 13, %g1 ! check immflag 2389 and %g1, 1, %g1 2390 rdpr %tstate, %g2 ! %tstate in %g2 2391 brnz,a,pn %g1, 1f 2392 srl %g2, 31, %g1 ! get asi from %tstate 2393 srl %g6, 5, %g1 ! get asi from instruction 2394 and %g1, 0xFF, %g1 ! imm_asi field 23951: 2396 cmp %g1, ASI_P ! primary address space 2397 be,a,pt %icc, 2f 2398 nop 2399 cmp %g1, ASI_S ! secondary address space 2400 bne,a,pn %icc, 3f 2401 nop 24022: 2403 srl %g6, 25, %g6 2404 and %g6, 0x1F, %g6 ! %g6 has rd 2405 CPU_ADDR(%g7, %g1) 2406 STDF_REG(%g6, %g7, %g4) ! STDF_REG(REG, ADDR, TMP) 2407 2408 ldx [%g7 + CPU_TMP1], %g6 2409 srlx %g6, 32, %g7 2410 stuwa %g7, [%g5]ASI_USER ! first half 2411 add %g5, 4, %g5 ! increment misaligned data address 2412 stuwa %g6, [%g5]ASI_USER ! second half 2413 2414 CPU_ADDR(%g1, %g4) 2415 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 2416 FAST_TRAP_DONE 24173: 2418 CPU_ADDR(%g1, %g4) 2419 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag 24204: 2421 set T_USER, %g3 ! trap type in %g3 2422 or %g3, T_STDF_ALIGN, %g3 2423 mov %g5, %g2 ! misaligned vaddr in %g2 2424 set fpu_trap, %g1 ! goto C for the little and 2425 ba,pt %xcc, sys_trap ! nofault little asi's 2426 sub %g0, 1, %g4 2427 2428#ifdef DEBUG_USER_TRAPTRACECTL 2429 2430.traptrace_freeze: 2431 mov %l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4 2432 TT_TRACE_L(trace_win) 2433 mov %g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0 2434 set trap_freeze, %g1 2435 mov 1, %g2 2436 st %g2, [%g1] 2437 FAST_TRAP_DONE 2438 2439.traptrace_unfreeze: 2440 set trap_freeze, %g1 2441 st %g0, [%g1] 2442 mov %l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4 2443 TT_TRACE_L(trace_win) 2444 mov %g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0 2445 FAST_TRAP_DONE 2446 2447#endif /* DEBUG_USER_TRAPTRACECTL */ 2448 2449.getcc: 2450 CPU_ADDR(%g1, %g2) 2451 stx %o0, [%g1 + CPU_TMP1] ! save %o0 2452 stx %o1, [%g1 + CPU_TMP2] ! save %o1 2453 rdpr %tstate, %g3 ! get tstate 2454 srlx %g3, PSR_TSTATE_CC_SHIFT, %o0 ! shift ccr to V8 psr 2455 set PSR_ICC, %g2 2456 and %o0, %g2, %o0 ! mask out the rest 2457 srl %o0, PSR_ICC_SHIFT, %o0 ! right justify 2458 rdpr %pstate, %o1 2459 wrpr %o1, PSTATE_AG, %pstate ! get into normal globals 2460 mov %o0, %g1 ! move ccr to normal %g1 2461 wrpr %g0, %o1, %pstate ! back into alternate globals 2462 ldx [%g1 + CPU_TMP1], %o0 ! restore %o0 2463 ldx [%g1 + CPU_TMP2], %o1 ! restore %o1 2464 FAST_TRAP_DONE 2465 2466.setcc: 2467 CPU_ADDR(%g1, %g2) 2468 stx %o0, [%g1 + CPU_TMP1] ! save %o0 2469 stx %o1, [%g1 + CPU_TMP2] ! save %o1 2470 rdpr %pstate, %o0 2471 wrpr %o0, PSTATE_AG, %pstate ! get into normal globals 2472 mov %g1, %o1 2473 wrpr %g0, %o0, %pstate ! back to alternates 2474 sll %o1, PSR_ICC_SHIFT, %g2 2475 set PSR_ICC, %g3 2476 and %g2, %g3, %g2 ! mask out rest 2477 sllx %g2, PSR_TSTATE_CC_SHIFT, %g2 2478 rdpr %tstate, %g3 ! get tstate 2479 srl %g3, 0, %g3 ! clear upper word 2480 or %g3, %g2, %g3 ! or in new bits 2481 wrpr %g3, %tstate 2482 ldx [%g1 + CPU_TMP1], %o0 ! restore %o0 2483 ldx [%g1 + CPU_TMP2], %o1 ! restore %o1 2484 FAST_TRAP_DONE 2485 2486/* 2487 * getpsr(void) 2488 * Note that the xcc part of the ccr is not provided. 2489 * The V8 code shows why the V9 trap is not faster: 2490 * #define GETPSR_TRAP() \ 2491 * mov %psr, %i0; jmp %l2; rett %l2+4; nop; 2492 */ 2493 2494 .type .getpsr, #function 2495.getpsr: 2496 rdpr %tstate, %g1 ! get tstate 2497 srlx %g1, PSR_TSTATE_CC_SHIFT, %o0 ! shift ccr to V8 psr 2498 set PSR_ICC, %g2 2499 and %o0, %g2, %o0 ! mask out the rest 2500 2501 rd %fprs, %g1 ! get fprs 2502 and %g1, FPRS_FEF, %g2 ! mask out dirty upper/lower 2503 sllx %g2, PSR_FPRS_FEF_SHIFT, %g2 ! shift fef to V8 psr.ef 2504 or %o0, %g2, %o0 ! or result into psr.ef 2505 2506 set V9_PSR_IMPLVER, %g2 ! SI assigned impl/ver: 0xef 2507 or %o0, %g2, %o0 ! or psr.impl/ver 2508 FAST_TRAP_DONE 2509 SET_SIZE(.getpsr) 2510 2511/* 2512 * setpsr(newpsr) 2513 * Note that there is no support for ccr.xcc in the V9 code. 2514 */ 2515 2516 .type .setpsr, #function 2517.setpsr: 2518 rdpr %tstate, %g1 ! get tstate 2519! setx TSTATE_V8_UBITS, %g2 2520 or %g0, CCR_ICC, %g3 2521 sllx %g3, TSTATE_CCR_SHIFT, %g2 2522 2523 andn %g1, %g2, %g1 ! zero current user bits 2524 set PSR_ICC, %g2 2525 and %g2, %o0, %g2 ! clear all but psr.icc bits 2526 sllx %g2, PSR_TSTATE_CC_SHIFT, %g3 ! shift to tstate.ccr.icc 2527 wrpr %g1, %g3, %tstate ! write tstate 2528 2529 set PSR_EF, %g2 2530 and %g2, %o0, %g2 ! clear all but fp enable bit 2531 srlx %g2, PSR_FPRS_FEF_SHIFT, %g4 ! shift ef to V9 fprs.fef 2532 wr %g0, %g4, %fprs ! write fprs 2533 2534 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 2535 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2536 ldn [%g2 + T_LWP], %g3 ! load klwp pointer 2537 ldn [%g3 + LWP_FPU], %g2 ! get lwp_fpu pointer 2538 stuw %g4, [%g2 + FPU_FPRS] ! write fef value to fpu_fprs 2539 srlx %g4, 2, %g4 ! shift fef value to bit 0 2540 stub %g4, [%g2 + FPU_EN] ! write fef value to fpu_en 2541 FAST_TRAP_DONE 2542 SET_SIZE(.setpsr) 2543 2544/* 2545 * getlgrp 2546 * get home lgrpid on which the calling thread is currently executing. 2547 */ 2548 .type .getlgrp, #function 2549.getlgrp: 2550 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2551 ld [%g1 + CPU_ID], %o0 ! load cpu_id 2552 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2553 ldn [%g2 + T_LPL], %g2 ! load lpl pointer 2554 ld [%g2 + LPL_LGRPID], %g1 ! load lpl_lgrpid 2555 sra %g1, 0, %o1 2556 FAST_TRAP_DONE 2557 SET_SIZE(.getlgrp) 2558 2559/* 2560 * Entry for old 4.x trap (trap 0). 2561 */ 2562 ENTRY_NP(syscall_trap_4x) 2563 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2564 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2565 ldn [%g2 + T_LWP], %g2 ! load klwp pointer 2566 ld [%g2 + PCB_TRAP0], %g2 ! lwp->lwp_pcb.pcb_trap0addr 2567 brz,pn %g2, 1f ! has it been set? 2568 st %l0, [%g1 + CPU_TMP1] ! delay - save some locals 2569 st %l1, [%g1 + CPU_TMP2] 2570 rdpr %tnpc, %l1 ! save old tnpc 2571 wrpr %g0, %g2, %tnpc ! setup tnpc 2572 2573 rdpr %pstate, %l0 2574 wrpr %l0, PSTATE_AG, %pstate ! switch to normal globals 2575 mov %l1, %g6 ! pass tnpc to user code in %g6 2576 wrpr %l0, %g0, %pstate ! switch back to alternate globals 2577 2578 ! Note that %g1 still contains CPU struct addr 2579 ld [%g1 + CPU_TMP2], %l1 ! restore locals 2580 ld [%g1 + CPU_TMP1], %l0 2581 FAST_TRAP_DONE_CHK_INTR 25821: 2583 mov %g1, %l0 2584 st %l1, [%g1 + CPU_TMP2] 2585 rdpr %pstate, %l1 2586 wrpr %l1, PSTATE_AG, %pstate 2587 ! 2588 ! check for old syscall mmap which is the only different one which 2589 ! must be the same. Others are handled in the compatibility library. 2590 ! 2591 cmp %g1, OSYS_mmap ! compare to old 4.x mmap 2592 movz %icc, SYS_mmap, %g1 2593 wrpr %g0, %l1, %pstate 2594 ld [%l0 + CPU_TMP2], %l1 ! restore locals 2595 ld [%l0 + CPU_TMP1], %l0 2596 SYSCALL(syscall_trap32) 2597 SET_SIZE(syscall_trap_4x) 2598 2599/* 2600 * Handler for software trap 9. 2601 * Set trap0 emulation address for old 4.x system call trap. 2602 * XXX - this should be a system call. 2603 */ 2604 ENTRY_NP(set_trap0_addr) 2605 CPU_ADDR(%g1, %g2) ! load CPU struct addr to %g1 using %g2 2606 ldn [%g1 + CPU_THREAD], %g2 ! load thread pointer 2607 ldn [%g2 + T_LWP], %g2 ! load klwp pointer 2608 st %l0, [%g1 + CPU_TMP1] ! save some locals 2609 st %l1, [%g1 + CPU_TMP2] 2610 rdpr %pstate, %l0 2611 wrpr %l0, PSTATE_AG, %pstate 2612 mov %g1, %l1 2613 wrpr %g0, %l0, %pstate 2614 andn %l1, 3, %l1 ! force alignment 2615 st %l1, [%g2 + PCB_TRAP0] ! lwp->lwp_pcb.pcb_trap0addr 2616 ld [%g1 + CPU_TMP1], %l0 ! restore locals 2617 ld [%g1 + CPU_TMP2], %l1 2618 FAST_TRAP_DONE 2619 SET_SIZE(set_trap0_addr) 2620 2621/* 2622 * mmu_trap_tl1 2623 * trap handler for unexpected mmu traps. 2624 * simply checks if the trap was a user lddf/stdf alignment trap, in which 2625 * case we go to fpu_trap or a user trap from the window handler, in which 2626 * case we go save the state on the pcb. Otherwise, we go to ptl1_panic. 2627 */ 2628 .type mmu_trap_tl1, #function 2629mmu_trap_tl1: 2630#ifdef TRAPTRACE 2631 TRACE_PTR(%g5, %g6) 2632 GET_TRACE_TICK(%g6) 2633 stxa %g6, [%g5 + TRAP_ENT_TICK]%asi 2634 rdpr %tl, %g6 2635 stha %g6, [%g5 + TRAP_ENT_TL]%asi 2636 rdpr %tt, %g6 2637 stha %g6, [%g5 + TRAP_ENT_TT]%asi 2638 rdpr %tstate, %g6 2639 stxa %g6, [%g5 + TRAP_ENT_TSTATE]%asi 2640 stna %sp, [%g5 + TRAP_ENT_SP]%asi 2641 stna %g0, [%g5 + TRAP_ENT_TR]%asi 2642 rdpr %tpc, %g6 2643 stna %g6, [%g5 + TRAP_ENT_TPC]%asi 2644 set MMU_SFAR, %g6 2645 ldxa [%g6]ASI_DMMU, %g6 2646 stxa %g6, [%g5 + TRAP_ENT_F1]%asi 2647 CPU_PADDR(%g7, %g6); 2648 add %g7, CPU_TL1_HDLR, %g7 2649 lda [%g7]ASI_MEM, %g6 2650 stxa %g6, [%g5 + TRAP_ENT_F2]%asi 2651 set 0xdeadbeef, %g6 2652 stna %g6, [%g5 + TRAP_ENT_F3]%asi 2653 stna %g6, [%g5 + TRAP_ENT_F4]%asi 2654 TRACE_NEXT(%g5, %g6, %g7) 2655#endif /* TRAPTRACE */ 2656 2657 GET_CPU_IMPL(%g5) 2658 cmp %g5, PANTHER_IMPL 2659 bne mmu_trap_tl1_4 2660 nop 2661 rdpr %tt, %g5 2662 cmp %g5, T_DATA_EXCEPTION 2663 bne mmu_trap_tl1_4 2664 nop 2665 wr %g0, ASI_DMMU, %asi 2666 ldxa [MMU_SFSR]%asi, %g5 2667 mov 1, %g6 2668 sllx %g6, PN_SFSR_PARITY_SHIFT, %g6 2669 andcc %g5, %g6, %g0 2670 bz mmu_trap_tl1_4 2671 2672 /* 2673 * We are running on a Panther and have hit a DTLB parity error. 2674 */ 2675 ldxa [MMU_TAG_ACCESS]%asi, %g2 2676 mov %g5, %g3 2677 ba,pt %xcc, .mmu_exception_is_tlb_parity 2678 mov T_DATA_EXCEPTION, %g1 2679 2680mmu_trap_tl1_4: 2681 CPU_PADDR(%g7, %g6); 2682 add %g7, CPU_TL1_HDLR, %g7 ! %g7 = &cpu_m.tl1_hdlr (PA) 2683 /* 2684 * AM is cleared on trap, so addresses are 64 bit 2685 */ 2686 lda [%g7]ASI_MEM, %g6 2687 brz,a,pt %g6, 1f 2688 nop 2689 /* 2690 * We are going to update cpu_m.tl1_hdlr using physical address. 2691 * Flush the D$ line, so that stale data won't be accessed later. 2692 */ 2693 CPU_ADDR(%g6, %g5) 2694 add %g6, CPU_TL1_HDLR, %g6 ! %g6 = &cpu_m.tl1_hdlr (VA) 2695 GET_CPU_IMPL(%g5) 2696 cmp %g5, CHEETAH_IMPL 2697 bl,pt %icc, 3f 2698 cmp %g5, SPITFIRE_IMPL 2699 stxa %g0, [%g7]ASI_DC_INVAL 2700 membar #Sync 2701 ba,pt %xcc, 2f 2702 nop 27033: 2704 bl,pt %icc, 2f 2705 sethi %hi(dcache_line_mask), %g5 2706 ld [%g5 + %lo(dcache_line_mask)], %g5 2707 and %g6, %g5, %g5 2708 stxa %g0, [%g5]ASI_DC_TAG 2709 membar #Sync 27102: 2711 sta %g0, [%g7]ASI_MEM 2712 SWITCH_GLOBALS ! back to mmu globals 2713 ba,a,pt %xcc, sfmmu_mmu_trap ! handle page faults 27141: 2715 rdpr %tt, %g5 2716 rdpr %tl, %g7 2717 sub %g7, 1, %g6 2718 wrpr %g6, %tl 2719 rdpr %tt, %g6 2720 wrpr %g7, %tl 2721 and %g6, WTRAP_TTMASK, %g6 2722 cmp %g6, WTRAP_TYPE 2723 bne,a,pn %xcc, ptl1_panic 2724 mov PTL1_BAD_MMUTRAP, %g1 2725 rdpr %tpc, %g7 2726 /* tpc should be in the trap table */ 2727 set trap_table, %g6 2728 cmp %g7, %g6 2729 blt,a,pn %xcc, ptl1_panic 2730 mov PTL1_BAD_MMUTRAP, %g1 2731 set etrap_table, %g6 2732 cmp %g7, %g6 2733 bge,a,pn %xcc, ptl1_panic 2734 mov PTL1_BAD_MMUTRAP, %g1 2735 cmp %g5, T_ALIGNMENT 2736 move %icc, MMU_SFAR, %g6 2737 movne %icc, MMU_TAG_ACCESS, %g6 2738 ldxa [%g6]ASI_DMMU, %g6 2739 andn %g7, WTRAP_ALIGN, %g7 /* 128 byte aligned */ 2740 add %g7, WTRAP_FAULTOFF, %g7 2741 wrpr %g0, %g7, %tnpc 2742 done 2743 SET_SIZE(mmu_trap_tl1) 2744 2745/* 2746 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers. These 2747 * traps are valid only when kmdb is loaded. When the debugger is active, 2748 * the code below is rewritten to transfer control to the appropriate 2749 * debugger entry points. 2750 */ 2751 .global kmdb_trap 2752 .align 8 2753kmdb_trap: 2754 ba,a trap_table0 2755 jmp %g1 + 0 2756 nop 2757 2758 .global kmdb_trap_tl1 2759 .align 8 2760kmdb_trap_tl1: 2761 ba,a trap_table0 2762 jmp %g1 + 0 2763 nop 2764 2765/* 2766 * This entry is copied from OBP's trap table during boot. 2767 */ 2768 .global obp_bpt 2769 .align 8 2770obp_bpt: 2771 NOT 2772 2773/* 2774 * if kernel, set PCONTEXT to 0 for debuggers 2775 * if user, clear nucleus page sizes 2776 */ 2777 .global kctx_obp_bpt 2778kctx_obp_bpt: 2779 set obp_bpt, %g2 27801: 2781 mov MMU_PCONTEXT, %g1 2782 ldxa [%g1]ASI_DMMU, %g1 2783 srlx %g1, CTXREG_NEXT_SHIFT, %g3 2784 brz,pt %g3, 3f ! nucleus pgsz is 0, no problem 2785 sllx %g3, CTXREG_NEXT_SHIFT, %g3 2786 set CTXREG_CTX_MASK, %g4 ! check Pcontext 2787 btst %g4, %g1 2788 bz,a,pt %xcc, 2f 2789 clr %g3 ! kernel: PCONTEXT=0 2790 xor %g3, %g1, %g3 ! user: clr N_pgsz0/1 bits 27912: 2792 set DEMAP_ALL_TYPE, %g1 2793 stxa %g0, [%g1]ASI_DTLB_DEMAP 2794 stxa %g0, [%g1]ASI_ITLB_DEMAP 2795 mov MMU_PCONTEXT, %g1 2796 stxa %g3, [%g1]ASI_DMMU 2797 membar #Sync 2798 sethi %hi(FLUSH_ADDR), %g1 2799 flush %g1 ! flush required by immu 28003: 2801 jmp %g2 2802 nop 2803 2804 2805#ifdef TRAPTRACE 2806/* 2807 * TRAPTRACE support. 2808 * labels here are branched to with "rd %pc, %g7" in the delay slot. 2809 * Return is done by "jmp %g7 + 4". 2810 */ 2811 2812trace_gen: 2813 TRACE_PTR(%g3, %g6) 2814 GET_TRACE_TICK(%g6) 2815 stxa %g6, [%g3 + TRAP_ENT_TICK]%asi 2816 rdpr %tl, %g6 2817 stha %g6, [%g3 + TRAP_ENT_TL]%asi 2818 rdpr %tt, %g6 2819 stha %g6, [%g3 + TRAP_ENT_TT]%asi 2820 rdpr %tstate, %g6 2821 stxa %g6, [%g3 + TRAP_ENT_TSTATE]%asi 2822 stna %sp, [%g3 + TRAP_ENT_SP]%asi 2823 rdpr %tpc, %g6 2824 stna %g6, [%g3 + TRAP_ENT_TPC]%asi 2825 TRACE_NEXT(%g3, %g4, %g5) 2826 jmp %g7 + 4 2827 nop 2828 2829trace_win: 2830 TRACE_WIN_INFO(0, %l0, %l1, %l2) 2831 ! Keep the locals as clean as possible, caller cleans %l4 2832 clr %l2 2833 clr %l1 2834 jmp %l4 + 4 2835 clr %l0 2836 2837/* 2838 * Trace a tsb hit 2839 * g1 = tsbe pointer (in/clobbered) 2840 * g2 = tag access register (in) 2841 * g3 - g4 = scratch (clobbered) 2842 * g5 = tsbe data (in) 2843 * g6 = scratch (clobbered) 2844 * g7 = pc we jumped here from (in) 2845 */ 2846 2847 ! Do not disturb %g5, it will be used after the trace 2848 ALTENTRY(trace_tsbhit) 2849 TRACE_TSBHIT(0) 2850 jmp %g7 + 4 2851 nop 2852 2853/* 2854 * Trace a TSB miss 2855 * 2856 * g1 = tsb8k pointer (in) 2857 * g2 = tag access register (in) 2858 * g3 = tsb4m pointer (in) 2859 * g4 = tsbe tag (in/clobbered) 2860 * g5 - g6 = scratch (clobbered) 2861 * g7 = pc we jumped here from (in) 2862 */ 2863 .global trace_tsbmiss 2864trace_tsbmiss: 2865 membar #Sync 2866 sethi %hi(FLUSH_ADDR), %g6 2867 flush %g6 2868 TRACE_PTR(%g5, %g6) 2869 GET_TRACE_TICK(%g6) 2870 stxa %g6, [%g5 + TRAP_ENT_TICK]%asi 2871 stxa %g2, [%g5 + TRAP_ENT_SP]%asi ! tag access 2872 stxa %g4, [%g5 + TRAP_ENT_F1]%asi ! tsb tag 2873 rdpr %tnpc, %g6 2874 stxa %g6, [%g5 + TRAP_ENT_F2]%asi 2875 stna %g1, [%g5 + TRAP_ENT_F3]%asi ! tsb8k pointer 2876 srlx %g1, 32, %g6 2877 stna %g6, [%g5 + TRAP_ENT_F4]%asi ! huh? 2878 rdpr %tpc, %g6 2879 stna %g6, [%g5 + TRAP_ENT_TPC]%asi 2880 rdpr %tl, %g6 2881 stha %g6, [%g5 + TRAP_ENT_TL]%asi 2882 rdpr %tt, %g6 2883 or %g6, TT_MMU_MISS, %g4 2884 stha %g4, [%g5 + TRAP_ENT_TT]%asi 2885 cmp %g6, FAST_IMMU_MISS_TT 2886 be,a %icc, 1f 2887 ldxa [%g0]ASI_IMMU, %g6 2888 ldxa [%g0]ASI_DMMU, %g6 28891: stxa %g6, [%g5 + TRAP_ENT_TSTATE]%asi ! tag target 2890 stxa %g3, [%g5 + TRAP_ENT_TR]%asi ! tsb4m pointer 2891 TRACE_NEXT(%g5, %g4, %g6) 2892 jmp %g7 + 4 2893 nop 2894 2895/* 2896 * g2 = tag access register (in) 2897 * g3 = ctx number (in) 2898 */ 2899trace_dataprot: 2900 membar #Sync 2901 sethi %hi(FLUSH_ADDR), %g6 2902 flush %g6 2903 TRACE_PTR(%g1, %g6) 2904 GET_TRACE_TICK(%g6) 2905 stxa %g6, [%g1 + TRAP_ENT_TICK]%asi 2906 rdpr %tpc, %g6 2907 stna %g6, [%g1 + TRAP_ENT_TPC]%asi 2908 rdpr %tstate, %g6 2909 stxa %g6, [%g1 + TRAP_ENT_TSTATE]%asi 2910 stxa %g2, [%g1 + TRAP_ENT_SP]%asi ! tag access reg 2911 stxa %g0, [%g1 + TRAP_ENT_TR]%asi 2912 stxa %g0, [%g1 + TRAP_ENT_F1]%asi 2913 stxa %g0, [%g1 + TRAP_ENT_F2]%asi 2914 stxa %g0, [%g1 + TRAP_ENT_F3]%asi 2915 stxa %g0, [%g1 + TRAP_ENT_F4]%asi 2916 rdpr %tl, %g6 2917 stha %g6, [%g1 + TRAP_ENT_TL]%asi 2918 rdpr %tt, %g6 2919 stha %g6, [%g1 + TRAP_ENT_TT]%asi 2920 TRACE_NEXT(%g1, %g4, %g5) 2921 jmp %g7 + 4 2922 nop 2923 2924#endif /* TRAPTRACE */ 2925 2926/* 2927 * fast_trap_done, fast_trap_done_chk_intr: 2928 * 2929 * Due to the design of UltraSPARC pipeline, pending interrupts are not 2930 * taken immediately after a RETRY or DONE instruction which causes IE to 2931 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed 2932 * to execute first before taking any interrupts. If that instruction 2933 * results in other traps, and if the corresponding trap handler runs 2934 * entirely at TL=1 with interrupts disabled, then pending interrupts 2935 * won't be taken until after yet another instruction following the %tpc 2936 * or %tnpc. 2937 * 2938 * A malicious user program can use this feature to block out interrupts 2939 * for extended durations, which can result in send_mondo_timeout kernel 2940 * panic. 2941 * 2942 * This problem is addressed by servicing any pending interrupts via 2943 * sys_trap before returning back to the user mode from a fast trap 2944 * handler. The "done" instruction within a fast trap handler, which 2945 * runs entirely at TL=1 with interrupts disabled, is replaced with the 2946 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done 2947 * entry point. 2948 * 2949 * We check for any pending interrupts here and force a sys_trap to 2950 * service those interrupts, if any. To minimize overhead, pending 2951 * interrupts are checked if the %tpc happens to be at 16K boundary, 2952 * which allows a malicious program to execute at most 4K consecutive 2953 * instructions before we service any pending interrupts. If a worst 2954 * case fast trap handler takes about 2 usec, then interrupts will be 2955 * blocked for at most 8 msec, less than a clock tick. 2956 * 2957 * For the cases where we don't know if the %tpc will cross a 16K 2958 * boundary, we can't use the above optimization and always process 2959 * any pending interrupts via fast_frap_done_chk_intr entry point. 2960 * 2961 * Entry Conditions: 2962 * %pstate am:0 priv:1 ie:0 2963 * globals are AG (not normal globals) 2964 */ 2965 2966 .global fast_trap_done, fast_trap_done_chk_intr 2967fast_trap_done: 2968 rdpr %tpc, %g5 2969 sethi %hi(0xffffc000), %g6 ! 1's complement of 0x3fff 2970 andncc %g5, %g6, %g0 ! check lower 14 bits of %tpc 2971 bz,a,pn %icc, 1f ! branch if zero (lower 32 bits only) 2972 ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5 2973 done 2974 2975 ALTENTRY(fast_trap_done_check_interrupts) 2976fast_trap_done_chk_intr: 2977 ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5 2978 29791: rd SOFTINT, %g6 2980 and %g5, IRSR_BUSY, %g5 2981 orcc %g5, %g6, %g0 2982 bnz,pn %xcc, 2f ! branch if any pending intr 2983 nop 2984 done 2985 29862: 2987 /* 2988 * We get here if there are any pending interrupts. 2989 * Adjust %tpc/%tnpc as we'll be resuming via "retry" 2990 * instruction. 2991 */ 2992 rdpr %tnpc, %g5 2993 wrpr %g0, %g5, %tpc 2994 add %g5, 4, %g5 2995 wrpr %g0, %g5, %tnpc 2996 2997 /* 2998 * Force a dummy sys_trap call so that interrupts can be serviced. 2999 */ 3000 set fast_trap_dummy_call, %g1 3001 ba,pt %xcc, sys_trap 3002 mov -1, %g4 3003 3004fast_trap_dummy_call: 3005 retl 3006 nop 3007 3008/* 3009 * Currently the brand syscall interposition code is not enabled by 3010 * default. Instead, when a branded zone is first booted the brand 3011 * infrastructure will patch the trap table so that the syscall 3012 * entry points are redirected to syscall_wrapper32 and syscall_wrapper 3013 * for ILP32 and LP64 syscalls respectively. this is done in 3014 * brand_plat_interposition_enable(). Note that the syscall wrappers 3015 * below do not collect any trap trace data since the syscall hot patch 3016 * points are reached after trap trace data has already been collected. 3017 */ 3018#define BRAND_CALLBACK(callback_id) \ 3019 CPU_ADDR(%g2, %g1) /* load CPU struct addr to %g2 */ ;\ 3020 ldn [%g2 + CPU_THREAD], %g3 /* load thread pointer */ ;\ 3021 ldn [%g3 + T_PROCP], %g3 /* get proc pointer */ ;\ 3022 ldn [%g3 + P_BRAND], %g3 /* get brand pointer */ ;\ 3023 brz %g3, 1f /* No brand? No callback. */ ;\ 3024 nop ;\ 3025 ldn [%g3 + B_MACHOPS], %g3 /* get machops list */ ;\ 3026 ldn [%g3 + (callback_id << 3)], %g3 ;\ 3027 brz %g3, 1f ;\ 3028 /* \ 3029 * This isn't pretty. We want a low-latency way for the callback \ 3030 * routine to decline to do anything. We just pass in an address \ 3031 * the routine can directly jmp back to, pretending that nothing \ 3032 * has happened. \ 3033 * \ 3034 * %g1: return address (where the brand handler jumps back to) \ 3035 * %g2: address of CPU structure \ 3036 * %g3: address of brand handler (where we will jump to) \ 3037 */ \ 3038 mov %pc, %g1 ;\ 3039 add %g1, 16, %g1 ;\ 3040 jmp %g3 ;\ 3041 nop ;\ 30421: 3043 3044 ENTRY_NP(syscall_wrapper32) 3045 BRAND_CALLBACK(BRAND_CB_SYSCALL32) 3046 SYSCALL_NOTT(syscall_trap32) 3047 SET_SIZE(syscall_wrapper32) 3048 3049 ENTRY_NP(syscall_wrapper) 3050 BRAND_CALLBACK(BRAND_CB_SYSCALL) 3051 SYSCALL_NOTT(syscall_trap) 3052 SET_SIZE(syscall_wrapper) 3053 3054#endif /* lint */ 3055