/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* !lint */
#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/sun4asi.h>
#include <sys/spitregs.h>
#include <sys/cheetahregs.h>
#include <sys/machtrap.h>
#include <sys/machthread.h>
#include <sys/machbrand.h>
#include <sys/pcb.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/machpcb.h>
#include <sys/async.h>
#include <sys/intreg.h>
#include <sys/scb.h>
#include <sys/psr_compat.h>
#include <sys/syscall.h>
#include <sys/machparam.h>
#include <sys/traptrace.h>
#include <vm/hat_sfmmu.h>
#include <sys/archsystm.h>
#include <sys/utrap.h>
#include <sys/clock.h>
#include <sys/intr.h>
#include <sys/fpu/fpu_simulator.h>
#include <vm/seg_spt.h>

/*
 * WARNING: If you add a fast trap handler which can be invoked by a
 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
 * instead of the "done" instruction to return back to the user mode.  See
 * comments for the "fast_trap_done" entry point for more information.
 *
 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
 * cases where you always want to process any pending interrupts before
 * returning back to the user mode.
 */
#define	FAST_TRAP_DONE		\
	ba,a	fast_trap_done

#define	FAST_TRAP_DONE_CHK_INTR	\
	ba,a	fast_trap_done_chk_intr

/*
 * SPARC V9 Trap Table
 *
 * Most of the trap handlers are made from common building
 * blocks, and some are instantiated multiple times within
 * the trap table.  So, I build a bunch of macros, then
 * populate the table using only the macros.
 *
 * Many macros branch to sys_trap.  Its calling convention is:
 *	%g1		kernel trap handler
 *	%g2, %g3	args for above
 *	%g4		desired %pil
 */

#ifdef	TRAPTRACE

/*
 * Tracing macro.  Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#define	TT_TRACE_INS	2

#define	TT_TRACE_L(label)	\
	ba	label		;\
	rd	%pc, %l4	;\
	clr	%l4
#define	TT_TRACE_L_INS	3

#else

#define	TT_TRACE(label)
#define	TT_TRACE_INS	0

#define	TT_TRACE_L(label)
#define	TT_TRACE_L_INS	0

#endif

/*
 * This first set are funneled to trap() with %tt as the type.
 * Trap will then either panic or send the user a signal.
 */
/*
 * NOT is used for traps that just shouldn't happen.
 * It comes in both single and quadruple flavors.
 */
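/*
 * As a concrete illustration of the sys_trap convention above, the NOT
 * macro below expands (ignoring TT_TRACE) to roughly:
 *
 *	set	trap, %g1	! %g1 = kernel trap handler, the C trap()
 *	rdpr	%tt, %g3	! %g3 = argument: the hardware trap type
 *	ba,pt	%xcc, sys_trap
 *	sub	%g0, 1, %g4	! %g4 = -1: no specific %pil requested
 *
 * A %g4 of -1 is what handlers throughout this file pass when they have
 * no particular PIL to ask sys_trap for.
 */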
#if !defined(lint)
	.global	trap
#endif	/* !lint */
#define	NOT			\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32
#define	NOT4	NOT; NOT; NOT; NOT
/*
 * RED is for traps that use the red mode handler.
 * We should never see these either.
 */
#define	RED	NOT
/*
 * BAD is used for trap vectors we don't have a kernel
 * handler for.
 * It also comes in single and quadruple versions.
 */
#define	BAD	NOT
#define	BAD4	NOT4

#define	DONE			\
	done;			\
	.align	32

/*
 * TRAP vectors to the trap() function.
 * Its main use is for user errors.
 */
#if !defined(lint)
	.global	trap
#endif	/* !lint */
#define	TRAP(arg)		\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	mov	arg, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

/*
 * SYSCALL is used for unsupported syscall interfaces (with 'which'
 * set to 'nosys') and legacy support of old SunOS 4.x syscalls (with
 * 'which' set to 'syscall_trap32').
 *
 * The SYSCALL_TRAP* macros are used for syscall entry points.
 * SYSCALL_TRAP is used to support LP64 syscalls and SYSCALL_TRAP32
 * is used to support ILP32.  Each macro can only be used once
 * since they each define a symbol.  The symbols are used as hot patch
 * points by the brand infrastructure to dynamically enable and disable
 * brand syscall interposition.  See the comments around BRAND_CALLBACK
 * and brand_plat_interposition_enable() for more information.
 */
#define	SYSCALL_NOTT(which)	\
	set	(which), %g1	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

#define	SYSCALL(which)		\
	TT_TRACE(trace_gen)	;\
	SYSCALL_NOTT(which)

#define	SYSCALL_TRAP32				\
	TT_TRACE(trace_gen)			;\
	ALTENTRY(syscall_trap32_patch_point)	\
	SYSCALL_NOTT(syscall_trap32)

#define	SYSCALL_TRAP				\
	TT_TRACE(trace_gen)			;\
	ALTENTRY(syscall_trap_patch_point)	\
	SYSCALL_NOTT(syscall_trap)

#define	FLUSHW()		\
	set	trap, %g1	;\
	mov	T_FLUSHW, %g3	;\
	sub	%g0, 1, %g4	;\
	save			;\
	flushw			;\
	restore			;\
	FAST_TRAP_DONE		;\
	.align	32

/*
 * GOTO just jumps to a label.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO(label)		\
	.global	label		;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * GOTO_TT just jumps to a label.
 * correctable ECC error traps at level 0 and 1 will use this macro.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO_TT(label, ttlabel)	\
	.global	label		;\
	TT_TRACE(ttlabel)	;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * Privileged traps
 * Takes breakpoint if privileged, calls trap() if not.
 */
#define	PRIV(label)			\
	rdpr	%tstate, %g1		;\
	btst	TSTATE_PRIV, %g1	;\
	bnz	label			;\
	rdpr	%tt, %g3		;\
	set	trap, %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32
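/*
 * A rough sketch of how those patch points are used (the authoritative
 * details live with BRAND_CALLBACK and brand_plat_interposition_enable()):
 * syscall_trap_patch_point and syscall_trap32_patch_point label the first
 * instruction of the SYSCALL_NOTT expansion, so the brand infrastructure
 * can rewrite that single instruction at runtime to divert branded
 * processes into its syscall interposition path, and later restore the
 * original instruction to fall back to the plain sys_trap route.
 */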
/*
 * DTrace traps.
 */
#define	DTRACE_PID			\
	.global	dtrace_pid_probe	;\
	set	dtrace_pid_probe, %g1	;\
	ba,pt	%xcc, user_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#define	DTRACE_RETURN			\
	.global	dtrace_return_probe	;\
	set	dtrace_return_probe, %g1 ;\
	ba,pt	%xcc, user_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

/*
 * REGISTER WINDOW MANAGEMENT MACROS
 */

/*
 * various convenient units of padding
 */
#define	SKIP(n)	.skip 4*(n)

/*
 * CLEAN_WINDOW is the simple handler for cleaning a register window.
 */
#define	CLEAN_WINDOW						\
	TT_TRACE_L(trace_win)					;\
	rdpr	%cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	;\
	clr	%l0; clr %l1; clr %l2; clr %l3			;\
	clr	%l4; clr %l5; clr %l6; clr %l7			;\
	clr	%o0; clr %o1; clr %o2; clr %o3			;\
	clr	%o4; clr %o5; clr %o6; clr %o7			;\
	retry; .align 128

#if !defined(lint)

/*
 * If we get an unresolved tlb miss while in a window handler, the fault
 * handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel.  Spill handlers
 * use this to spill windows into the wbuf.
 *
 * The mixed handler works by checking %sp, and branching to the correct
 * handler.  This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames; which implies the handler order is: 32b,
 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
 */

/*
 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned even
 * though this code only needs it to be four-byte aligned.
 */
#define	SPILL_32bit(tail)			\
	srl	%sp, 0, %sp			;\
1:	st	%l0, [%sp + 0]			;\
	st	%l1, [%sp + 4]			;\
	st	%l2, [%sp + 8]			;\
	st	%l3, [%sp + 12]			;\
	st	%l4, [%sp + 16]			;\
	st	%l5, [%sp + 20]			;\
	st	%l6, [%sp + 24]			;\
	st	%l7, [%sp + 28]			;\
	st	%i0, [%sp + 32]			;\
	st	%i1, [%sp + 36]			;\
	st	%i2, [%sp + 40]			;\
	st	%i3, [%sp + 44]			;\
	st	%i4, [%sp + 48]			;\
	st	%i5, [%sp + 52]			;\
	st	%i6, [%sp + 56]			;\
	st	%i7, [%sp + 60]			;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty
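/*
 * A note on the SKIP() arithmetic in these spill/fill macros: each window
 * handler is allotted 32 instruction slots (four 8-instruction trap
 * vectors), and, per the comment above, the fault path depends on the
 * branch to fault_32bit_/64bit_<tail> sitting in the handler's last slot.
 * SPILL_32bit, for instance, emits 19 instructions (the srl, 16 stores,
 * saved, retry) plus TT_TRACE_L_INS trace instructions, so
 * SKIP(31-19-TT_TRACE_L_INS) pads the handler out to 31 instructions and
 * leaves the terminating branch in slot 32.
 */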
/*
 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	SPILL_32bit_asi(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
1:	sta	%l0, [%sp + %g0]asi_num		;\
	mov	4, %g1				;\
	sta	%l1, [%sp + %g1]asi_num		;\
	mov	8, %g2				;\
	sta	%l2, [%sp + %g2]asi_num		;\
	mov	12, %g3				;\
	sta	%l3, [%sp + %g3]asi_num		;\
	add	%sp, 16, %g4			;\
	sta	%l4, [%g4 + %g0]asi_num		;\
	sta	%l5, [%g4 + %g1]asi_num		;\
	sta	%l6, [%g4 + %g2]asi_num		;\
	sta	%l7, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i0, [%g4 + %g0]asi_num		;\
	sta	%i1, [%g4 + %g1]asi_num		;\
	sta	%i2, [%g4 + %g2]asi_num		;\
	sta	%i3, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i4, [%g4 + %g0]asi_num		;\
	sta	%i5, [%g4 + %g1]asi_num		;\
	sta	%i6, [%g4 + %g2]asi_num		;\
	sta	%i7, [%g4 + %g3]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

/*
 * SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned even though this code
 * only needs it to be four-byte aligned.
 */
#define	SPILL_32bit_tt1(asi_num, tail)		\
	mov	asi_num, %asi			;\
1:	srl	%sp, 0, %sp			;\
	sta	%l0, [%sp + 0]%asi		;\
	sta	%l1, [%sp + 4]%asi		;\
	sta	%l2, [%sp + 8]%asi		;\
	sta	%l3, [%sp + 12]%asi		;\
	sta	%l4, [%sp + 16]%asi		;\
	sta	%l5, [%sp + 20]%asi		;\
	sta	%l6, [%sp + 24]%asi		;\
	sta	%l7, [%sp + 28]%asi		;\
	sta	%i0, [%sp + 32]%asi		;\
	sta	%i1, [%sp + 36]%asi		;\
	sta	%i2, [%sp + 40]%asi		;\
	sta	%i3, [%sp + 44]%asi		;\
	sta	%i4, [%sp + 48]%asi		;\
	sta	%i5, [%sp + 52]%asi		;\
	sta	%i6, [%sp + 56]%asi		;\
	sta	%i7, [%sp + 60]%asi		;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-20-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty


/*
 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned even though this
 * code only needs it to be four-byte aligned.
 */
#define	FILL_32bit(tail)			\
	srl	%sp, 0, %sp			;\
1:	TT_TRACE_L(trace_win)			;\
	ld	[%sp + 0], %l0			;\
	ld	[%sp + 4], %l1			;\
	ld	[%sp + 8], %l2			;\
	ld	[%sp + 12], %l3			;\
	ld	[%sp + 16], %l4			;\
	ld	[%sp + 20], %l5			;\
	ld	[%sp + 24], %l6			;\
	ld	[%sp + 28], %l7			;\
	ld	[%sp + 32], %i0			;\
	ld	[%sp + 36], %i1			;\
	ld	[%sp + 40], %i2			;\
	ld	[%sp + 44], %i3			;\
	ld	[%sp + 48], %i4			;\
	ld	[%sp + 52], %i5			;\
	ld	[%sp + 56], %i6			;\
	ld	[%sp + 60], %i7			;\
	restored				;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty
/*
 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	FILL_32bit_asi(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
1:	TT_TRACE_L(trace_win)			;\
	mov	4, %g1				;\
	lda	[%sp + %g0]asi_num, %l0		;\
	mov	8, %g2				;\
	lda	[%sp + %g1]asi_num, %l1		;\
	mov	12, %g3				;\
	lda	[%sp + %g2]asi_num, %l2		;\
	lda	[%sp + %g3]asi_num, %l3		;\
	add	%sp, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %l4		;\
	lda	[%g4 + %g1]asi_num, %l5		;\
	lda	[%g4 + %g2]asi_num, %l6		;\
	lda	[%g4 + %g3]asi_num, %l7		;\
	add	%g4, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %i0		;\
	lda	[%g4 + %g1]asi_num, %i1		;\
	lda	[%g4 + %g2]asi_num, %i2		;\
	lda	[%g4 + %g3]asi_num, %i3		;\
	add	%g4, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %i4		;\
	lda	[%g4 + %g1]asi_num, %i5		;\
	lda	[%g4 + %g2]asi_num, %i6		;\
	lda	[%g4 + %g3]asi_num, %i7		;\
	restored				;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

/*
 * FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned even though this code
 * only needs it to be four-byte aligned.
 */
#define	FILL_32bit_tt1(asi_num, tail)		\
	mov	asi_num, %asi			;\
1:	srl	%sp, 0, %sp			;\
	TT_TRACE_L(trace_win)			;\
	lda	[%sp + 0]%asi, %l0		;\
	lda	[%sp + 4]%asi, %l1		;\
	lda	[%sp + 8]%asi, %l2		;\
	lda	[%sp + 12]%asi, %l3		;\
	lda	[%sp + 16]%asi, %l4		;\
	lda	[%sp + 20]%asi, %l5		;\
	lda	[%sp + 24]%asi, %l6		;\
	lda	[%sp + 28]%asi, %l7		;\
	lda	[%sp + 32]%asi, %i0		;\
	lda	[%sp + 36]%asi, %i1		;\
	lda	[%sp + 40]%asi, %i2		;\
	lda	[%sp + 44]%asi, %i3		;\
	lda	[%sp + 48]%asi, %i4		;\
	lda	[%sp + 52]%asi, %i5		;\
	lda	[%sp + 56]%asi, %i6		;\
	lda	[%sp + 60]%asi, %i7		;\
	restored				;\
	retry					;\
	SKIP(31-20-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty


/*
 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned.
 */
#define	SPILL_64bit(tail)			\
2:	stx	%l0, [%sp + V9BIAS64 + 0]	;\
	stx	%l1, [%sp + V9BIAS64 + 8]	;\
	stx	%l2, [%sp + V9BIAS64 + 16]	;\
	stx	%l3, [%sp + V9BIAS64 + 24]	;\
	stx	%l4, [%sp + V9BIAS64 + 32]	;\
	stx	%l5, [%sp + V9BIAS64 + 40]	;\
	stx	%l6, [%sp + V9BIAS64 + 48]	;\
	stx	%l7, [%sp + V9BIAS64 + 56]	;\
	stx	%i0, [%sp + V9BIAS64 + 64]	;\
	stx	%i1, [%sp + V9BIAS64 + 72]	;\
	stx	%i2, [%sp + V9BIAS64 + 80]	;\
	stx	%i3, [%sp + V9BIAS64 + 88]	;\
	stx	%i4, [%sp + V9BIAS64 + 96]	;\
	stx	%i5, [%sp + V9BIAS64 + 104]	;\
	stx	%i6, [%sp + V9BIAS64 + 112]	;\
	stx	%i7, [%sp + V9BIAS64 + 120]	;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-18-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty
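/*
 * The V9BIAS64 offsets above reflect the SPARC V9 stack bias: a 64-bit
 * frame's save area actually lives at %sp + V9BIAS64 (2047 bytes), since
 * a 64-bit process keeps its biased stack pointer 2047 bytes below the
 * true frame address.  The 32-bit handlers need no such offset.
 */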
/*
 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	SPILL_64bit_asi(asi_num, tail)		\
	mov	0 + V9BIAS64, %g1		;\
2:	stxa	%l0, [%sp + %g1]asi_num		;\
	mov	8 + V9BIAS64, %g2		;\
	stxa	%l1, [%sp + %g2]asi_num		;\
	mov	16 + V9BIAS64, %g3		;\
	stxa	%l2, [%sp + %g3]asi_num		;\
	mov	24 + V9BIAS64, %g4		;\
	stxa	%l3, [%sp + %g4]asi_num		;\
	add	%sp, 32, %g5			;\
	stxa	%l4, [%g5 + %g1]asi_num		;\
	stxa	%l5, [%g5 + %g2]asi_num		;\
	stxa	%l6, [%g5 + %g3]asi_num		;\
	stxa	%l7, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i0, [%g5 + %g1]asi_num		;\
	stxa	%i1, [%g5 + %g2]asi_num		;\
	stxa	%i2, [%g5 + %g3]asi_num		;\
	stxa	%i3, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i4, [%g5 + %g1]asi_num		;\
	stxa	%i5, [%g5 + %g2]asi_num		;\
	stxa	%i6, [%g5 + %g3]asi_num		;\
	stxa	%i7, [%g5 + %g4]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

/*
 * SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned.
 */
#define	SPILL_64bit_tt1(asi_num, tail)		\
	mov	asi_num, %asi			;\
2:	stxa	%l0, [%sp + V9BIAS64 + 0]%asi	;\
	stxa	%l1, [%sp + V9BIAS64 + 8]%asi	;\
	stxa	%l2, [%sp + V9BIAS64 + 16]%asi	;\
	stxa	%l3, [%sp + V9BIAS64 + 24]%asi	;\
	stxa	%l4, [%sp + V9BIAS64 + 32]%asi	;\
	stxa	%l5, [%sp + V9BIAS64 + 40]%asi	;\
	stxa	%l6, [%sp + V9BIAS64 + 48]%asi	;\
	stxa	%l7, [%sp + V9BIAS64 + 56]%asi	;\
	stxa	%i0, [%sp + V9BIAS64 + 64]%asi	;\
	stxa	%i1, [%sp + V9BIAS64 + 72]%asi	;\
	stxa	%i2, [%sp + V9BIAS64 + 80]%asi	;\
	stxa	%i3, [%sp + V9BIAS64 + 88]%asi	;\
	stxa	%i4, [%sp + V9BIAS64 + 96]%asi	;\
	stxa	%i5, [%sp + V9BIAS64 + 104]%asi	;\
	stxa	%i6, [%sp + V9BIAS64 + 112]%asi	;\
	stxa	%i7, [%sp + V9BIAS64 + 120]%asi	;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty


/*
 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit(tail)			\
2:	TT_TRACE_L(trace_win)			;\
	ldx	[%sp + V9BIAS64 + 0], %l0	;\
	ldx	[%sp + V9BIAS64 + 8], %l1	;\
	ldx	[%sp + V9BIAS64 + 16], %l2	;\
	ldx	[%sp + V9BIAS64 + 24], %l3	;\
	ldx	[%sp + V9BIAS64 + 32], %l4	;\
	ldx	[%sp + V9BIAS64 + 40], %l5	;\
	ldx	[%sp + V9BIAS64 + 48], %l6	;\
	ldx	[%sp + V9BIAS64 + 56], %l7	;\
	ldx	[%sp + V9BIAS64 + 64], %i0	;\
	ldx	[%sp + V9BIAS64 + 72], %i1	;\
	ldx	[%sp + V9BIAS64 + 80], %i2	;\
	ldx	[%sp + V9BIAS64 + 88], %i3	;\
	ldx	[%sp + V9BIAS64 + 96], %i4	;\
	ldx	[%sp + V9BIAS64 + 104], %i5	;\
	ldx	[%sp + V9BIAS64 + 112], %i6	;\
	ldx	[%sp + V9BIAS64 + 120], %i7	;\
	restored				;\
	retry					;\
	SKIP(31-18-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty
/*
 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	FILL_64bit_asi(asi_num, tail)		\
	mov	V9BIAS64 + 0, %g1		;\
2:	TT_TRACE_L(trace_win)			;\
	ldxa	[%sp + %g1]asi_num, %l0		;\
	mov	V9BIAS64 + 8, %g2		;\
	ldxa	[%sp + %g2]asi_num, %l1		;\
	mov	V9BIAS64 + 16, %g3		;\
	ldxa	[%sp + %g3]asi_num, %l2		;\
	mov	V9BIAS64 + 24, %g4		;\
	ldxa	[%sp + %g4]asi_num, %l3		;\
	add	%sp, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %l4		;\
	ldxa	[%g5 + %g2]asi_num, %l5		;\
	ldxa	[%g5 + %g3]asi_num, %l6		;\
	ldxa	[%g5 + %g4]asi_num, %l7		;\
	add	%g5, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %i0		;\
	ldxa	[%g5 + %g2]asi_num, %i1		;\
	ldxa	[%g5 + %g3]asi_num, %i2		;\
	ldxa	[%g5 + %g4]asi_num, %i3		;\
	add	%g5, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %i4		;\
	ldxa	[%g5 + %g2]asi_num, %i5		;\
	ldxa	[%g5 + %g3]asi_num, %i6		;\
	ldxa	[%g5 + %g4]asi_num, %i7		;\
	restored				;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

/*
 * FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit_tt1(asi_num, tail)		\
	mov	asi_num, %asi			;\
	TT_TRACE_L(trace_win)			;\
	ldxa	[%sp + V9BIAS64 + 0]%asi, %l0	;\
	ldxa	[%sp + V9BIAS64 + 8]%asi, %l1	;\
	ldxa	[%sp + V9BIAS64 + 16]%asi, %l2	;\
	ldxa	[%sp + V9BIAS64 + 24]%asi, %l3	;\
	ldxa	[%sp + V9BIAS64 + 32]%asi, %l4	;\
	ldxa	[%sp + V9BIAS64 + 40]%asi, %l5	;\
	ldxa	[%sp + V9BIAS64 + 48]%asi, %l6	;\
	ldxa	[%sp + V9BIAS64 + 56]%asi, %l7	;\
	ldxa	[%sp + V9BIAS64 + 64]%asi, %i0	;\
	ldxa	[%sp + V9BIAS64 + 72]%asi, %i1	;\
	ldxa	[%sp + V9BIAS64 + 80]%asi, %i2	;\
	ldxa	[%sp + V9BIAS64 + 88]%asi, %i3	;\
	ldxa	[%sp + V9BIAS64 + 96]%asi, %i4	;\
	ldxa	[%sp + V9BIAS64 + 104]%asi, %i5	;\
	ldxa	[%sp + V9BIAS64 + 112]%asi, %i6	;\
	ldxa	[%sp + V9BIAS64 + 120]%asi, %i7	;\
	restored				;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

#endif /* !lint */

/*
 * SPILL_mixed spills either size window, depending on
 * whether %sp is even or odd, to a 32-bit address space.
 * This may only be used in conjunction with SPILL_32bit/
 * SPILL_64bit.  New versions of SPILL_mixed_{tt1,asi} would be
 * needed for use with SPILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of spill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	SPILL_mixed			\
	btst	1, %sp			;\
	bz,a,pt	%xcc, 1b		;\
	srl	%sp, 0, %sp		;\
	ba,pt	%xcc, 2b		;\
	nop				;\
	.align	128
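/*
 * The even/odd test in SPILL_mixed (and in FILL_mixed below) works
 * because of the stack bias described above: a 32-bit process keeps a
 * word-aligned (even) %sp, while a 64-bit process keeps %sp biased by
 * 2047, which makes it odd.  So "btst 1, %sp" distinguishes a 32-bit
 * frame (branch back to the 32-bit handler at 1:) from a 64-bit frame
 * (branch to the 64-bit handler at 2:).
 */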
/*
 * FILL_mixed(ASI) fills either size window, depending on
 * whether %sp is even or odd, from a 32-bit address space.
 * This may only be used in conjunction with FILL_32bit/
 * FILL_64bit.  New versions of FILL_mixed_{tt1,asi} would be
 * needed for use with FILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of fill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	FILL_mixed			\
	btst	1, %sp			;\
	bz,a,pt	%xcc, 1b		;\
	srl	%sp, 0, %sp		;\
	ba,pt	%xcc, 2b		;\
	nop				;\
	.align	128


/*
 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
 * respectively, into the address space via the designated asi.  The
 * unbiased stack pointer is required to be eight-byte aligned (even for
 * the 32-bit case even though this code does not require such strict
 * alignment).
 *
 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
 * window may contain kernel data so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap.  These handlers then
 * spill the appropriate window but also back up a window and clean the
 * window that didn't get a cleanwin trap.
 */
#define	SPILL_32clean(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
	sta	%l0, [%sp + %g0]asi_num		;\
	mov	4, %g1				;\
	sta	%l1, [%sp + %g1]asi_num		;\
	mov	8, %g2				;\
	sta	%l2, [%sp + %g2]asi_num		;\
	mov	12, %g3				;\
	sta	%l3, [%sp + %g3]asi_num		;\
	add	%sp, 16, %g4			;\
	sta	%l4, [%g4 + %g0]asi_num		;\
	sta	%l5, [%g4 + %g1]asi_num		;\
	sta	%l6, [%g4 + %g2]asi_num		;\
	sta	%l7, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i0, [%g4 + %g0]asi_num		;\
	sta	%i1, [%g4 + %g1]asi_num		;\
	sta	%i2, [%g4 + %g2]asi_num		;\
	sta	%i3, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i4, [%g4 + %g0]asi_num		;\
	sta	%i5, [%g4 + %g1]asi_num		;\
	sta	%i6, [%g4 + %g2]asi_num		;\
	sta	%i7, [%g4 + %g3]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	b	.spill_clean			;\
	mov	WSTATE_USER32, %g7		;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

#define	SPILL_64clean(asi_num, tail)		\
	mov	0 + V9BIAS64, %g1		;\
	stxa	%l0, [%sp + %g1]asi_num		;\
	mov	8 + V9BIAS64, %g2		;\
	stxa	%l1, [%sp + %g2]asi_num		;\
	mov	16 + V9BIAS64, %g3		;\
	stxa	%l2, [%sp + %g3]asi_num		;\
	mov	24 + V9BIAS64, %g4		;\
	stxa	%l3, [%sp + %g4]asi_num		;\
	add	%sp, 32, %g5			;\
	stxa	%l4, [%g5 + %g1]asi_num		;\
	stxa	%l5, [%g5 + %g2]asi_num		;\
	stxa	%l6, [%g5 + %g3]asi_num		;\
	stxa	%l7, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i0, [%g5 + %g1]asi_num		;\
	stxa	%i1, [%g5 + %g2]asi_num		;\
	stxa	%i2, [%g5 + %g3]asi_num		;\
	stxa	%i3, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i4, [%g5 + %g1]asi_num		;\
	stxa	%i5, [%g5 + %g2]asi_num		;\
	stxa	%i6, [%g5 + %g3]asi_num		;\
	stxa	%i7, [%g5 + %g4]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	b	.spill_clean			;\
	mov	WSTATE_USER64, %g7		;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty
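/*
 * Walking through the scenario above concretely: with cansave == 0,
 * canrestore == 6 and cleanwin == 6, the next save traps to spill the
 * window at cwp + 2, but because the spill trap wins over the
 * clean-window trap, the window at cwp + 1 is never cleaned and may
 * still hold kernel data.  The .spill_clean code these handlers branch
 * to (with the user wstate value in %g7) takes care of clearing that
 * skipped window before the user can observe it.
 */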
/*
 * Floating point disabled.
 */
#define	FP_DISABLED_TRAP		\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_disabled	;\
	nop				;\
	.align	32

/*
 * Floating point exceptions.
 */
#define	FP_IEEE_TRAP				\
	TT_TRACE(trace_gen)			;\
	ba,pt	%xcc, .fp_ieee_exception	;\
	nop					;\
	.align	32

#define	FP_TRAP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_exception	;\
	nop				;\
	.align	32

#if !defined(lint)
/*
 * asynchronous traps at level 0 and level 1
 *
 * The first instruction must be a membar for UltraSPARC-III
 * to stop RED state entry if the store queue has many
 * pending bad stores (PRM, Chapter 11).
 */
#define	ASYNC_TRAP(ttype, ttlabel, table_name)	\
	.global	table_name			;\
table_name:					;\
	membar	#Sync				;\
	TT_TRACE(ttlabel)			;\
	ba	async_err			;\
	mov	ttype, %g5			;\
	.align	32

/*
 * Defaults to BAD entry, but establishes label to be used for
 * architecture-specific overwrite of trap table entry.
 */
#define	LABELED_BAD(table_name)	\
	.global	table_name	;\
table_name:			;\
	BAD

#endif /* !lint */

/*
 * illegal instruction trap
 */
#define	ILLTRAP_INSTR				\
	membar	#Sync				;\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP4, %g2		;\
	or	%g0, T_UNIMP_INSTR, %g3		;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * tag overflow trap
 */
#define	TAG_OVERFLOW				\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP10, %g2		;\
	or	%g0, T_TAG_OVERFLOW, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * divide by zero trap
 */
#define	DIV_BY_ZERO				\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP11, %g2		;\
	or	%g0, T_IDIV0, %g3		;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * trap instruction for V9 user trap handlers
 */
#define	TRAP_INSTR				\
	TT_TRACE(trace_gen)			;\
	or	%g0, T_SOFTWARE_TRAP, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32
#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR

/*
 * LEVEL_INTERRUPT is for level N interrupts.
 * VECTOR_INTERRUPT is for the vector trap.
 */
#define	LEVEL_INTERRUPT(level)		\
	.global	tt_pil/**/level		;\
tt_pil/**/level:			;\
	ba,pt	%xcc, pil_interrupt	;\
	mov	level, %g4		;\
	.align	32

#define	LEVEL14_INTERRUPT		\
	ba	pil14_interrupt		;\
	mov	PIL_14, %g4		;\
	.align	32

#define	VECTOR_INTERRUPT				\
	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g1	;\
	btst	IRSR_BUSY, %g1				;\
	bnz,pt	%xcc, vec_interrupt			;\
	nop						;\
	ba,a,pt	%xcc, vec_intr_spurious			;\
	.empty						;\
	.align	32
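/*
 * The /**/ constructs in LEVEL_INTERRUPT (and in the spill/fill and TLB
 * miss macros) are old-style cpp token pasting: with the traditional
 * preprocessor these assembly files are run through, an empty comment
 * glues the surrounding tokens together, so LEVEL_INTERRUPT(1) produces
 * the label tt_pil1 and a spill tail of "sn0" produces fault_32bit_sn0.
 */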
/*
 * MMU Trap Handlers.
 */
#define	SWITCH_GLOBALS	/* mmu->alt, alt->mmu */		\
	rdpr	%pstate, %g5					;\
	wrpr	%g5, PSTATE_MG | PSTATE_AG, %pstate

#define	IMMU_EXCEPTION				\
	membar	#Sync				;\
	SWITCH_GLOBALS				;\
	wr	%g0, ASI_IMMU, %asi		;\
	rdpr	%tpc, %g2			;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_INSTR_EXCEPTION, %g1		;\
	.align	32

#define	DMMU_EXCEPTION				\
	SWITCH_GLOBALS				;\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_TAG_ACCESS]%asi, %g2	;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_DATA_EXCEPTION, %g1		;\
	.align	32

#define	DMMU_EXC_AG_PRIV			\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_priv_exception	;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

#define	DMMU_EXC_AG_NOT_ALIGNED			\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_exception_not_aligned ;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

/*
 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
 */
#define	DMMU_EXC_LDDF_NOT_ALIGNED		\
	btst	1, %sp				;\
	bnz,pt	%xcc, .lddf_exception_not_aligned ;\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_exception_not_aligned ;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

#define	DMMU_EXC_STDF_NOT_ALIGNED		\
	btst	1, %sp				;\
	bnz,pt	%xcc, .stdf_exception_not_aligned ;\
	wr	%g0, ASI_DMMU, %asi		;\
	ldxa	[MMU_SFAR]%asi, %g2		;\
	ba,pt	%xcc, .mmu_exception_not_aligned ;\
	ldxa	[MMU_SFSR]%asi, %g3		;\
	.align	32

/*
 * Flush the TLB using either the primary, secondary, or nucleus flush
 * operation based on whether the ctx from the tag access register matches
 * the primary or secondary context (flush the nucleus if neither matches).
 *
 * Requires a membar #Sync before next ld/st.
 * exits with:
 *	g2 = tag access register
 *	g3 = ctx number
 */
#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
#endif
#define	DTLB_DEMAP_ENTRY					\
	mov	MMU_TAG_ACCESS, %g1				;\
	mov	MMU_PCONTEXT, %g5				;\
	ldxa	[%g1]ASI_DMMU, %g2				;\
	sethi	%hi(TAGACC_CTX_MASK), %g4			;\
	or	%g4, %lo(TAGACC_CTX_MASK), %g4			;\
	and	%g2, %g4, %g3		/* g3 = ctx */		;\
	ldxa	[%g5]ASI_DMMU, %g6	/* g6 = primary ctx */	;\
	and	%g6, %g4, %g6		/* &= CTXREG_CTX_MASK */ ;\
	cmp	%g3, %g6					;\
	be,pt	%xcc, 1f					;\
	andn	%g2, %g4, %g1		/* ctx = primary */	;\
	mov	MMU_SCONTEXT, %g5				;\
	ldxa	[%g5]ASI_DMMU, %g6	/* g6 = secondary ctx */ ;\
	and	%g6, %g4, %g6		/* &= CTXREG_CTX_MASK */ ;\
	cmp	%g3, %g6					;\
	be,a,pt	%xcc, 1f					;\
	or	%g1, DEMAP_SECOND, %g1				;\
	or	%g1, DEMAP_NUCLEUS, %g1				;\
1:	stxa	%g0, [%g1]ASI_DTLB_DEMAP /* MMU_DEMAP_PAGE */	;\
	membar	#Sync
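/*
 * In other words, DTLB_DEMAP_ENTRY builds a demap-page address from the
 * faulting VA and then picks the demap operation by context: if the tag
 * access context equals the primary context, the primary demap address
 * (%g1 as built) is used as-is; if it equals the secondary context,
 * DEMAP_SECOND is ORed in; otherwise DEMAP_NUCLEUS is used.  The store
 * to ASI_DTLB_DEMAP then performs the actual flush.
 */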
#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 */
	.global	tt0_dtlbmiss
tt0_dtlbmiss:
	.global	tt1_dtlbmiss
tt1_dtlbmiss:
	nop
#endif

/*
 * Needs to be exactly 32 instructions
 *
 * UTLB NOTE: If we don't hit on the 8k pointer then we branch
 * to a special 4M tsb handler.  It would be nice if that handler
 * could live in this file but currently it seems better to allow
 * it to fall thru to sfmmu_tsb_miss.
 */
#ifdef	UTSB_PHYS
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_DMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG, %g3					;\
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	/* g3 = 2nd tsb reg */	;\
	brgez,pn %g3, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	ldda	[%g1]ASI_QUAD_LDD_PHYS, %g4	/* g4 = tag, %g5 data */ ;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* no 4M TSB, miss */	;\
	mov	-1, %g3		/* set 4M tsbe ptr to -1 */		;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_DTLB_IN	/* trapstat expects TTE */	;\
	retry				/* in %g5 */			;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#else /* UTSB_PHYS */
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_DMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	brlz,pn %g1, sfmmu_udtlb_slowpath				;\
	nop								;\
	ldda	[%g1]ASI_NQUAD_LD, %g4	/* g4 = tag, %g5 data */	;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* no 4M TSB, miss */	;\
	mov	-1, %g3		/* set 4M tsbe ptr to -1 */		;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_DTLB_IN	/* trapstat expects TTE */	;\
	retry				/* in %g5 */			;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#endif /* UTSB_PHYS */
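/*
 * Why "exactly 32 instructions": a normal trap vector gets 8 instruction
 * slots (hence the .align 32 in most macros above), but the TLB miss and
 * protection vectors each own four consecutive trap types -- note the
 * 064, 068 and 06C spacing in the tables below -- giving them 32 slots,
 * which the unimp padding and .align 128 fill out precisely.
 */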
#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of ITLB_MISS().
 */
	.global	tt0_itlbmiss
tt0_itlbmiss:
	.global	tt1_itlbmiss
tt1_itlbmiss:
	nop
#endif

/*
 * Instruction miss handler.
 * ldda instructions will have their ASI patched
 * by sfmmu_patch_ktsb at runtime.
 * MUST be EXACTLY 32 instructions or we'll break.
 */
#ifdef	UTSB_PHYS
#define	ITLB_MISS(table_name)						\
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_IMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_IMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG, %g3					;\
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	/* g3 = 2nd tsb reg */	;\
	brgez,pn %g3, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	ldda	[%g1]ASI_QUAD_LDD_PHYS, %g4	/* g4 = tag, g5 = data */ ;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* br if 8k ptr miss */		;\
	mov	-1, %g3		/* set 4M TSB ptr to -1 */		;\
	andcc	%g5, TTE_EXECPRM_INT, %g0 /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	nop								;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_ITLB_IN	/* trapstat expects %g5 */	;\
	retry								;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#else /* UTSB_PHYS */
#define	ITLB_MISS(table_name)						\
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_IMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_IMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	brlz,pn %g1, sfmmu_uitlb_slowpath	/* if >1 TSB branch */	;\
	nop								;\
	ldda	[%g1]ASI_NQUAD_LD, %g4	/* g4 = tag, g5 = data */	;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* br if 8k ptr miss */		;\
	mov	-1, %g3		/* set 4M TSB ptr to -1 */		;\
	andcc	%g5, TTE_EXECPRM_INT, %g0 /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	nop								;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_ITLB_IN	/* trapstat expects %g5 */	;\
	retry								;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#endif /* UTSB_PHYS */
/*
 * This macro is the first level handler for fast protection faults.
 * It first demaps the tlb entry which generated the fault and then
 * attempts to set the modify bit on the hash.  It needs to be
 * exactly 32 instructions.
 */
#define	DTLB_PROT							\
	DTLB_DEMAP_ENTRY		/* 20 instructions */		;\
	/*								;\
	 * At this point:						;\
	 *   g1 = ????							;\
	 *   g2 = tag access register					;\
	 *   g3 = ctx number						;\
	 *   g4 = ????							;\
	 */								;\
	TT_TRACE(trace_dataprot)	/* 2 instr ifdef TRAPTRACE */	;\
					/* clobbers g1 and g6 */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	brnz,pt %g3, sfmmu_uprot_trap		/* user trap */		;\
	nop								;\
	ba,a,pt	%xcc, sfmmu_kprot_trap		/* kernel trap */	;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128

#define	DMMU_EXCEPTION_TL1		;\
	SWITCH_GLOBALS			;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	nop				;\
	.align	32

#define	MISALIGN_ADDR_TL1		;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	nop				;\
	.align	32

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 * ttextra = value to OR in to trap type (%tt) (in)
 */
#ifdef TRAPTRACE
#define	TRACE_TSBHIT(ttextra)					\
	membar	#Sync						;\
	sethi	%hi(FLUSH_ADDR), %g6				;\
	flush	%g6						;\
	TRACE_PTR(%g3, %g6)					;\
	GET_TRACE_TICK(%g6)					;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi			;\
	stxa	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */ ;\
	stxa	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */	;\
	rdpr	%tnpc, %g6					;\
	stxa	%g6, [%g3 + TRAP_ENT_F2]%asi			;\
	stxa	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */ ;\
	stxa	%g0, [%g3 + TRAP_ENT_F4]%asi			;\
	rdpr	%tpc, %g6					;\
	stxa	%g6, [%g3 + TRAP_ENT_TPC]%asi			;\
	rdpr	%tl, %g6					;\
	stha	%g6, [%g3 + TRAP_ENT_TL]%asi			;\
	rdpr	%tt, %g6					;\
	or	%g6, (ttextra), %g6				;\
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi			;\
	ldxa	[%g0]ASI_IMMU, %g1		/* tag target */ ;\
	ldxa	[%g0]ASI_DMMU, %g4				;\
	cmp	%g6, FAST_IMMU_MISS_TT				;\
	movne	%icc, %g4, %g1					;\
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tsb tag */	;\
	stxa	%g0, [%g3 + TRAP_ENT_TR]%asi			;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
#define	TRACE_TSBHIT(ttextra)
#endif

#if defined(lint)

struct scb	trap_table;
struct scb	scb;		/* trap_table/scb are the same object */

#else /* lint */

/*
 * =======================================================================
 * SPARC V9 TRAP TABLE
 *
 * The trap table is divided into two halves: the first half is used when
 * taking traps when TL=0; the second half is used when taking traps from
 * TL>0.  Note that handlers in the second half of the table might not be
 * able to make the same assumptions as handlers in the first half of the
 * table.
 *
 * Worst case trap nesting so far:
 *
 *	at TL=0 client issues software trap requesting service
 *	at TL=1 nucleus wants a register window
 *	at TL=2 register window clean/spill/fill takes a TLB miss
 *	at TL=3 processing TLB miss
 *	at TL=4 handle asynchronous error
 *
 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
 *
 * =======================================================================
 */
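/*
 * Layout note: each entry below is one 32-byte (8 instruction) trap
 * vector, kept to size by the .align 32/.align 128 directives in the
 * macros above.  trap_table0 holds the 512 TL=0 vectors (tt 000 - 1FF)
 * and trap_table1 the TL>0 vectors, which is why the TL>0 half can be
 * truncated once its last used entry has been emitted (see etrap_table
 * below).
 */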
	.section ".text"
	.align	4
	.global	trap_table, scb, trap_table0, trap_table1, etrap_table
	.type	trap_table, #object
	.type	scb, #object
trap_table:
scb:
trap_table0:
	/* hardware traps */
	NOT;				/* 000	reserved */
	RED;				/* 001	power on reset */
	RED;				/* 002	watchdog reset */
	RED;				/* 003	externally initiated reset */
	RED;				/* 004	software initiated reset */
	RED;				/* 005	red mode exception */
	NOT; NOT;			/* 006 - 007 reserved */
	IMMU_EXCEPTION;			/* 008	instruction access exception */
	NOT;				/* 009	instruction access MMU miss */
	ASYNC_TRAP(T_INSTR_ERROR, trace_gen, tt0_iae);
					/* 00A	instruction access error */
	NOT; NOT4;			/* 00B - 00F reserved */
	ILLTRAP_INSTR;			/* 010	illegal instruction */
	TRAP(T_PRIV_INSTR);		/* 011	privileged opcode */
	NOT;				/* 012	unimplemented LDD */
	NOT;				/* 013	unimplemented STD */
	NOT4; NOT4; NOT4;		/* 014 - 01F reserved */
	FP_DISABLED_TRAP;		/* 020	fp disabled */
	FP_IEEE_TRAP;			/* 021	fp exception ieee 754 */
	FP_TRAP;			/* 022	fp exception other */
	TAG_OVERFLOW;			/* 023	tag overflow */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	DIV_BY_ZERO;			/* 028	division by zero */
	NOT;				/* 029	internal processor error */
	NOT; NOT; NOT4;			/* 02A - 02F reserved */
	DMMU_EXCEPTION;			/* 030	data access exception */
	NOT;				/* 031	data access MMU miss */
	ASYNC_TRAP(T_DATA_ERROR, trace_gen, tt0_dae);
					/* 032	data access error */
	NOT;				/* 033	data access protection */
	DMMU_EXC_AG_NOT_ALIGNED;	/* 034	mem address not aligned */
	DMMU_EXC_LDDF_NOT_ALIGNED;	/* 035	LDDF mem address not aligned */
	DMMU_EXC_STDF_NOT_ALIGNED;	/* 036	STDF mem address not aligned */
	DMMU_EXC_AG_PRIV;		/* 037	privileged action */
	NOT;				/* 038	LDQF mem address not aligned */
	NOT;				/* 039	STQF mem address not aligned */
	NOT; NOT; NOT4;			/* 03A - 03F reserved */
	LABELED_BAD(tt0_asdat);		/* 040	async data error */
	LEVEL_INTERRUPT(1);		/* 041	interrupt level 1 */
	LEVEL_INTERRUPT(2);		/* 042	interrupt level 2 */
	LEVEL_INTERRUPT(3);		/* 043	interrupt level 3 */
	LEVEL_INTERRUPT(4);		/* 044	interrupt level 4 */
	LEVEL_INTERRUPT(5);		/* 045	interrupt level 5 */
	LEVEL_INTERRUPT(6);		/* 046	interrupt level 6 */
	LEVEL_INTERRUPT(7);		/* 047	interrupt level 7 */
	LEVEL_INTERRUPT(8);		/* 048	interrupt level 8 */
	LEVEL_INTERRUPT(9);		/* 049	interrupt level 9 */
	LEVEL_INTERRUPT(10);		/* 04A	interrupt level 10 */
	LEVEL_INTERRUPT(11);		/* 04B	interrupt level 11 */
	LEVEL_INTERRUPT(12);		/* 04C	interrupt level 12 */
	LEVEL_INTERRUPT(13);		/* 04D	interrupt level 13 */
	LEVEL14_INTERRUPT;		/* 04E	interrupt level 14 */
	LEVEL_INTERRUPT(15);		/* 04F	interrupt level 15 */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F reserved */
	VECTOR_INTERRUPT;		/* 060	interrupt vector */
	GOTO(kmdb_trap);		/* 061	PA watchpoint */
	GOTO(kmdb_trap);		/* 062	VA watchpoint */
	GOTO_TT(ce_err, trace_gen);	/* 063	corrected ECC error */
	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt0);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	LABELED_BAD(tt0_fecc);		/* 070	fast ecache ECC error */
	LABELED_BAD(tt0_dperr);		/* 071	Cheetah+ dcache parity error */
	LABELED_BAD(tt0_iperr);		/* 072	Cheetah+ icache parity error */
	NOT;				/* 073	reserved */
	NOT4; NOT4; NOT4;		/* 074 - 07F reserved */
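	/*
	 * The next 128 entries (080 - 0FF) are the register window traps:
	 * spill 0-7 "normal" then "other", followed by fill 0-7 "normal"
	 * then "other".  Which of the eight handlers in a group is taken
	 * is selected by the corresponding 3-bit field of %wstate, which
	 * is how user_rtt steers the first user spill into the
	 * SPILL_32clean/SPILL_64clean variants described earlier.
	 */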
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_asi(ASI_AIUP,sn0);	/* 084	spill 1 normal */
	SPILL_64bit_asi(ASI_AIUP,sn0);	/* 088	spill 2 normal */
	SPILL_32clean(ASI_AIUP,sn0);	/* 08C	spill 3 normal */
	SPILL_64clean(ASI_AIUP,sn0);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0A4	spill 1 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0A8	spill 2 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0AC	spill 3 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0C4	fill 1 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0C8	fill 2 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0CC	fill 3 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4;				/* 0E0	fill 0 other */
	NOT4;				/* 0E4	fill 1 other */
	NOT4;				/* 0E8	fill 2 other */
	NOT4;				/* 0EC	fill 3 other */
	NOT4;				/* 0F0	fill 4 other */
	NOT4;				/* 0F4	fill 5 other */
	NOT4;				/* 0F8	fill 6 other */
	NOT4;				/* 0FC	fill 7 other */
	/* user traps */
	GOTO(syscall_trap_4x);		/* 100	old system call */
	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
	TRAP(T_DIV0);			/* 102	user divide by zero */
	FLUSHW();			/* 103	flush windows */
	GOTO(.clean_windows);		/* 104	clean windows */
	BAD;				/* 105	range check ?? */
	GOTO(.fix_alignment);		/* 106	do unaligned references */
	BAD;				/* 107	unused */
	SYSCALL_TRAP32;			/* 108	ILP32 system call on LP64 */
	GOTO(set_trap0_addr);		/* 109	set trap0 address */
	BAD; BAD; BAD4;			/* 10A - 10F unused */
	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
	GOTO(.getcc);			/* 120	get condition codes */
	GOTO(.setcc);			/* 121	set condition codes */
	GOTO(.getpsr);			/* 122	get psr */
	GOTO(.setpsr);			/* 123	set psr (some fields) */
	GOTO(get_timestamp);		/* 124	get timestamp */
	GOTO(get_virtime);		/* 125	get lwp virtual time */
	PRIV(self_xcall);		/* 126	self xcall */
	GOTO(get_hrestime);		/* 127	get hrestime */
	BAD;				/* 128	ST_SETV9STACK */
	GOTO(.getlgrp);			/* 129	get lgrpid */
	BAD; BAD; BAD4;			/* 12A - 12F unused */
	BAD4; BAD4;			/* 130 - 137 unused */
	DTRACE_PID;			/* 138	dtrace pid tracing provider */
	BAD;				/* 139	unused */
	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
	BAD; BAD4;			/* 13B - 13F unused */
	SYSCALL_TRAP;			/* 140	LP64 system call */
	SYSCALL(nosys);			/* 141	unused system call trap */
#ifdef DEBUG_USER_TRAPTRACECTL
	GOTO(.traptrace_freeze);	/* 142	freeze traptrace */
	GOTO(.traptrace_unfreeze);	/* 143	unfreeze traptrace */
#else
	SYSCALL(nosys);			/* 142	unused system call trap */
	SYSCALL(nosys);			/* 143	unused system call trap */
#endif
	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD;				/* 170 - unused */
	BAD;				/* 171 - unused */
	BAD; BAD;			/* 172 - 173 unused */
	BAD4; BAD4;			/* 174 - 17B unused */
#ifdef	PTL1_PANIC_DEBUG
	mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
					/* 17C	test ptl1_panic */
#else
	BAD;				/* 17C	unused */
#endif	/* PTL1_PANIC_DEBUG */
	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
	PRIV(kctx_obp_bpt);		/* 17F	obp breakpoint */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
trap_table1:
	NOT4; NOT4; NOT; NOT;		/* 000 - 009 unused */
	ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen, tt1_iae);
					/* 00A	instruction access error */
	NOT; NOT4;			/* 00B - 00F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
	NOT4;				/* 020 - 023 unused */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	NOT4; NOT4;			/* 028 - 02F unused */
	DMMU_EXCEPTION_TL1;		/* 030	data access exception */
	NOT;				/* 031	unused */
	ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen, tt1_dae);
					/* 032	data access error */
	NOT;				/* 033	unused */
	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
	LABELED_BAD(tt1_asdat);		/* 040	async data error */
	NOT; NOT; NOT;			/* 041 - 043 unused */
	NOT4; NOT4; NOT4;		/* 044 - 04F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
	NOT;				/* 060	unused */
	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
	GOTO_TT(ce_err_tl1, trace_gen);	/* 063	corrected ECC error */
	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt1);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	LABELED_BAD(tt1_fecc);		/* 070	fast ecache ECC error */
	LABELED_BAD(tt1_dperr);		/* 071	Cheetah+ dcache parity error */
	LABELED_BAD(tt1_iperr);		/* 072	Cheetah+ icache parity error */
	NOT;				/* 073	reserved */
	NOT4; NOT4; NOT4;		/* 074 - 07F reserved */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 084	spill 1 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 088	spill 2 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 08C	spill 3 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0A4	spill 1 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0A8	spill 2 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0AC	spill 3 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_tt1(ASI_AIUP,fn1);	/* 0C4	fill 1 normal */
	FILL_64bit_tt1(ASI_AIUP,fn1);	/* 0C8	fill 2 normal */
	FILL_32bit_tt1(ASI_AIUP,fn1);	/* 0CC	fill 3 normal */
	FILL_64bit_tt1(ASI_AIUP,fn1);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
	LABELED_BAD(tt1_swtrap0);	/* 100	fast ecache ECC error (cont) */
	LABELED_BAD(tt1_swtrap1);	/* 101	Ch+ D$ parity error (cont) */
	LABELED_BAD(tt1_swtrap2);	/* 102	Ch+ I$ parity error (cont) */
	NOT;				/* 103	reserved */
/*
 * We only reserve the above four special case soft traps for code running
 * at TL>0, so we can truncate the trap table here.
 */
etrap_table:
	.size	trap_table, (.-trap_table)
	.size	scb, (.-scb)

/*
 * We get to exec_fault in the case of an instruction miss and tte
 * has no execute bit set.  We go to tl0 to handle it.
 *
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 */
	ALTENTRY(exec_fault)
	TRACE_TSBHIT(0x200)
	SWITCH_GLOBALS
	mov	MMU_TAG_ACCESS, %g4
	ldxa	[%g4]ASI_IMMU, %g2		! arg1 = addr
	mov	T_INSTR_MMU_MISS, %g3		! arg2 = traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4
.mmu_exception_not_aligned:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5		! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp
	bz,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1

.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f
	nop
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1

.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	%xcc, .mmu_exception_tlb_chk
	or	%g5, CPU_DTRACE_BADADDR, %g5
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done

.mmu_exception_tlb_chk:
	GET_CPU_IMPL(%g5)			! check SFSR.FT to see if this
	cmp	%g5, PANTHER_IMPL		! is a TLB parity error. But
	bne	2f				! we only do this check while
	mov	1, %g4				! running on Panther CPUs
	sllx	%g4, PN_SFSR_PARITY_SHIFT, %g4	! since US-I/II use the same
	andcc	%g3, %g4, %g0			! bit for something else which
	bz	2f				! will be handled later.
	nop
.mmu_exception_is_tlb_parity:
	.weak itlb_parity_trap
	.weak dtlb_parity_trap
	set	itlb_parity_trap, %g4
	cmp	%g1, T_INSTR_EXCEPTION		! branch to the itlb or
	be	3f				! dtlb parity handler
	nop					! if this trap is due
	set	dtlb_parity_trap, %g4
	cmp	%g1, T_DATA_EXCEPTION		! to an IMMU exception
	be	3f				! or DMMU exception.
	nop
2:
	sllx	%g3, 32, %g3
	or	%g3, %g1, %g3
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
3:
	jmp	%g4				! off to the appropriate
	nop					! TLB parity handler

.fp_disabled:
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	brz,a,pn %g1, 2f
	nop
#endif
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	bnz,pn	%icc, 2f
	nop
#else
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
#endif
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 2f
	nop
	ldn	[%g5 + P_UTRAP7], %g5		! fp_disabled utrap?
	brz,a,pn %g5, 2f
	nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f			! 32 bit user program
	nop
	ba,a,pt	%xcc, .setup_v9utrap		! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g0, %g0, %g7
2:
	set	fp_disabled, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldn	[%g1 + T_PROCP], %g1		! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5		! are there utraps?
	brz,a,pt %g5, 1f
	nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * Register Inputs:
 *	%g5	user trap handler
 *	%g7	misaligned addr - for alignment traps only
 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	mov	%g7, %l3			! arg2 == misaligned address

	rdpr	%tstate, %g1			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g1
	wrpr	%g1, %g4, %tstate
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE
	/* NOTREACHED */

.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f
	nop
	CPU_ADDR(%g4, %g1)			! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5		! load thread pointer
	ldn	[%g5 + T_PROCP], %g5		! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5		! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	nop

	brz,pt %g5, 3f			! if p_utraps == NULL goto trap()
	rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	smul	%g1, CPTRSIZE, %g2
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR]	! set CPU_TL1_HDLR
	rdpr	%tpc, %g1			! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1		! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR]	! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4		! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4			! and instruction with mask
	bnz,a,pt %icc, 3f			! if %g4 == zero, %g1 is an ILLTRAP
	nop					! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
	/* NOTREACHED */
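/*
 * The arithmetic in .check_v9utrap, spelled out: for a software trap,
 * %g3 is reloaded with the raw %tt, so user trap instructions 16 - 31
 * (tt 0x110 - 0x11F, i.e. 272 - 287) map to p_utraps[18] and up --
 * 272 - 254 = 18 = UT_TRAP_INSTRUCTION_16 -- and the smul by CPTRSIZE
 * turns that index into a byte offset into the p_utraps array.
 */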
/*
 * Register Inputs:
 *	%g5		user trap handler
 */
.setup_v9utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l7			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l6			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	rdpr	%tstate, %g2			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g2
	wrpr	%g2, %g4, %tstate

	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
	ldn	[%g4 + P_AS], %g4		! load as pointer
	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
	cmp	%l7, %g4			! check for single-step set
	bne,pt	%xcc, 4f
	nop
	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
	bne,pt	%icc, 4f
	nop
	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
	mov	%l7, %g4			! on entry to precise user trap
	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
						! at time of trap
	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
						! %g4 == userlimit
	FAST_TRAP_DONE
	/* NOTREACHED */
4:
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE_CHK_INTR
	/* NOTREACHED */
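
/*
 * The single-step interaction above can be summarized by this rough
 * sketch (illustrative only; resume_at() stands for the wrpr %tnpc plus
 * FAST_TRAP_DONE sequence):
 *
 *	// %tnpc == userlimit means a single-step is pending: defer the
 *	// utrap and let stepping off userlimit regenerate FLTBOUNDS.
 *	if (tnpc == p->p_as->a_userlimit &&
 *	    lwp->lwp_pcb.pcb_step == STEP_ACTIVE) {
 *		lwp->lwp_pcb.pcb_tracepc = handler;
 *		resume_at(userlimit);		// precise trap will recur
 *	} else {
 *		resume_at(handler);		// enter user trap handler
 *	}
 */
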
.fp_exception:
	CPU_ADDR(%g1, %g4)
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2

	/*
	 * Cheetah takes an unfinished_FPop trap for a certain range of
	 * operands to the "fitos" instruction. Instead of going through
	 * the slow software emulation path, we try to simulate the "fitos"
	 * instruction via "fitod" and "fdtos" provided the following
	 * conditions are met:
	 *
	 *	fpu_exists is set (if DEBUG)
	 *	not in privileged mode
	 *	ftt is unfinished_FPop
	 *	NXM IEEE trap is not enabled
	 *	instruction at %tpc is "fitos"
	 *
	 *  Usage:
	 *	%g1	per cpu address
	 *	%g2	%fsr
	 *	%g6	user instruction
	 *
	 * Note that we can take a memory access related trap while trying
	 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
	 * flag to catch those traps and let the SFMMU code deal with page
	 * fault and data access exception.
	 */
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7
	ld	[%g7 + %lo(fpu_exists)], %g7
	brz,pn	%g7, .fp_exception_cont
	nop
#endif
	rdpr	%tstate, %g7			! branch if in privileged mode
	btst	TSTATE_PRIV, %g7
	bnz,pn	%xcc, .fp_exception_cont
	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
	cmp	%g7, FTT_UNFIN
	set	FSR_TEM_NX, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
	andcc	%g2, %g5, %g0
	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
	rdpr	%tpc, %g5			! get faulting PC

	or	%g0, 1, %g7
	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
	lda	[%g5]ASI_USER, %g6		! get user's instruction
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag

	set	FITOS_INSTR_MASK, %g7
	and	%g6, %g7, %g7
	set	FITOS_INSTR, %g5
	cmp	%g7, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
	nop

	/*
	 * This is the unfinished_FPop trap for the "fitos" instruction.
	 * We need to simulate "fitos" via the "fitod" and "fdtos"
	 * instruction sequence.
	 *
	 * We need a temporary FP register to do the conversion. Since
	 * both source and destination operands for the "fitos" instruction
	 * have to be within %f0-%f31, we use an FP register from the upper
	 * half to guarantee that it won't collide with the source or the
	 * dest operand. However, we do have to save and restore its value.
	 *
	 * We use %d62 as a temporary FP register for the conversion and
	 * branch to the appropriate instruction within the conversion
	 * tables based upon the rs2 and rd values.
	 */

	std	%d62, [%g1 + CPU_TMP1]		! save original value

	srl	%g6, FITOS_RS2_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fitod_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fitod_done
	.empty

_fitos_fitod_table:
	fitod	%f0, %d62
	fitod	%f1, %d62
	fitod	%f2, %d62
	fitod	%f3, %d62
	fitod	%f4, %d62
	fitod	%f5, %d62
	fitod	%f6, %d62
	fitod	%f7, %d62
	fitod	%f8, %d62
	fitod	%f9, %d62
	fitod	%f10, %d62
	fitod	%f11, %d62
	fitod	%f12, %d62
	fitod	%f13, %d62
	fitod	%f14, %d62
	fitod	%f15, %d62
	fitod	%f16, %d62
	fitod	%f17, %d62
	fitod	%f18, %d62
	fitod	%f19, %d62
	fitod	%f20, %d62
	fitod	%f21, %d62
	fitod	%f22, %d62
	fitod	%f23, %d62
	fitod	%f24, %d62
	fitod	%f25, %d62
	fitod	%f26, %d62
	fitod	%f27, %d62
	fitod	%f28, %d62
	fitod	%f29, %d62
	fitod	%f30, %d62
	fitod	%f31, %d62
_fitos_fitod_done:

	/*
	 * Now convert data back into single precision
	 */
	srl	%g6, FITOS_RD_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fdtos_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fdtos_done
	.empty

_fitos_fdtos_table:
	fdtos	%d62, %f0
	fdtos	%d62, %f1
	fdtos	%d62, %f2
	fdtos	%d62, %f3
	fdtos	%d62, %f4
	fdtos	%d62, %f5
	fdtos	%d62, %f6
	fdtos	%d62, %f7
	fdtos	%d62, %f8
	fdtos	%d62, %f9
	fdtos	%d62, %f10
	fdtos	%d62, %f11
	fdtos	%d62, %f12
	fdtos	%d62, %f13
	fdtos	%d62, %f14
	fdtos	%d62, %f15
	fdtos	%d62, %f16
	fdtos	%d62, %f17
	fdtos	%d62, %f18
	fdtos	%d62, %f19
	fdtos	%d62, %f20
	fdtos	%d62, %f21
	fdtos	%d62, %f22
	fdtos	%d62, %f23
	fdtos	%d62, %f24
	fdtos	%d62, %f25
	fdtos	%d62, %f26
	fdtos	%d62, %f27
	fdtos	%d62, %f28
	fdtos	%d62, %f29
	fdtos	%d62, %f30
	fdtos	%d62, %f31
_fitos_fdtos_done:

	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62

#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE
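
/*
 * The fast path above is, in rough C (sketch only; fpreg32() and
 * fpreg32_set() are stand-ins for the per-register jump tables, and the
 * decode masks mirror the FITOS_* constants):
 *
 *	uint32_t instr = fetch_user_instr(tpc());
 *	if ((instr & FITOS_INSTR_MASK) == FITOS_INSTR) {
 *		int rs2 = (instr >> FITOS_RS2_SHIFT) & FITOS_REG_MASK;
 *		int rd = (instr >> FITOS_RD_SHIFT) & FITOS_REG_MASK;
 *		double tmp = (double)fpreg32(rs2);	// fitod %f<rs2>, %d62
 *		fpreg32_set(rd, (float)tmp);		// fdtos %d62, %f<rd>
 *	}
 */
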
.fp_exception_cont:
	/*
	 * Let _fp_exception deal with simulating FPop instruction.
	 * Note that we need to pass %fsr in %g2 (already read above).
	 */

	set	_fp_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

.clean_windows:
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	sub	%g0, 1, %g4
	save
	flushw
	restore
	wrpr	%g0, %g0, %cleanwin	! no clean windows

	CPU_ADDR(%g4, %g5)
	ldn	[%g4 + CPU_MPCB], %g4
	brz,a,pn %g4, 1f
	nop
	ld	[%g4 + MPCB_WSTATE], %g5
	add	%g5, WSTATE_CLEAN_OFFSET, %g5
	wrpr	%g0, %g5, %wstate
1:	FAST_TRAP_DONE

/*
 * .spill_clean: clean the previous window, restore the wstate, and
 * "retry".
 *
 * Entry: %g7 contains new wstate
 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
	wrpr	%g6, %cwp
	TT_TRACE_L(trace_win)
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate
	saved
	retry					! restores correct %cwp

.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1
	mov	1, %g2
	stb	%g2, [%g1 + P_FIXALIGNMENT]
	FAST_TRAP_DONE

#define	STDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark1:	set	start1, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start1:	ba,pt	%xcc, done1			;\
	std	%f0, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f32, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f2, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f34, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f4, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f36, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f6, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f38, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f8, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f40, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f10, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f42, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f12, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f44, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f14, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f46, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f16, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f48, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f18, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f50, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f20, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f52, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f22, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f54, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f24, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f56, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f26, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f58, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f62, [ADDR + CPU_TMP1]		;\
done1:
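
/*
 * STDF_REG above and LDDF_REG below dispatch on a register number by
 * jumping into a table of two-instruction (8-byte) entries: REG << 3 is
 * the byte offset of the entry that stores/loads one double-FP register
 * and branches out.  Roughly, in C (sketch only):
 *
 *	// rd encodings 0,2,4,... name %f0,%f2,...; odd encodings name
 *	// the upper bank %f32,%f34,... per the V9 double-register rules,
 *	// which is why the tables interleave %f0, %f32, %f2, %f34, ...
 *	switch (rd) {
 *	case 0:		store64(cpu_tmp1, fp_d0);	break;
 *	case 1:		store64(cpu_tmp1, fp_d32);	break;
 *	case 2:		store64(cpu_tmp1, fp_d2);	break;
 *	// ... and so on through %f62
 *	}
 */
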
#define	LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start2:	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f62		;\
done2:

.lddf_exception_not_aligned:
	/*
	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
	 */
	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2	! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
	srl	%g6, 23, %g1		! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for ldda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_PNF		! primary no fault address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_SNF		! secondary no fault address space
	bne,a,pn %icc, 3f
	nop
2:
	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
	add	%g5, 4, %g5		! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5		! combine data
	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1

	srl	%g6, 25, %g3		! %g6 has the instruction
	and	%g3, 0x1F, %g3		! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4
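
/*
 * The decode above, in rough C (sketch only; field positions follow the
 * SPARC V9 LDDF/LDDFA encodings, and tstate_asi() is a stand-in for the
 * shift on %tstate):
 *
 *	uint32_t i = fetch_user_instr(tpc());
 *	int rd = (i >> 25) & 0x1f;
 *	if ((i >> 23) & 1) {			// LDDFA: alternate ASI
 *		int asi;
 *		if ((i >> 13) & 1)		// i-bit set: ASI from %asi
 *			asi = tstate_asi();
 *		else				// else: imm_asi field
 *			asi = (i >> 5) & 0xff;
 *		if (asi != ASI_P && asi != ASI_PNF &&
 *		    asi != ASI_S && asi != ASI_SNF)
 *			goto slow_path;		// e.g. little-endian ASIs
 *	}
 *	// emulate with two aligned 32-bit user loads, then LDDF_REG(rd)
 */
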
.stdf_exception_not_aligned:
	/*
	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
	 */
	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7	! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction

	srl	%g6, 23, %g1		! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for stda instruction
	nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S		! secondary address space
	bne,a,pn %icc, 3f
	nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6		! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER	! first half
	add	%g5, 4, %g5		! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER	! second half

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! nofault little asi's
	sub	%g0, 1, %g4

#ifdef DEBUG_USER_TRAPTRACECTL

.traptrace_freeze:
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]
	FAST_TRAP_DONE

.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	FAST_TRAP_DONE

#endif /* DEBUG_USER_TRAPTRACECTL */

.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	rdpr	%pstate, %o1
	wrpr	%o1, PSTATE_AG, %pstate		! get into normal globals
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, %o1, %pstate		! back into alternate globals
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1
	FAST_TRAP_DONE

.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
	rdpr	%pstate, %o0
	wrpr	%o0, PSTATE_AG, %pstate		! get into normal globals
	mov	%g1, %o1
	wrpr	%g0, %o0, %pstate		! back to alternates
	sll	%o1, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1
	FAST_TRAP_DONE
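
/*
 * .getcc/.setcc emulate the V8 condition-code interface on top of
 * tstate.ccr.  In rough C (sketch only; the value travels in the user's
 * normal %g1, hence the PSTATE_AG toggling above):
 *
 *	// getcc: tstate.ccr.icc (bits 35:32) -> V8 psr.icc (bits 23:20),
 *	// returned right-justified
 *	g1 = ((tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC) >> PSR_ICC_SHIFT;
 *
 *	// setcc: the inverse mapping; the upper word of tstate is
 *	// cleared first so stale ccr bits cannot survive
 *	tstate = (uint32_t)tstate |
 *	    (((g1 << PSR_ICC_SHIFT) & PSR_ICC) << PSR_TSTATE_CC_SHIFT);
 */
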
/*
 * getpsr(void)
 * Note that the xcc part of the ccr is not provided.
 * The V8 code shows why the V9 trap is not faster:
 * #define GETPSR_TRAP() \
 *	mov %psr, %i0; jmp %l2; rett %l2+4; nop;
 */

	.type	.getpsr, #function
.getpsr:
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)

/*
 * setpsr(newpsr)
 * Note that there is no support for ccr.xcc in the V9 code.
 */

	.type	.setpsr, #function
.setpsr:
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or	%g0, CCR_ICC, %g3
	sllx	%g3, TSTATE_CCR_SHIFT, %g2

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)
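
/*
 * .setpsr above, in rough C (sketch only; write_fprs() is a stand-in
 * for the wr %fprs, and FPU_FPRS/FPU_EN mirror the fields updated via
 * the klwp's fpu pointer):
 *
 *	tstate &= ~((uint64_t)CCR_ICC << TSTATE_CCR_SHIFT);	// old icc
 *	tstate |= (uint64_t)(newpsr & PSR_ICC) << PSR_TSTATE_CC_SHIFT;
 *	uint32_t fprs = (newpsr & PSR_EF) >> PSR_FPRS_FEF_SHIFT;
 *	write_fprs(fprs);				// psr.ef -> fprs.fef
 *	lwp->lwp_fpu->fpu_fprs = fprs;
 *	lwp->lwp_fpu->fpu_en = fprs >> 2;		// FPRS_FEF is bit 2
 */
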
/*
 * getlgrp
 * get home lgrpid on which the calling thread is currently executing.
 */
	.type	.getlgrp, #function
.getlgrp:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)

/*
 * Entry for old 4.x trap (trap 0).
 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f			! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%tnpc, %l1		! save old tnpc
	wrpr	%g0, %g2, %tnpc		! setup tnpc

	rdpr	%pstate, %l0
	wrpr	%l0, PSTATE_AG, %pstate	! switch to normal globals
	mov	%l1, %g6		! pass tnpc to user code in %g6
	wrpr	%l0, %g0, %pstate	! switch back to alternate globals

	! Note that %g1 still contains CPU struct addr
	ld	[%g1 + CPU_TMP2], %l1	! restore locals
	ld	[%g1 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	mov	%g1, %l0
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%pstate, %l1
	wrpr	%l1, PSTATE_AG, %pstate
	!
	! check for the old syscall mmap, the only 4.x syscall whose
	! number differs and must be remapped here.  The others are
	! handled in the compatibility library.
	!
	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1
	wrpr	%g0, %l1, %pstate
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)

/*
 * Handler for software trap 9.
 * Set trap0 emulation address for old 4.x system call trap.
 * XXX - this should be a system call.
 */
	ENTRY_NP(set_trap0_addr)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%pstate, %l0
	wrpr	%l0, PSTATE_AG, %pstate
	mov	%g1, %l1
	wrpr	%g0, %l0, %pstate
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%g1 + CPU_TMP1], %l0	! restore locals
	ld	[%g1 + CPU_TMP2], %l1
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)
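
/*
 * Software trap 9 and trap 0 together implement the 4.x syscall
 * emulation, roughly (sketch only; resume_at() stands for the %tnpc
 * setup and FAST_TRAP_DONE_CHK_INTR):
 *
 *	// ST 9: remember the user-space emulation entry point
 *	lwp->lwp_pcb.pcb_trap0addr = g1 & ~3;	// force alignment
 *
 *	// trap 0: if an emulator is registered, bounce to it, passing
 *	// the original return %tnpc in the user's normal %g6
 *	if (lwp->lwp_pcb.pcb_trap0addr != NULL) {
 *		g6 = tnpc;
 *		resume_at(lwp->lwp_pcb.pcb_trap0addr);
 *	} else {
 *		if (g1 == OSYS_mmap)	// only mismatched syscall number
 *			g1 = SYS_mmap;
 *		syscall_trap32();
 *	}
 */
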
/*
 * mmu_trap_tl1
 * Trap handler for unexpected mmu traps.  It checks whether the trap was
 * a user lddf/stdf alignment trap (in which case we go to fpu_trap) or a
 * user trap from the window handler (in which case we save the state on
 * the pcb).  Otherwise, we go to ptl1_panic.
 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef	TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	set	MMU_SFAR, %g6
	ldxa	[%g6]ASI_DMMU, %g6
	stxa	%g6, [%g5 + TRAP_ENT_F1]%asi
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
	set	0xdeadbeef, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */

	GET_CPU_IMPL(%g5)
	cmp	%g5, PANTHER_IMPL
	bne	mmu_trap_tl1_4
	nop
	rdpr	%tt, %g5
	cmp	%g5, T_DATA_EXCEPTION
	bne	mmu_trap_tl1_4
	nop
	wr	%g0, ASI_DMMU, %asi
	ldxa	[MMU_SFSR]%asi, %g5
	mov	1, %g6
	sllx	%g6, PN_SFSR_PARITY_SHIFT, %g6
	andcc	%g5, %g6, %g0
	bz	mmu_trap_tl1_4

	/*
	 * We are running on a Panther and have hit a DTLB parity error.
	 */
	ldxa	[MMU_TAG_ACCESS]%asi, %g2
	mov	%g5, %g3
	ba,pt	%xcc, .mmu_exception_is_tlb_parity
	mov	T_DATA_EXCEPTION, %g1

mmu_trap_tl1_4:
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	/*
	 * AM is cleared on trap, so addresses are 64 bit
	 */
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f
	nop
	/*
	 * We are going to update cpu_m.tl1_hdlr using physical address.
	 * Flush the D$ line, so that stale data won't be accessed later.
	 */
	CPU_ADDR(%g6, %g5)
	add	%g6, CPU_TL1_HDLR, %g6		! %g6 = &cpu_m.tl1_hdlr (VA)
	GET_CPU_IMPL(%g5)
	cmp	%g5, CHEETAH_IMPL
	bl,pt	%icc, 3f
	cmp	%g5, SPITFIRE_IMPL
	stxa	%g0, [%g7]ASI_DC_INVAL
	membar	#Sync
	ba,pt	%xcc, 2f
	nop
3:
	bl,pt	%icc, 2f
	sethi	%hi(dcache_line_mask), %g5
	ld	[%g5 + %lo(dcache_line_mask)], %g5
	and	%g6, %g5, %g5
	stxa	%g0, [%g5]ASI_DC_TAG
	membar	#Sync
2:
	sta	%g0, [%g7]ASI_MEM
	SWITCH_GLOBALS				! back to mmu globals
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tt, %g5
	rdpr	%tl, %g7
	sub	%g7, 1, %g6
	wrpr	%g6, %tl
	rdpr	%tt, %g6
	wrpr	%g7, %tl
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blt,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bge,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	cmp	%g5, T_ALIGNMENT
	move	%icc, MMU_SFAR, %g6
	movne	%icc, MMU_TAG_ACCESS, %g6
	ldxa	[%g6]ASI_DMMU, %g6
	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7
	wrpr	%g0, %g7, %tnpc
	done
	SET_SIZE(mmu_trap_tl1)
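
/*
 * The tail of mmu_trap_tl1 (label 1 above) decides whether the faulting
 * TL=1 code was a spill/fill window handler and, if so, resumes at that
 * handler's fault branch.  Rough C sketch (tt_at_tl() stands for the
 * wrpr %tl / rdpr %tt dance; WTRAP_* come from machtrap.h):
 *
 *	if ((tt_at_tl(tl - 1) & WTRAP_TTMASK) != WTRAP_TYPE)
 *		ptl1_panic(PTL1_BAD_MMUTRAP);
 *	if (tpc < trap_table || tpc >= etrap_table)
 *		ptl1_panic(PTL1_BAD_MMUTRAP);
 *	// window handlers are 128-byte aligned; the fault continuation
 *	// lives at a fixed offset within the handler
 *	g6 = (tt == T_ALIGNMENT) ? sfar() : tag_access();
 *	tnpc = (tpc & ~(uintptr_t)WTRAP_ALIGN) + WTRAP_FAULTOFF;
 *	// "done" then resumes at the handler's fault path with %g6 set
 */
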
/*
 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
 * traps are valid only when kmdb is loaded.  When the debugger is active,
 * the code below is rewritten to transfer control to the appropriate
 * debugger entry points.
 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

/*
 * This entry is copied from OBP's trap table during boot.
 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT

/*
 * if kernel, set PCONTEXT to 0 for debuggers
 * if user, clear nucleus page sizes
 */
	.global	kctx_obp_bpt
kctx_obp_bpt:
	set	obp_bpt, %g2
1:
	mov	MMU_PCONTEXT, %g1
	ldxa	[%g1]ASI_DMMU, %g1
	srlx	%g1, CTXREG_NEXT_SHIFT, %g3
	brz,pt	%g3, 3f			! nucleus pgsz is 0, no problem
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3
	set	CTXREG_CTX_MASK, %g4	! check Pcontext
	btst	%g4, %g1
	bz,a,pt	%xcc, 2f
	clr	%g3			! kernel: PCONTEXT=0
	xor	%g3, %g1, %g3		! user: clr N_pgsz0/1 bits
2:
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	mov	MMU_PCONTEXT, %g1
	stxa	%g3, [%g1]ASI_DMMU
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g1
	flush	%g1			! flush required by immu
3:
	jmp	%g2
	nop
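
/*
 * kctx_obp_bpt above normalizes PCONTEXT before jumping to OBP's
 * breakpoint entry.  In rough C (sketch only; read_pcontext() and
 * demap_all() are stand-ins for the ASI_DMMU and demap sequences):
 *
 *	uint64_t pctx = read_pcontext();
 *	uint64_t npgsz = pctx >> CTXREG_NEXT_SHIFT;	// N_pgsz0/1 bits
 *	if (npgsz != 0) {
 *		if ((pctx & CTXREG_CTX_MASK) == 0)
 *			pctx = 0;		// kernel: PCONTEXT = 0
 *		else
 *			pctx ^= npgsz << CTXREG_NEXT_SHIFT; // clear pgsz
 *		demap_all();			// flush both TLBs
 *		write_pcontext(pctx);
 *	}
 *	goto obp_bpt;
 */
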
#ifdef TRAPTRACE
/*
 * TRAPTRACE support.
 * labels here are branched to with "rd %pc, %g7" in the delay slot.
 * Return is done by "jmp %g7 + 4".
 */

trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g3 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	clr	%l0

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */

	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)
	jmp	%g7 + 4
	nop

/*
 * Trace a TSB miss
 *
 * g1 = tsb8k pointer (in)
 * g2 = tag access register (in)
 * g3 = tsb4m pointer (in)
 * g4 = tsbe tag (in/clobbered)
 * g5 - g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	.global	trace_tsbmiss
trace_tsbmiss:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	stxa	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
	stxa	%g4, [%g5 + TRAP_ENT_F1]%asi		! tsb tag
	rdpr	%tnpc, %g6
	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
	srlx	%g1, 32, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! huh?
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a	%icc, 1f
	ldxa	[%g0]ASI_IMMU, %g6
	ldxa	[%g0]ASI_DMMU, %g6
1:	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
	stxa	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop

/*
 * g2 = tag access register (in)
 * g3 = ctx number (in)
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stxa	%g2, [%g1 + TRAP_ENT_SP]%asi		! tag access reg
	stxa	%g0, [%g1 + TRAP_ENT_TR]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F1]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F2]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F3]%asi
	stxa	%g0, [%g1 + TRAP_ENT_F4]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g1 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop

#endif /* TRAPTRACE */

/*
 * fast_trap_done, fast_trap_done_chk_intr:
 *
 * Due to the design of UltraSPARC pipeline, pending interrupts are not
 * taken immediately after a RETRY or DONE instruction which causes IE to
 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
 * to execute first before taking any interrupts. If that instruction
 * results in other traps, and if the corresponding trap handler runs
 * entirely at TL=1 with interrupts disabled, then pending interrupts
 * won't be taken until after yet another instruction following the %tpc
 * or %tnpc.
 *
 * A malicious user program can use this feature to block out interrupts
 * for extended durations, which can result in a send_mondo_timeout kernel
 * panic.
 *
 * This problem is addressed by servicing any pending interrupts via
 * sys_trap before returning back to the user mode from a fast trap
 * handler. The "done" instruction within a fast trap handler, which
 * runs entirely at TL=1 with interrupts disabled, is replaced with the
 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
 * entry point.
 *
 * We check for any pending interrupts here and force a sys_trap to
 * service those interrupts, if any. To minimize overhead, pending
 * interrupts are checked only when the %tpc happens to be at a 16K
 * boundary, which allows a malicious program to execute at most 4K
 * consecutive instructions before we service any pending interrupts. If
 * a worst case fast trap handler takes about 2 usec, then interrupts will
 * be blocked for at most 8 msec, less than a clock tick.
 *
 * For the cases where we don't know if the %tpc will cross a 16K
 * boundary, we can't use the above optimization and always process
 * any pending interrupts via the fast_trap_done_chk_intr entry point.
 *
 * Entry Conditions:
 *	%pstate		am:0 priv:1 ie:0
 *			globals are AG (not normal globals)
 */

	.global	fast_trap_done, fast_trap_done_chk_intr
fast_trap_done:
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,a,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g5
	done

	ALTENTRY(fast_trap_done_check_interrupts)
fast_trap_done_chk_intr:
	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g5

1:	rd	SOFTINT, %g6
	and	%g5, IRSR_BUSY, %g5
	orcc	%g5, %g6, %g0
	bnz,pn	%xcc, 2f		! branch if any pending intr
	nop
	done

2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

fast_trap_dummy_call:
	retl
	nop
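
/*
 * The fast_trap_done policy above, in rough C (sketch only; 0x3fff
 * masks the low 14 bits, i.e. a 16K window, and softint_pending() is a
 * stand-in for the SOFTINT read):
 *
 *	if ((tpc & 0x3fff) == 0) {	// check only on 16K boundaries
 *		if ((intr_recv_status() & IRSR_BUSY) ||
 *		    softint_pending()) {
 *			tpc = tnpc;	// we will resume via "retry"
 *			tnpc += 4;
 *			sys_trap(fast_trap_dummy_call, -1); // service intrs
 *		}
 *	}
 *	done();
 */
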
/*
 * Currently the brand syscall interposition code is not enabled by
 * default.  Instead, when a branded zone is first booted the brand
 * infrastructure will patch the trap table so that the syscall
 * entry points are redirected to syscall_wrapper32 and syscall_wrapper
 * for ILP32 and LP64 syscalls respectively.  This is done in
 * brand_plat_interposition_enable().  Note that the syscall wrappers
 * below do not collect any trap trace data since the syscall hot patch
 * points are reached after trap trace data has already been collected.
 */
#define	BRAND_CALLBACK(callback_id)					    \
	CPU_ADDR(%g2, %g1) /* load CPU struct addr to %g2 */		   ;\
	ldn	[%g2 + CPU_THREAD], %g3	/* load thread pointer */	   ;\
	ldn	[%g3 + T_PROCP], %g3 /* get proc pointer */		   ;\
	ldn	[%g3 + P_BRAND], %g3 /* get brand pointer */		   ;\
	brz	%g3, 1f /* No brand? No callback. */			   ;\
	nop								   ;\
	ldn	[%g3 + B_MACHOPS], %g3 /* get machops list */		   ;\
	ldn	[%g3 + (callback_id << 3)], %g3				   ;\
	brz	%g3, 1f							   ;\
	/*								    \
	 * This isn't pretty.  We want a low-latency way for the callback  \
	 * routine to decline to do anything.  We just pass in an address  \
	 * the routine can directly jmp back to, pretending that nothing   \
	 * has happened.						    \
	 *								    \
	 * %g1: return address (where the brand handler jumps back to)	    \
	 * %g2: address of CPU structure				    \
	 * %g3: address of brand handler (where we will jump to)	    \
	 */								    \
	mov	%pc, %g1						   ;\
	add	%g1, 16, %g1						   ;\
	jmp	%g3							   ;\
	nop								   ;\
1:

	ENTRY_NP(syscall_wrapper32)
	BRAND_CALLBACK(BRAND_CB_SYSCALL32)
	SYSCALL_NOTT(syscall_trap32)
	SET_SIZE(syscall_wrapper32)

	ENTRY_NP(syscall_wrapper)
	BRAND_CALLBACK(BRAND_CB_SYSCALL)
	SYSCALL_NOTT(syscall_trap)
	SET_SIZE(syscall_wrapper)

#endif	/* lint */
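
/*
 * BRAND_CALLBACK in rough C terms (sketch only; the real macro runs in
 * the trap path, and the callback "declines" by jumping straight back
 * to the return address handed to it in %g1):
 *
 *	void (**machops)(void) = NULL;
 *	if (curproc->p_brand != NULL)
 *		machops = curproc->p_brand->b_machops;
 *	if (machops != NULL && machops[callback_id] != NULL) {
 *		// %g1 = address 16 bytes past the "mov %pc, %g1", i.e.
 *		// just after the jmp's delay slot; %g2 = CPU pointer
 *		machops[callback_id]();
 *	}
 *	// fall through: SYSCALL_NOTT(syscall_trap or syscall_trap32)
 */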