1/* $Id: entry.S,v 1.170 2001/11/13 00:57:05 davem Exp $ 2 * arch/sparc/kernel/entry.S: Sparc trap low-level entry points. 3 * 4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) 7 * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 8 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au) 9 */ 10 11#include <linux/config.h> 12#include <linux/errno.h> 13 14#include <asm/head.h> 15#include <asm/asi.h> 16#include <asm/smp.h> 17#include <asm/kgdb.h> 18#include <asm/contregs.h> 19#include <asm/ptrace.h> 20#include <asm/asm-offsets.h> 21#include <asm/psr.h> 22#include <asm/vaddrs.h> 23#include <asm/memreg.h> 24#include <asm/page.h> 25#ifdef CONFIG_SUN4 26#include <asm/pgtsun4.h> 27#else 28#include <asm/pgtsun4c.h> 29#endif 30#include <asm/winmacro.h> 31#include <asm/signal.h> 32#include <asm/obio.h> 33#include <asm/mxcc.h> 34#include <asm/thread_info.h> 35#include <asm/param.h> 36 37#include <asm/asmmacro.h> 38 39#define curptr g6 40 41#define NR_SYSCALLS 300 /* Each OS is different... */ 42 43/* These are just handy. */ 44#define _SV save %sp, -STACKFRAME_SZ, %sp 45#define _RS restore 46 47#define FLUSH_ALL_KERNEL_WINDOWS \ 48 _SV; _SV; _SV; _SV; _SV; _SV; _SV; \ 49 _RS; _RS; _RS; _RS; _RS; _RS; _RS; 50 51/* First, KGDB low level things. This is a rewrite 52 * of the routines found in the sparc-stub.c asm() statement 53 * from the gdb distribution. This is also dual-purpose 54 * as a software trap for userlevel programs. 55 */ 56 .data 57 .align 4 58 59in_trap_handler: 60 .word 0 61 62 .text 63 .align 4 64 65#if 0 /* kgdb is dropped from 2.5.33 */ 66! This function is called when any SPARC trap (except window overflow or 67! underflow) occurs. It makes sure that the invalid register window is still 68! available before jumping into C code. It will also restore the world if you 69! return from handle_exception. 70 71 .globl trap_low 72trap_low: 73 rd %wim, %l3 74 SAVE_ALL 75 76 sethi %hi(in_trap_handler), %l4 77 ld [%lo(in_trap_handler) + %l4], %l5 78 inc %l5 79 st %l5, [%lo(in_trap_handler) + %l4] 80 81 /* Make sure kgdb sees the same state we just saved. */ 82 LOAD_PT_GLOBALS(sp) 83 LOAD_PT_INS(sp) 84 ld [%sp + STACKFRAME_SZ + PT_Y], %l4 85 ld [%sp + STACKFRAME_SZ + PT_WIM], %l3 86 ld [%sp + STACKFRAME_SZ + PT_PSR], %l0 87 ld [%sp + STACKFRAME_SZ + PT_PC], %l1 88 ld [%sp + STACKFRAME_SZ + PT_NPC], %l2 89 rd %tbr, %l5 /* Never changes... */ 90 91 /* Make kgdb exception frame. */ 92 sub %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals 93 ! + hidden arg + arg spill 94 ! + doubleword alignment 95 ! + registers[72] local var 96 SAVE_KGDB_GLOBALS(sp) 97 SAVE_KGDB_INS(sp) 98 SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2) 99 100 /* We are increasing PIL, so two writes. */ 101 or %l0, PSR_PIL, %l0 102 wr %l0, 0, %psr 103 WRITE_PAUSE 104 wr %l0, PSR_ET, %psr 105 WRITE_PAUSE 106 107 call handle_exception 108 add %sp, STACKFRAME_SZ, %o0 ! Pass address of registers 109 110 /* Load new kgdb register set. */ 111 LOAD_KGDB_GLOBALS(sp) 112 LOAD_KGDB_INS(sp) 113 LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2) 114 wr %l4, 0x0, %y 115 116 sethi %hi(in_trap_handler), %l4 117 ld [%lo(in_trap_handler) + %l4], %l5 118 dec %l5 119 st %l5, [%lo(in_trap_handler) + %l4] 120 121 add %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame. 122 123 /* Now take what kgdb did and place it into the pt_regs 124 * frame which SparcLinux RESTORE_ALL understands., 125 */ 126 STORE_PT_INS(sp) 127 STORE_PT_GLOBALS(sp) 128 STORE_PT_YREG(sp, g2) 129 STORE_PT_PRIV(sp, l0, l1, l2) 130 131 RESTORE_ALL 132#endif 133 134#ifdef CONFIG_BLK_DEV_FD 135 .text 136 .align 4 137 .globl floppy_hardint 138floppy_hardint: 139 /* 140 * This code cannot touch registers %l0 %l1 and %l2 141 * because SAVE_ALL depends on their values. It depends 142 * on %l3 also, but we regenerate it before a call. 143 * Other registers are: 144 * %l3 -- base address of fdc registers 145 * %l4 -- pdma_vaddr 146 * %l5 -- scratch for ld/st address 147 * %l6 -- pdma_size 148 * %l7 -- scratch [floppy byte, ld/st address, aux. data] 149 */ 150 151 /* Do we have work to do? */ 152 sethi %hi(doing_pdma), %l7 153 ld [%l7 + %lo(doing_pdma)], %l7 154 cmp %l7, 0 155 be floppy_dosoftint 156 nop 157 158 /* Load fdc register base */ 159 sethi %hi(fdc_status), %l3 160 ld [%l3 + %lo(fdc_status)], %l3 161 162 /* Setup register addresses */ 163 sethi %hi(pdma_vaddr), %l5 ! transfer buffer 164 ld [%l5 + %lo(pdma_vaddr)], %l4 165 sethi %hi(pdma_size), %l5 ! bytes to go 166 ld [%l5 + %lo(pdma_size)], %l6 167next_byte: 168 ldub [%l3], %l7 169 170 andcc %l7, 0x80, %g0 ! Does fifo still have data 171 bz floppy_fifo_emptied ! fifo has been emptied... 172 andcc %l7, 0x20, %g0 ! in non-dma mode still? 173 bz floppy_overrun ! nope, overrun 174 andcc %l7, 0x40, %g0 ! 0=write 1=read 175 bz floppy_write 176 sub %l6, 0x1, %l6 177 178 /* Ok, actually read this byte */ 179 ldub [%l3 + 1], %l7 180 orcc %g0, %l6, %g0 181 stb %l7, [%l4] 182 bne next_byte 183 add %l4, 0x1, %l4 184 185 b floppy_tdone 186 nop 187 188floppy_write: 189 /* Ok, actually write this byte */ 190 ldub [%l4], %l7 191 orcc %g0, %l6, %g0 192 stb %l7, [%l3 + 1] 193 bne next_byte 194 add %l4, 0x1, %l4 195 196 /* fall through... */ 197floppy_tdone: 198 sethi %hi(pdma_vaddr), %l5 199 st %l4, [%l5 + %lo(pdma_vaddr)] 200 sethi %hi(pdma_size), %l5 201 st %l6, [%l5 + %lo(pdma_size)] 202 /* Flip terminal count pin */ 203 set auxio_register, %l7 204 ld [%l7], %l7 205 206 set sparc_cpu_model, %l5 207 ld [%l5], %l5 208 subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */ 209 be 1f 210 ldub [%l7], %l5 211 212 or %l5, 0xc2, %l5 213 stb %l5, [%l7] 214 andn %l5, 0x02, %l5 215 b 2f 216 nop 217 2181: 219 or %l5, 0xf4, %l5 220 stb %l5, [%l7] 221 andn %l5, 0x04, %l5 222 2232: 224 /* Kill some time so the bits set */ 225 WRITE_PAUSE 226 WRITE_PAUSE 227 228 stb %l5, [%l7] 229 230 /* Prevent recursion */ 231 sethi %hi(doing_pdma), %l7 232 b floppy_dosoftint 233 st %g0, [%l7 + %lo(doing_pdma)] 234 235 /* We emptied the FIFO, but we haven't read everything 236 * as of yet. Store the current transfer address and 237 * bytes left to read so we can continue when the next 238 * fast IRQ comes in. 239 */ 240floppy_fifo_emptied: 241 sethi %hi(pdma_vaddr), %l5 242 st %l4, [%l5 + %lo(pdma_vaddr)] 243 sethi %hi(pdma_size), %l7 244 st %l6, [%l7 + %lo(pdma_size)] 245 246 /* Restore condition codes */ 247 wr %l0, 0x0, %psr 248 WRITE_PAUSE 249 250 jmp %l1 251 rett %l2 252 253floppy_overrun: 254 sethi %hi(pdma_vaddr), %l5 255 st %l4, [%l5 + %lo(pdma_vaddr)] 256 sethi %hi(pdma_size), %l5 257 st %l6, [%l5 + %lo(pdma_size)] 258 /* Prevent recursion */ 259 sethi %hi(doing_pdma), %l7 260 st %g0, [%l7 + %lo(doing_pdma)] 261 262 /* fall through... */ 263floppy_dosoftint: 264 rd %wim, %l3 265 SAVE_ALL 266 267 /* Set all IRQs off. */ 268 or %l0, PSR_PIL, %l4 269 wr %l4, 0x0, %psr 270 WRITE_PAUSE 271 wr %l4, PSR_ET, %psr 272 WRITE_PAUSE 273 274 mov 11, %o0 ! floppy irq level (unused anyway) 275 mov %g0, %o1 ! devid is not used in fast interrupts 276 call sparc_floppy_irq 277 add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs 278 279 RESTORE_ALL 280 281#endif /* (CONFIG_BLK_DEV_FD) */ 282 283 /* Bad trap handler */ 284 .globl bad_trap_handler 285bad_trap_handler: 286 SAVE_ALL 287 288 wr %l0, PSR_ET, %psr 289 WRITE_PAUSE 290 291 add %sp, STACKFRAME_SZ, %o0 ! pt_regs 292 call do_hw_interrupt 293 mov %l7, %o1 ! trap number 294 295 RESTORE_ALL 296 297/* For now all IRQ's not registered get sent here. handler_irq() will 298 * see if a routine is registered to handle this interrupt and if not 299 * it will say so on the console. 300 */ 301 302 .align 4 303 .globl real_irq_entry, patch_handler_irq 304real_irq_entry: 305 SAVE_ALL 306 307#ifdef CONFIG_SMP 308 .globl patchme_maybe_smp_msg 309 310 cmp %l7, 12 311patchme_maybe_smp_msg: 312 bgu maybe_smp4m_msg 313 nop 314#endif 315 316real_irq_continue: 317 or %l0, PSR_PIL, %g2 318 wr %g2, 0x0, %psr 319 WRITE_PAUSE 320 wr %g2, PSR_ET, %psr 321 WRITE_PAUSE 322 mov %l7, %o0 ! irq level 323patch_handler_irq: 324 call handler_irq 325 add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr 326 or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq 327 wr %g2, PSR_ET, %psr ! keep ET up 328 WRITE_PAUSE 329 330 RESTORE_ALL 331 332#ifdef CONFIG_SMP 333 /* SMP per-cpu ticker interrupts are handled specially. */ 334smp4m_ticker: 335 bne real_irq_continue+4 336 or %l0, PSR_PIL, %g2 337 wr %g2, 0x0, %psr 338 WRITE_PAUSE 339 wr %g2, PSR_ET, %psr 340 WRITE_PAUSE 341 call smp4m_percpu_timer_interrupt 342 add %sp, STACKFRAME_SZ, %o0 343 wr %l0, PSR_ET, %psr 344 WRITE_PAUSE 345 RESTORE_ALL 346 347 /* Here is where we check for possible SMP IPI passed to us 348 * on some level other than 15 which is the NMI and only used 349 * for cross calls. That has a separate entry point below. 350 */ 351maybe_smp4m_msg: 352 GET_PROCESSOR4M_ID(o3) 353 set sun4m_interrupts, %l5 354 ld [%l5], %o5 355 sethi %hi(0x40000000), %o2 356 sll %o3, 12, %o3 357 ld [%o5 + %o3], %o1 358 andcc %o1, %o2, %g0 359 be,a smp4m_ticker 360 cmp %l7, 14 361 st %o2, [%o5 + 0x4] 362 WRITE_PAUSE 363 ld [%o5], %g0 364 WRITE_PAUSE 365 or %l0, PSR_PIL, %l4 366 wr %l4, 0x0, %psr 367 WRITE_PAUSE 368 wr %l4, PSR_ET, %psr 369 WRITE_PAUSE 370 call smp_reschedule_irq 371 nop 372 373 RESTORE_ALL 374 375 .align 4 376 .globl linux_trap_ipi15_sun4m 377linux_trap_ipi15_sun4m: 378 SAVE_ALL 379 sethi %hi(0x80000000), %o2 380 GET_PROCESSOR4M_ID(o0) 381 set sun4m_interrupts, %l5 382 ld [%l5], %o5 383 sll %o0, 12, %o0 384 add %o5, %o0, %o5 385 ld [%o5], %o3 386 andcc %o3, %o2, %g0 387 be 1f ! Must be an NMI async memory error 388 st %o2, [%o5 + 4] 389 WRITE_PAUSE 390 ld [%o5], %g0 391 WRITE_PAUSE 392 or %l0, PSR_PIL, %l4 393 wr %l4, 0x0, %psr 394 WRITE_PAUSE 395 wr %l4, PSR_ET, %psr 396 WRITE_PAUSE 397 call smp4m_cross_call_irq 398 nop 399 b ret_trap_lockless_ipi 400 clr %l6 4011: 402 /* NMI async memory error handling. */ 403 sethi %hi(0x80000000), %l4 404 sethi %hi(0x4000), %o3 405 sub %o5, %o0, %o5 406 add %o5, %o3, %l5 407 st %l4, [%l5 + 0xc] 408 WRITE_PAUSE 409 ld [%l5], %g0 410 WRITE_PAUSE 411 or %l0, PSR_PIL, %l4 412 wr %l4, 0x0, %psr 413 WRITE_PAUSE 414 wr %l4, PSR_ET, %psr 415 WRITE_PAUSE 416 call sun4m_nmi 417 nop 418 st %l4, [%l5 + 0x8] 419 WRITE_PAUSE 420 ld [%l5], %g0 421 WRITE_PAUSE 422 RESTORE_ALL 423 424 .globl smp4d_ticker 425 /* SMP per-cpu ticker interrupts are handled specially. */ 426smp4d_ticker: 427 SAVE_ALL 428 or %l0, PSR_PIL, %g2 429 sethi %hi(CC_ICLR), %o0 430 sethi %hi(1 << 14), %o1 431 or %o0, %lo(CC_ICLR), %o0 432 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */ 433 wr %g2, 0x0, %psr 434 WRITE_PAUSE 435 wr %g2, PSR_ET, %psr 436 WRITE_PAUSE 437 call smp4d_percpu_timer_interrupt 438 add %sp, STACKFRAME_SZ, %o0 439 wr %l0, PSR_ET, %psr 440 WRITE_PAUSE 441 RESTORE_ALL 442 443 .align 4 444 .globl linux_trap_ipi15_sun4d 445linux_trap_ipi15_sun4d: 446 SAVE_ALL 447 sethi %hi(CC_BASE), %o4 448 sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2 449 or %o4, (CC_EREG - CC_BASE), %o0 450 ldda [%o0] ASI_M_MXCC, %o0 451 andcc %o0, %o2, %g0 452 bne 1f 453 sethi %hi(BB_STAT2), %o2 454 lduba [%o2] ASI_M_CTL, %o2 455 andcc %o2, BB_STAT2_MASK, %g0 456 bne 2f 457 or %o4, (CC_ICLR - CC_BASE), %o0 458 sethi %hi(1 << 15), %o1 459 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */ 460 or %l0, PSR_PIL, %l4 461 wr %l4, 0x0, %psr 462 WRITE_PAUSE 463 wr %l4, PSR_ET, %psr 464 WRITE_PAUSE 465 call smp4d_cross_call_irq 466 nop 467 b ret_trap_lockless_ipi 468 clr %l6 469 4701: /* MXCC error */ 4712: /* BB error */ 472 /* Disable PIL 15 */ 473 set CC_IMSK, %l4 474 lduha [%l4] ASI_M_MXCC, %l5 475 sethi %hi(1 << 15), %l7 476 or %l5, %l7, %l5 477 stha %l5, [%l4] ASI_M_MXCC 478 /* FIXME */ 4791: b,a 1b 480 481#endif /* CONFIG_SMP */ 482 483 /* This routine handles illegal instructions and privileged 484 * instruction attempts from user code. 485 */ 486 .align 4 487 .globl bad_instruction 488bad_instruction: 489 sethi %hi(0xc1f80000), %l4 490 ld [%l1], %l5 491 sethi %hi(0x81d80000), %l7 492 and %l5, %l4, %l5 493 cmp %l5, %l7 494 be 1f 495 SAVE_ALL 496 497 wr %l0, PSR_ET, %psr ! re-enable traps 498 WRITE_PAUSE 499 500 add %sp, STACKFRAME_SZ, %o0 501 mov %l1, %o1 502 mov %l2, %o2 503 call do_illegal_instruction 504 mov %l0, %o3 505 506 RESTORE_ALL 507 5081: /* unimplemented flush - just skip */ 509 jmpl %l2, %g0 510 rett %l2 + 4 511 512 .align 4 513 .globl priv_instruction 514priv_instruction: 515 SAVE_ALL 516 517 wr %l0, PSR_ET, %psr 518 WRITE_PAUSE 519 520 add %sp, STACKFRAME_SZ, %o0 521 mov %l1, %o1 522 mov %l2, %o2 523 call do_priv_instruction 524 mov %l0, %o3 525 526 RESTORE_ALL 527 528 /* This routine handles unaligned data accesses. */ 529 .align 4 530 .globl mna_handler 531mna_handler: 532 andcc %l0, PSR_PS, %g0 533 be mna_fromuser 534 nop 535 536 SAVE_ALL 537 538 wr %l0, PSR_ET, %psr 539 WRITE_PAUSE 540 541 ld [%l1], %o1 542 call kernel_unaligned_trap 543 add %sp, STACKFRAME_SZ, %o0 544 545 RESTORE_ALL 546 547mna_fromuser: 548 SAVE_ALL 549 550 wr %l0, PSR_ET, %psr ! re-enable traps 551 WRITE_PAUSE 552 553 ld [%l1], %o1 554 call user_unaligned_trap 555 add %sp, STACKFRAME_SZ, %o0 556 557 RESTORE_ALL 558 559 /* This routine handles floating point disabled traps. */ 560 .align 4 561 .globl fpd_trap_handler 562fpd_trap_handler: 563 SAVE_ALL 564 565 wr %l0, PSR_ET, %psr ! re-enable traps 566 WRITE_PAUSE 567 568 add %sp, STACKFRAME_SZ, %o0 569 mov %l1, %o1 570 mov %l2, %o2 571 call do_fpd_trap 572 mov %l0, %o3 573 574 RESTORE_ALL 575 576 /* This routine handles Floating Point Exceptions. */ 577 .align 4 578 .globl fpe_trap_handler 579fpe_trap_handler: 580 set fpsave_magic, %l5 581 cmp %l1, %l5 582 be 1f 583 sethi %hi(fpsave), %l5 584 or %l5, %lo(fpsave), %l5 585 cmp %l1, %l5 586 bne 2f 587 sethi %hi(fpsave_catch2), %l5 588 or %l5, %lo(fpsave_catch2), %l5 589 wr %l0, 0x0, %psr 590 WRITE_PAUSE 591 jmp %l5 592 rett %l5 + 4 5931: 594 sethi %hi(fpsave_catch), %l5 595 or %l5, %lo(fpsave_catch), %l5 596 wr %l0, 0x0, %psr 597 WRITE_PAUSE 598 jmp %l5 599 rett %l5 + 4 600 6012: 602 SAVE_ALL 603 604 wr %l0, PSR_ET, %psr ! re-enable traps 605 WRITE_PAUSE 606 607 add %sp, STACKFRAME_SZ, %o0 608 mov %l1, %o1 609 mov %l2, %o2 610 call do_fpe_trap 611 mov %l0, %o3 612 613 RESTORE_ALL 614 615 /* This routine handles Tag Overflow Exceptions. */ 616 .align 4 617 .globl do_tag_overflow 618do_tag_overflow: 619 SAVE_ALL 620 621 wr %l0, PSR_ET, %psr ! re-enable traps 622 WRITE_PAUSE 623 624 add %sp, STACKFRAME_SZ, %o0 625 mov %l1, %o1 626 mov %l2, %o2 627 call handle_tag_overflow 628 mov %l0, %o3 629 630 RESTORE_ALL 631 632 /* This routine handles Watchpoint Exceptions. */ 633 .align 4 634 .globl do_watchpoint 635do_watchpoint: 636 SAVE_ALL 637 638 wr %l0, PSR_ET, %psr ! re-enable traps 639 WRITE_PAUSE 640 641 add %sp, STACKFRAME_SZ, %o0 642 mov %l1, %o1 643 mov %l2, %o2 644 call handle_watchpoint 645 mov %l0, %o3 646 647 RESTORE_ALL 648 649 /* This routine handles Register Access Exceptions. */ 650 .align 4 651 .globl do_reg_access 652do_reg_access: 653 SAVE_ALL 654 655 wr %l0, PSR_ET, %psr ! re-enable traps 656 WRITE_PAUSE 657 658 add %sp, STACKFRAME_SZ, %o0 659 mov %l1, %o1 660 mov %l2, %o2 661 call handle_reg_access 662 mov %l0, %o3 663 664 RESTORE_ALL 665 666 /* This routine handles Co-Processor Disabled Exceptions. */ 667 .align 4 668 .globl do_cp_disabled 669do_cp_disabled: 670 SAVE_ALL 671 672 wr %l0, PSR_ET, %psr ! re-enable traps 673 WRITE_PAUSE 674 675 add %sp, STACKFRAME_SZ, %o0 676 mov %l1, %o1 677 mov %l2, %o2 678 call handle_cp_disabled 679 mov %l0, %o3 680 681 RESTORE_ALL 682 683 /* This routine handles Co-Processor Exceptions. */ 684 .align 4 685 .globl do_cp_exception 686do_cp_exception: 687 SAVE_ALL 688 689 wr %l0, PSR_ET, %psr ! re-enable traps 690 WRITE_PAUSE 691 692 add %sp, STACKFRAME_SZ, %o0 693 mov %l1, %o1 694 mov %l2, %o2 695 call handle_cp_exception 696 mov %l0, %o3 697 698 RESTORE_ALL 699 700 /* This routine handles Hardware Divide By Zero Exceptions. */ 701 .align 4 702 .globl do_hw_divzero 703do_hw_divzero: 704 SAVE_ALL 705 706 wr %l0, PSR_ET, %psr ! re-enable traps 707 WRITE_PAUSE 708 709 add %sp, STACKFRAME_SZ, %o0 710 mov %l1, %o1 711 mov %l2, %o2 712 call handle_hw_divzero 713 mov %l0, %o3 714 715 RESTORE_ALL 716 717 .align 4 718 .globl do_flush_windows 719do_flush_windows: 720 SAVE_ALL 721 722 wr %l0, PSR_ET, %psr 723 WRITE_PAUSE 724 725 andcc %l0, PSR_PS, %g0 726 bne dfw_kernel 727 nop 728 729 call flush_user_windows 730 nop 731 732 /* Advance over the trap instruction. */ 733 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 734 add %l1, 0x4, %l2 735 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 736 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 737 738 RESTORE_ALL 739 740 .globl flush_patch_one 741 742 /* We get these for debugging routines using __builtin_return_address() */ 743dfw_kernel: 744flush_patch_one: 745 FLUSH_ALL_KERNEL_WINDOWS 746 747 /* Advance over the trap instruction. */ 748 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 749 add %l1, 0x4, %l2 750 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 751 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 752 753 RESTORE_ALL 754 755 /* The getcc software trap. The user wants the condition codes from 756 * the %psr in register %g1. 757 */ 758 759 .align 4 760 .globl getcc_trap_handler 761getcc_trap_handler: 762 srl %l0, 20, %g1 ! give user 763 and %g1, 0xf, %g1 ! only ICC bits in %psr 764 jmp %l2 ! advance over trap instruction 765 rett %l2 + 0x4 ! like this... 766 767 /* The setcc software trap. The user has condition codes in %g1 768 * that it would like placed in the %psr. Be careful not to flip 769 * any unintentional bits! 770 */ 771 772 .align 4 773 .globl setcc_trap_handler 774setcc_trap_handler: 775 sll %g1, 0x14, %l4 776 set PSR_ICC, %l5 777 andn %l0, %l5, %l0 ! clear ICC bits in %psr 778 and %l4, %l5, %l4 ! clear non-ICC bits in user value 779 or %l4, %l0, %l4 ! or them in... mix mix mix 780 781 wr %l4, 0x0, %psr ! set new %psr 782 WRITE_PAUSE ! TI scumbags... 783 784 jmp %l2 ! advance over trap instruction 785 rett %l2 + 0x4 ! like this... 786 787 .align 4 788 .globl linux_trap_nmi_sun4c 789linux_trap_nmi_sun4c: 790 SAVE_ALL 791 792 /* Ugh, we need to clear the IRQ line. This is now 793 * a very sun4c specific trap handler... 794 */ 795 sethi %hi(interrupt_enable), %l5 796 ld [%l5 + %lo(interrupt_enable)], %l5 797 ldub [%l5], %l6 798 andn %l6, INTS_ENAB, %l6 799 stb %l6, [%l5] 800 801 /* Now it is safe to re-enable traps without recursion. */ 802 or %l0, PSR_PIL, %l0 803 wr %l0, PSR_ET, %psr 804 WRITE_PAUSE 805 806 /* Now call the c-code with the pt_regs frame ptr and the 807 * memory error registers as arguments. The ordering chosen 808 * here is due to unlatching semantics. 809 */ 810 sethi %hi(AC_SYNC_ERR), %o0 811 add %o0, 0x4, %o0 812 lda [%o0] ASI_CONTROL, %o2 ! sync vaddr 813 sub %o0, 0x4, %o0 814 lda [%o0] ASI_CONTROL, %o1 ! sync error 815 add %o0, 0xc, %o0 816 lda [%o0] ASI_CONTROL, %o4 ! async vaddr 817 sub %o0, 0x4, %o0 818 lda [%o0] ASI_CONTROL, %o3 ! async error 819 call sparc_lvl15_nmi 820 add %sp, STACKFRAME_SZ, %o0 821 822 RESTORE_ALL 823 824 .align 4 825 .globl invalid_segment_patch1_ff 826 .globl invalid_segment_patch2_ff 827invalid_segment_patch1_ff: cmp %l4, 0xff 828invalid_segment_patch2_ff: mov 0xff, %l3 829 830 .align 4 831 .globl invalid_segment_patch1_1ff 832 .globl invalid_segment_patch2_1ff 833invalid_segment_patch1_1ff: cmp %l4, 0x1ff 834invalid_segment_patch2_1ff: mov 0x1ff, %l3 835 836 .align 4 837 .globl num_context_patch1_16, num_context_patch2_16 838num_context_patch1_16: mov 0x10, %l7 839num_context_patch2_16: mov 0x10, %l7 840 841 .align 4 842 .globl vac_linesize_patch_32 843vac_linesize_patch_32: subcc %l7, 32, %l7 844 845 .align 4 846 .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on 847 848/* 849 * Ugly, but we cant use hardware flushing on the sun4 and we'd require 850 * two instructions (Anton) 851 */ 852#ifdef CONFIG_SUN4 853vac_hwflush_patch1_on: nop 854#else 855vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7 856#endif 857 858vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG 859 860 .globl invalid_segment_patch1, invalid_segment_patch2 861 .globl num_context_patch1 862 .globl vac_linesize_patch, vac_hwflush_patch1 863 .globl vac_hwflush_patch2 864 865 .align 4 866 .globl sun4c_fault 867 868! %l0 = %psr 869! %l1 = %pc 870! %l2 = %npc 871! %l3 = %wim 872! %l7 = 1 for textfault 873! We want error in %l5, vaddr in %l6 874sun4c_fault: 875#ifdef CONFIG_SUN4 876 sethi %hi(sun4c_memerr_reg), %l4 877 ld [%l4+%lo(sun4c_memerr_reg)], %l4 ! memerr ctrl reg addr 878 ld [%l4], %l6 ! memerr ctrl reg 879 ld [%l4 + 4], %l5 ! memerr vaddr reg 880 andcc %l6, 0x80, %g0 ! check for error type 881 st %g0, [%l4 + 4] ! clear the error 882 be 0f ! normal error 883 sethi %hi(AC_BUS_ERROR), %l4 ! bus err reg addr 884 885 call prom_halt ! something weird happened 886 ! what exactly did happen? 887 ! what should we do here? 888 8890: or %l4, %lo(AC_BUS_ERROR), %l4 ! bus err reg addr 890 lduba [%l4] ASI_CONTROL, %l6 ! bus err reg 891 892 cmp %l7, 1 ! text fault? 893 be 1f ! yes 894 nop 895 896 ld [%l1], %l4 ! load instruction that caused fault 897 srl %l4, 21, %l4 898 andcc %l4, 1, %g0 ! store instruction? 899 900 be 1f ! no 901 sethi %hi(SUN4C_SYNC_BADWRITE), %l4 ! yep 902 ! %lo(SUN4C_SYNC_BADWRITE) = 0 903 or %l4, %l6, %l6 ! set write bit to emulate sun4c 9041: 905#else 906 sethi %hi(AC_SYNC_ERR), %l4 907 add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6 908 lda [%l6] ASI_CONTROL, %l5 ! Address 909 lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit 910#endif 911 912 andn %l5, 0xfff, %l5 ! Encode all info into l7 913 srl %l6, 14, %l4 914 915 and %l4, 2, %l4 916 or %l5, %l4, %l4 917 918 or %l4, %l7, %l7 ! l7 = [addr,write,txtfault] 919 920 andcc %l0, PSR_PS, %g0 921 be sun4c_fault_fromuser 922 andcc %l7, 1, %g0 ! Text fault? 923 924 be 1f 925 sethi %hi(KERNBASE), %l4 926 927 mov %l1, %l5 ! PC 928 9291: 930 cmp %l5, %l4 931 blu sun4c_fault_fromuser 932 sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4 933 934 /* If the kernel references a bum kernel pointer, or a pte which 935 * points to a non existant page in ram, we will run this code 936 * _forever_ and lock up the machine!!!!! So we must check for 937 * this condition, the AC_SYNC_ERR bits are what we must examine. 938 * Also a parity error would make this happen as well. So we just 939 * check that we are in fact servicing a tlb miss and not some 940 * other type of fault for the kernel. 941 */ 942 andcc %l6, 0x80, %g0 943 be sun4c_fault_fromuser 944 and %l5, %l4, %l5 945 946 /* Test for NULL pte_t * in vmalloc area. */ 947 sethi %hi(VMALLOC_START), %l4 948 cmp %l5, %l4 949 blu,a invalid_segment_patch1 950 lduXa [%l5] ASI_SEGMAP, %l4 951 952 sethi %hi(swapper_pg_dir), %l4 953 srl %l5, SUN4C_PGDIR_SHIFT, %l6 954 or %l4, %lo(swapper_pg_dir), %l4 955 sll %l6, 2, %l6 956 ld [%l4 + %l6], %l4 957#ifdef CONFIG_SUN4 958 sethi %hi(PAGE_MASK), %l6 959 andcc %l4, %l6, %g0 960#else 961 andcc %l4, PAGE_MASK, %g0 962#endif 963 be sun4c_fault_fromuser 964 lduXa [%l5] ASI_SEGMAP, %l4 965 966invalid_segment_patch1: 967 cmp %l4, 0x7f 968 bne 1f 969 sethi %hi(sun4c_kfree_ring), %l4 970 or %l4, %lo(sun4c_kfree_ring), %l4 971 ld [%l4 + 0x18], %l3 972 deccc %l3 ! do we have a free entry? 973 bcs,a 2f ! no, unmap one. 974 sethi %hi(sun4c_kernel_ring), %l4 975 976 st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries-- 977 978 ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next 979 st %l5, [%l6 + 0x08] ! entry->vaddr = address 980 981 ld [%l6 + 0x00], %l3 ! next = entry->next 982 ld [%l6 + 0x04], %l7 ! entry->prev 983 984 st %l7, [%l3 + 0x04] ! next->prev = entry->prev 985 st %l3, [%l7 + 0x00] ! entry->prev->next = next 986 987 sethi %hi(sun4c_kernel_ring), %l4 988 or %l4, %lo(sun4c_kernel_ring), %l4 989 ! head = &sun4c_kernel_ring.ringhd 990 991 ld [%l4 + 0x00], %l7 ! head->next 992 993 st %l4, [%l6 + 0x04] ! entry->prev = head 994 st %l7, [%l6 + 0x00] ! entry->next = head->next 995 st %l6, [%l7 + 0x04] ! head->next->prev = entry 996 997 st %l6, [%l4 + 0x00] ! head->next = entry 998 999 ld [%l4 + 0x18], %l3 1000 inc %l3 ! sun4c_kernel_ring.num_entries++ 1001 st %l3, [%l4 + 0x18] 1002 b 4f 1003 ld [%l6 + 0x08], %l5 1004 10052: 1006 or %l4, %lo(sun4c_kernel_ring), %l4 1007 ! head = &sun4c_kernel_ring.ringhd 1008 1009 ld [%l4 + 0x04], %l6 ! entry = head->prev 1010 1011 ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr 1012 1013 ! Flush segment from the cache. 1014#ifdef CONFIG_SUN4 1015 sethi %hi((128 * 1024)), %l7 1016#else 1017 sethi %hi((64 * 1024)), %l7 1018#endif 10199: 1020vac_hwflush_patch1: 1021vac_linesize_patch: 1022 subcc %l7, 16, %l7 1023 bne 9b 1024vac_hwflush_patch2: 1025 sta %g0, [%l3 + %l7] ASI_FLUSHSEG 1026 1027 st %l5, [%l6 + 0x08] ! entry->vaddr = address 1028 1029 ld [%l6 + 0x00], %l5 ! next = entry->next 1030 ld [%l6 + 0x04], %l7 ! entry->prev 1031 1032 st %l7, [%l5 + 0x04] ! next->prev = entry->prev 1033 st %l5, [%l7 + 0x00] ! entry->prev->next = next 1034 st %l4, [%l6 + 0x04] ! entry->prev = head 1035 1036 ld [%l4 + 0x00], %l7 ! head->next 1037 1038 st %l7, [%l6 + 0x00] ! entry->next = head->next 1039 st %l6, [%l7 + 0x04] ! head->next->prev = entry 1040 st %l6, [%l4 + 0x00] ! head->next = entry 1041 1042 mov %l3, %l5 ! address = tmp 1043 10444: 1045num_context_patch1: 1046 mov 0x08, %l7 1047 1048 ld [%l6 + 0x08], %l4 1049 ldub [%l6 + 0x0c], %l3 1050 or %l4, %l3, %l4 ! encode new vaddr/pseg into l4 1051 1052 sethi %hi(AC_CONTEXT), %l3 1053 lduba [%l3] ASI_CONTROL, %l6 1054 1055 /* Invalidate old mapping, instantiate new mapping, 1056 * for each context. Registers l6/l7 are live across 1057 * this loop. 1058 */ 10593: deccc %l7 1060 sethi %hi(AC_CONTEXT), %l3 1061 stba %l7, [%l3] ASI_CONTROL 1062invalid_segment_patch2: 1063 mov 0x7f, %l3 1064 stXa %l3, [%l5] ASI_SEGMAP 1065 andn %l4, 0x1ff, %l3 1066 bne 3b 1067 stXa %l4, [%l3] ASI_SEGMAP 1068 1069 sethi %hi(AC_CONTEXT), %l3 1070 stba %l6, [%l3] ASI_CONTROL 1071 1072 andn %l4, 0x1ff, %l5 1073 10741: 1075 sethi %hi(VMALLOC_START), %l4 1076 cmp %l5, %l4 1077 1078 bgeu 1f 1079 mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7 1080 1081 sethi %hi(KERNBASE), %l6 1082 1083 sub %l5, %l6, %l4 1084 srl %l4, PAGE_SHIFT, %l4 1085 sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3 1086 or %l3, %l4, %l3 1087 1088 sethi %hi(PAGE_SIZE), %l4 1089 10902: 1091 sta %l3, [%l5] ASI_PTE 1092 deccc %l7 1093 inc %l3 1094 bne 2b 1095 add %l5, %l4, %l5 1096 1097 b 7f 1098 sethi %hi(sun4c_kernel_faults), %l4 1099 11001: 1101 srl %l5, SUN4C_PGDIR_SHIFT, %l3 1102 sethi %hi(swapper_pg_dir), %l4 1103 or %l4, %lo(swapper_pg_dir), %l4 1104 sll %l3, 2, %l3 1105 ld [%l4 + %l3], %l4 1106#ifndef CONFIG_SUN4 1107 and %l4, PAGE_MASK, %l4 1108#else 1109 sethi %hi(PAGE_MASK), %l6 1110 and %l4, %l6, %l4 1111#endif 1112 1113 srl %l5, (PAGE_SHIFT - 2), %l6 1114 and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6 1115 add %l6, %l4, %l6 1116 1117 sethi %hi(PAGE_SIZE), %l4 1118 11192: 1120 ld [%l6], %l3 1121 deccc %l7 1122 sta %l3, [%l5] ASI_PTE 1123 add %l6, 0x4, %l6 1124 bne 2b 1125 add %l5, %l4, %l5 1126 1127 sethi %hi(sun4c_kernel_faults), %l4 11287: 1129 ld [%l4 + %lo(sun4c_kernel_faults)], %l3 1130 inc %l3 1131 st %l3, [%l4 + %lo(sun4c_kernel_faults)] 1132 1133 /* Restore condition codes */ 1134 wr %l0, 0x0, %psr 1135 WRITE_PAUSE 1136 jmp %l1 1137 rett %l2 1138 1139sun4c_fault_fromuser: 1140 SAVE_ALL 1141 nop 1142 1143 mov %l7, %o1 ! Decode the info from %l7 1144 mov %l7, %o2 1145 and %o1, 1, %o1 ! arg2 = text_faultp 1146 mov %l7, %o3 1147 and %o2, 2, %o2 ! arg3 = writep 1148 andn %o3, 0xfff, %o3 ! arg4 = faulting address 1149 1150 wr %l0, PSR_ET, %psr 1151 WRITE_PAUSE 1152 1153 call do_sun4c_fault 1154 add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr 1155 1156 RESTORE_ALL 1157 1158 .align 4 1159 .globl srmmu_fault 1160srmmu_fault: 1161 mov 0x400, %l5 1162 mov 0x300, %l4 1163 1164 lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first 1165 lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last 1166 1167 andn %l6, 0xfff, %l6 1168 srl %l5, 6, %l5 ! and encode all info into l7 1169 1170 and %l5, 2, %l5 1171 or %l5, %l6, %l6 1172 1173 or %l6, %l7, %l7 ! l7 = [addr,write,txtfault] 1174 1175 SAVE_ALL 1176 1177 mov %l7, %o1 1178 mov %l7, %o2 1179 and %o1, 1, %o1 ! arg2 = text_faultp 1180 mov %l7, %o3 1181 and %o2, 2, %o2 ! arg3 = writep 1182 andn %o3, 0xfff, %o3 ! arg4 = faulting address 1183 1184 wr %l0, PSR_ET, %psr 1185 WRITE_PAUSE 1186 1187 call do_sparc_fault 1188 add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr 1189 1190 RESTORE_ALL 1191 1192#ifdef CONFIG_SUNOS_EMUL 1193 /* SunOS uses syscall zero as the 'indirect syscall' it looks 1194 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc. 1195 * This is complete brain damage. 1196 */ 1197 .globl sunos_indir 1198sunos_indir: 1199 mov %o7, %l4 1200 cmp %o0, NR_SYSCALLS 1201 blu,a 1f 1202 sll %o0, 0x2, %o0 1203 1204 sethi %hi(sunos_nosys), %l6 1205 b 2f 1206 or %l6, %lo(sunos_nosys), %l6 1207 12081: 1209 set sunos_sys_table, %l7 1210 ld [%l7 + %o0], %l6 1211 12122: 1213 mov %o1, %o0 1214 mov %o2, %o1 1215 mov %o3, %o2 1216 mov %o4, %o3 1217 mov %o5, %o4 1218 call %l6 1219 mov %l4, %o7 1220#endif 1221 1222 .align 4 1223 .globl sys_nis_syscall 1224sys_nis_syscall: 1225 mov %o7, %l5 1226 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg 1227 call c_sys_nis_syscall 1228 mov %l5, %o7 1229 1230 .align 4 1231 .globl sys_ptrace 1232sys_ptrace: 1233 call do_ptrace 1234 add %sp, STACKFRAME_SZ, %o0 1235 1236 ld [%curptr + TI_FLAGS], %l5 1237 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1238 be 1f 1239 nop 1240 1241 call syscall_trace 1242 nop 1243 12441: 1245 RESTORE_ALL 1246 1247 .align 4 1248 .globl sys_execve 1249sys_execve: 1250 mov %o7, %l5 1251 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg 1252 call sparc_execve 1253 mov %l5, %o7 1254 1255 .align 4 1256 .globl sys_pipe 1257sys_pipe: 1258 mov %o7, %l5 1259 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg 1260 call sparc_pipe 1261 mov %l5, %o7 1262 1263 .align 4 1264 .globl sys_sigaltstack 1265sys_sigaltstack: 1266 mov %o7, %l5 1267 mov %fp, %o2 1268 call do_sigaltstack 1269 mov %l5, %o7 1270 1271 .align 4 1272 .globl sys_sigstack 1273sys_sigstack: 1274 mov %o7, %l5 1275 mov %fp, %o2 1276 call do_sys_sigstack 1277 mov %l5, %o7 1278 1279 .align 4 1280 .globl sys_sigreturn 1281sys_sigreturn: 1282 call do_sigreturn 1283 add %sp, STACKFRAME_SZ, %o0 1284 1285 ld [%curptr + TI_FLAGS], %l5 1286 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1287 be 1f 1288 nop 1289 1290 call syscall_trace 1291 nop 1292 12931: 1294 /* We don't want to muck with user registers like a 1295 * normal syscall, just return. 1296 */ 1297 RESTORE_ALL 1298 1299 .align 4 1300 .globl sys_rt_sigreturn 1301sys_rt_sigreturn: 1302 call do_rt_sigreturn 1303 add %sp, STACKFRAME_SZ, %o0 1304 1305 ld [%curptr + TI_FLAGS], %l5 1306 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1307 be 1f 1308 nop 1309 1310 call syscall_trace 1311 nop 1312 13131: 1314 /* We are returning to a signal handler. */ 1315 RESTORE_ALL 1316 1317 /* Now that we have a real sys_clone, sys_fork() is 1318 * implemented in terms of it. Our _real_ implementation 1319 * of SunOS vfork() will use sys_vfork(). 1320 * 1321 * XXX These three should be consolidated into mostly shared 1322 * XXX code just like on sparc64... -DaveM 1323 */ 1324 .align 4 1325 .globl sys_fork, flush_patch_two 1326sys_fork: 1327 mov %o7, %l5 1328flush_patch_two: 1329 FLUSH_ALL_KERNEL_WINDOWS; 1330 ld [%curptr + TI_TASK], %o4 1331 rd %psr, %g4 1332 WRITE_PAUSE 1333 mov SIGCHLD, %o0 ! arg0: clone flags 1334 rd %wim, %g5 1335 WRITE_PAUSE 1336 mov %fp, %o1 ! arg1: usp 1337 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] 1338 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr 1339 mov 0, %o3 1340 call sparc_do_fork 1341 mov %l5, %o7 1342 1343 /* Whee, kernel threads! */ 1344 .globl sys_clone, flush_patch_three 1345sys_clone: 1346 mov %o7, %l5 1347flush_patch_three: 1348 FLUSH_ALL_KERNEL_WINDOWS; 1349 ld [%curptr + TI_TASK], %o4 1350 rd %psr, %g4 1351 WRITE_PAUSE 1352 1353 /* arg0,1: flags,usp -- loaded already */ 1354 cmp %o1, 0x0 ! Is new_usp NULL? 1355 rd %wim, %g5 1356 WRITE_PAUSE 1357 be,a 1f 1358 mov %fp, %o1 ! yes, use callers usp 1359 andn %o1, 7, %o1 ! no, align to 8 bytes 13601: 1361 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] 1362 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr 1363 mov 0, %o3 1364 call sparc_do_fork 1365 mov %l5, %o7 1366 1367 /* Whee, real vfork! */ 1368 .globl sys_vfork, flush_patch_four 1369sys_vfork: 1370flush_patch_four: 1371 FLUSH_ALL_KERNEL_WINDOWS; 1372 ld [%curptr + TI_TASK], %o4 1373 rd %psr, %g4 1374 WRITE_PAUSE 1375 rd %wim, %g5 1376 WRITE_PAUSE 1377 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] 1378 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0 1379 mov %fp, %o1 1380 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0 1381 sethi %hi(sparc_do_fork), %l1 1382 mov 0, %o3 1383 jmpl %l1 + %lo(sparc_do_fork), %g0 1384 add %sp, STACKFRAME_SZ, %o2 1385 1386 .align 4 1387linux_sparc_ni_syscall: 1388 sethi %hi(sys_ni_syscall), %l7 1389 b syscall_is_too_hard 1390 or %l7, %lo(sys_ni_syscall), %l7 1391 1392linux_fast_syscall: 1393 andn %l7, 3, %l7 1394 mov %i0, %o0 1395 mov %i1, %o1 1396 mov %i2, %o2 1397 jmpl %l7 + %g0, %g0 1398 mov %i3, %o3 1399 1400linux_syscall_trace: 1401 call syscall_trace 1402 nop 1403 mov %i0, %o0 1404 mov %i1, %o1 1405 mov %i2, %o2 1406 mov %i3, %o3 1407 b 2f 1408 mov %i4, %o4 1409 1410 .globl ret_from_fork 1411ret_from_fork: 1412 call schedule_tail 1413 mov %g3, %o0 1414 b ret_sys_call 1415 ld [%sp + STACKFRAME_SZ + PT_I0], %o0 1416 1417 /* Linux native and SunOS system calls enter here... */ 1418 .align 4 1419 .globl linux_sparc_syscall 1420linux_sparc_syscall: 1421 /* Direct access to user regs, must faster. */ 1422 cmp %g1, NR_SYSCALLS 1423 bgeu linux_sparc_ni_syscall 1424 sll %g1, 2, %l4 1425 ld [%l7 + %l4], %l7 1426 andcc %l7, 1, %g0 1427 bne linux_fast_syscall 1428 /* Just do first insn from SAVE_ALL in the delay slot */ 1429 1430 .globl syscall_is_too_hard 1431syscall_is_too_hard: 1432 SAVE_ALL_HEAD 1433 rd %wim, %l3 1434 1435 wr %l0, PSR_ET, %psr 1436 mov %i0, %o0 1437 mov %i1, %o1 1438 mov %i2, %o2 1439 1440 ld [%curptr + TI_FLAGS], %l5 1441 mov %i3, %o3 1442 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1443 mov %i4, %o4 1444 bne linux_syscall_trace 1445 mov %i0, %l5 14462: 1447 call %l7 1448 mov %i5, %o5 1449 1450 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1451 1452 .globl ret_sys_call 1453ret_sys_call: 1454 ld [%curptr + TI_FLAGS], %l6 1455 cmp %o0, -ERESTART_RESTARTBLOCK 1456 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 1457 set PSR_C, %g2 1458 bgeu 1f 1459 andcc %l6, _TIF_SYSCALL_TRACE, %g0 1460 1461 /* System call success, clear Carry condition code. */ 1462 andn %g3, %g2, %g3 1463 clr %l6 1464 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1465 bne linux_syscall_trace2 1466 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ 1467 add %l1, 0x4, %l2 /* npc = npc+4 */ 1468 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1469 b ret_trap_entry 1470 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 14711: 1472 /* System call failure, set Carry condition code. 1473 * Also, get abs(errno) to return to the process. 1474 */ 1475 sub %g0, %o0, %o0 1476 or %g3, %g2, %g3 1477 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1478 mov 1, %l6 1479 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1480 bne linux_syscall_trace2 1481 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ 1482 add %l1, 0x4, %l2 /* npc = npc+4 */ 1483 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1484 b ret_trap_entry 1485 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 1486 1487linux_syscall_trace2: 1488 call syscall_trace 1489 add %l1, 0x4, %l2 /* npc = npc+4 */ 1490 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1491 b ret_trap_entry 1492 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 1493 1494 1495 /* 1496 * Solaris system calls and indirect system calls enter here. 1497 * 1498 * I have named the solaris indirect syscalls like that because 1499 * it seems like Solaris has some fast path syscalls that can 1500 * be handled as indirect system calls. - mig 1501 */ 1502 1503linux_syscall_for_solaris: 1504 sethi %hi(sys_call_table), %l7 1505 b linux_sparc_syscall 1506 or %l7, %lo(sys_call_table), %l7 1507 1508 .align 4 1509 .globl solaris_syscall 1510solaris_syscall: 1511 cmp %g1,59 1512 be linux_syscall_for_solaris 1513 cmp %g1,2 1514 be linux_syscall_for_solaris 1515 cmp %g1,42 1516 be linux_syscall_for_solaris 1517 cmp %g1,119 1518 be,a linux_syscall_for_solaris 1519 mov 2, %g1 15201: 1521 SAVE_ALL_HEAD 1522 rd %wim, %l3 1523 1524 wr %l0, PSR_ET, %psr 1525 nop 1526 nop 1527 mov %i0, %l5 1528 1529 call do_solaris_syscall 1530 add %sp, STACKFRAME_SZ, %o0 1531 1532 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1533 set PSR_C, %g2 1534 cmp %o0, -ERESTART_RESTARTBLOCK 1535 bgeu 1f 1536 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 1537 1538 /* System call success, clear Carry condition code. */ 1539 andn %g3, %g2, %g3 1540 clr %l6 1541 b 2f 1542 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1543 15441: 1545 /* System call failure, set Carry condition code. 1546 * Also, get abs(errno) to return to the process. 1547 */ 1548 sub %g0, %o0, %o0 1549 mov 1, %l6 1550 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1551 or %g3, %g2, %g3 1552 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1553 1554 /* Advance the pc and npc over the trap instruction. 1555 * If the npc is unaligned (has a 1 in the lower byte), it means 1556 * the kernel does not want us to play magic (ie, skipping over 1557 * traps). Mainly when the Solaris code wants to set some PC and 1558 * nPC (setcontext). 1559 */ 15602: 1561 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ 1562 andcc %l1, 1, %g0 1563 bne 1f 1564 add %l1, 0x4, %l2 /* npc = npc+4 */ 1565 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1566 b ret_trap_entry 1567 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 1568 1569 /* kernel knows what it is doing, fixup npc and continue */ 15701: 1571 sub %l1, 1, %l1 1572 b ret_trap_entry 1573 st %l1, [%sp + STACKFRAME_SZ + PT_NPC] 1574 1575#ifndef CONFIG_SUNOS_EMUL 1576 .align 4 1577 .globl sunos_syscall 1578sunos_syscall: 1579 SAVE_ALL_HEAD 1580 rd %wim, %l3 1581 wr %l0, PSR_ET, %psr 1582 nop 1583 nop 1584 mov %i0, %l5 1585 call do_sunos_syscall 1586 add %sp, STACKFRAME_SZ, %o0 1587#endif 1588 1589 /* {net, open}bsd system calls enter here... */ 1590 .align 4 1591 .globl bsd_syscall 1592bsd_syscall: 1593 /* Direct access to user regs, must faster. */ 1594 cmp %g1, NR_SYSCALLS 1595 blu,a 1f 1596 sll %g1, 2, %l4 1597 1598 set sys_ni_syscall, %l7 1599 b bsd_is_too_hard 1600 nop 1601 16021: 1603 ld [%l7 + %l4], %l7 1604 1605 .globl bsd_is_too_hard 1606bsd_is_too_hard: 1607 rd %wim, %l3 1608 SAVE_ALL 1609 1610 wr %l0, PSR_ET, %psr 1611 WRITE_PAUSE 1612 16132: 1614 mov %i0, %o0 1615 mov %i1, %o1 1616 mov %i2, %o2 1617 mov %i0, %l5 1618 mov %i3, %o3 1619 mov %i4, %o4 1620 call %l7 1621 mov %i5, %o5 1622 1623 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1624 set PSR_C, %g2 1625 cmp %o0, -ERESTART_RESTARTBLOCK 1626 bgeu 1f 1627 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 1628 1629 /* System call success, clear Carry condition code. */ 1630 andn %g3, %g2, %g3 1631 clr %l6 1632 b 2f 1633 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1634 16351: 1636 /* System call failure, set Carry condition code. 1637 * Also, get abs(errno) to return to the process. 1638 */ 1639 sub %g0, %o0, %o0 1640#if 0 /* XXX todo XXX */ 1641 sethi %hi(bsd_xlatb_rorl), %o3 1642 or %o3, %lo(bsd_xlatb_rorl), %o3 1643 sll %o0, 2, %o0 1644 ld [%o3 + %o0], %o0 1645#endif 1646 mov 1, %l6 1647 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1648 or %g3, %g2, %g3 1649 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1650 1651 /* Advance the pc and npc over the trap instruction. */ 16522: 1653 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ 1654 add %l1, 0x4, %l2 /* npc = npc+4 */ 1655 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1656 b ret_trap_entry 1657 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 1658 1659/* Saving and restoring the FPU state is best done from lowlevel code. 1660 * 1661 * void fpsave(unsigned long *fpregs, unsigned long *fsr, 1662 * void *fpqueue, unsigned long *fpqdepth) 1663 */ 1664 1665 .globl fpsave 1666fpsave: 1667 st %fsr, [%o1] ! this can trap on us if fpu is in bogon state 1668 ld [%o1], %g1 1669 set 0x2000, %g4 1670 andcc %g1, %g4, %g0 1671 be 2f 1672 mov 0, %g2 1673 1674 /* We have an fpqueue to save. */ 16751: 1676 std %fq, [%o2] 1677fpsave_magic: 1678 st %fsr, [%o1] 1679 ld [%o1], %g3 1680 andcc %g3, %g4, %g0 1681 add %g2, 1, %g2 1682 bne 1b 1683 add %o2, 8, %o2 1684 16852: 1686 st %g2, [%o3] 1687 1688 std %f0, [%o0 + 0x00] 1689 std %f2, [%o0 + 0x08] 1690 std %f4, [%o0 + 0x10] 1691 std %f6, [%o0 + 0x18] 1692 std %f8, [%o0 + 0x20] 1693 std %f10, [%o0 + 0x28] 1694 std %f12, [%o0 + 0x30] 1695 std %f14, [%o0 + 0x38] 1696 std %f16, [%o0 + 0x40] 1697 std %f18, [%o0 + 0x48] 1698 std %f20, [%o0 + 0x50] 1699 std %f22, [%o0 + 0x58] 1700 std %f24, [%o0 + 0x60] 1701 std %f26, [%o0 + 0x68] 1702 std %f28, [%o0 + 0x70] 1703 retl 1704 std %f30, [%o0 + 0x78] 1705 1706 /* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd 1707 * code for pointing out this possible deadlock, while we save state 1708 * above we could trap on the fsr store so our low level fpu trap 1709 * code has to know how to deal with this. 1710 */ 1711fpsave_catch: 1712 b fpsave_magic + 4 1713 st %fsr, [%o1] 1714 1715fpsave_catch2: 1716 b fpsave + 4 1717 st %fsr, [%o1] 1718 1719 /* void fpload(unsigned long *fpregs, unsigned long *fsr); */ 1720 1721 .globl fpload 1722fpload: 1723 ldd [%o0 + 0x00], %f0 1724 ldd [%o0 + 0x08], %f2 1725 ldd [%o0 + 0x10], %f4 1726 ldd [%o0 + 0x18], %f6 1727 ldd [%o0 + 0x20], %f8 1728 ldd [%o0 + 0x28], %f10 1729 ldd [%o0 + 0x30], %f12 1730 ldd [%o0 + 0x38], %f14 1731 ldd [%o0 + 0x40], %f16 1732 ldd [%o0 + 0x48], %f18 1733 ldd [%o0 + 0x50], %f20 1734 ldd [%o0 + 0x58], %f22 1735 ldd [%o0 + 0x60], %f24 1736 ldd [%o0 + 0x68], %f26 1737 ldd [%o0 + 0x70], %f28 1738 ldd [%o0 + 0x78], %f30 1739 ld [%o1], %fsr 1740 retl 1741 nop 1742 1743 /* __ndelay and __udelay take two arguments: 1744 * 0 - nsecs or usecs to delay 1745 * 1 - per_cpu udelay_val (loops per jiffy) 1746 * 1747 * Note that ndelay gives HZ times higher resolution but has a 10ms 1748 * limit. udelay can handle up to 1s. 1749 */ 1750 .globl __ndelay 1751__ndelay: 1752 save %sp, -STACKFRAME_SZ, %sp 1753 mov %i0, %o0 1754 call .umul 1755 mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ) 1756 call .umul 1757 mov %i1, %o1 ! udelay_val 1758 ba delay_continue 1759 mov %o1, %o0 ! >>32 later for better resolution 1760 1761 .globl __udelay 1762__udelay: 1763 save %sp, -STACKFRAME_SZ, %sp 1764 mov %i0, %o0 1765 sethi %hi(0x10c6), %o1 1766 call .umul 1767 or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000 1768 call .umul 1769 mov %i1, %o1 ! udelay_val 1770 call .umul 1771 mov HZ, %o0 ! >>32 earlier for wider range 1772 1773delay_continue: 1774 cmp %o0, 0x0 17751: 1776 bne 1b 1777 subcc %o0, 1, %o0 1778 1779 ret 1780 restore 1781 1782 /* Handle a software breakpoint */ 1783 /* We have to inform parent that child has stopped */ 1784 .align 4 1785 .globl breakpoint_trap 1786breakpoint_trap: 1787 rd %wim,%l3 1788 SAVE_ALL 1789 wr %l0, PSR_ET, %psr 1790 WRITE_PAUSE 1791 1792 st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls 1793 call sparc_breakpoint 1794 add %sp, STACKFRAME_SZ, %o0 1795 1796 RESTORE_ALL 1797 1798 .align 4 1799 .globl __handle_exception, flush_patch_exception 1800__handle_exception: 1801flush_patch_exception: 1802 FLUSH_ALL_KERNEL_WINDOWS; 1803 ldd [%o0], %o6 1804 jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h 1805 mov 1, %g1 ! signal EFAULT condition 1806 1807 .align 4 1808 .globl kill_user_windows, kuw_patch1_7win 1809 .globl kuw_patch1 1810kuw_patch1_7win: sll %o3, 6, %o3 1811 1812 /* No matter how much overhead this routine has in the worst 1813 * case scenerio, it is several times better than taking the 1814 * traps with the old method of just doing flush_user_windows(). 1815 */ 1816kill_user_windows: 1817 ld [%g6 + TI_UWINMASK], %o0 ! get current umask 1818 orcc %g0, %o0, %g0 ! if no bits set, we are done 1819 be 3f ! nothing to do 1820 rd %psr, %o5 ! must clear interrupts 1821 or %o5, PSR_PIL, %o4 ! or else that could change 1822 wr %o4, 0x0, %psr ! the uwinmask state 1823 WRITE_PAUSE ! burn them cycles 18241: 1825 ld [%g6 + TI_UWINMASK], %o0 ! get consistent state 1826 orcc %g0, %o0, %g0 ! did an interrupt come in? 1827 be 4f ! yep, we are done 1828 rd %wim, %o3 ! get current wim 1829 srl %o3, 1, %o4 ! simulate a save 1830kuw_patch1: 1831 sll %o3, 7, %o3 ! compute next wim 1832 or %o4, %o3, %o3 ! result 1833 andncc %o0, %o3, %o0 ! clean this bit in umask 1834 bne kuw_patch1 ! not done yet 1835 srl %o3, 1, %o4 ! begin another save simulation 1836 wr %o3, 0x0, %wim ! set the new wim 1837 st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask 18384: 1839 wr %o5, 0x0, %psr ! re-enable interrupts 1840 WRITE_PAUSE ! burn baby burn 18413: 1842 retl ! return 1843 st %g0, [%g6 + TI_W_SAVED] ! no windows saved 1844 1845 .align 4 1846 .globl restore_current 1847restore_current: 1848 LOAD_CURRENT(g6, o0) 1849 retl 1850 nop 1851 1852#ifdef CONFIG_PCI 1853#include <asm/pcic.h> 1854 1855 .align 4 1856 .globl linux_trap_ipi15_pcic 1857linux_trap_ipi15_pcic: 1858 rd %wim, %l3 1859 SAVE_ALL 1860 1861 /* 1862 * First deactivate NMI 1863 * or we cannot drop ET, cannot get window spill traps. 1864 * The busy loop is necessary because the PIO error 1865 * sometimes does not go away quickly and we trap again. 1866 */ 1867 sethi %hi(pcic_regs), %o1 1868 ld [%o1 + %lo(pcic_regs)], %o2 1869 1870 ! Get pending status for printouts later. 1871 ld [%o2 + PCI_SYS_INT_PENDING], %o0 1872 1873 mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1 1874 stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR] 18751: 1876 ld [%o2 + PCI_SYS_INT_PENDING], %o1 1877 andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0 1878 bne 1b 1879 nop 1880 1881 or %l0, PSR_PIL, %l4 1882 wr %l4, 0x0, %psr 1883 WRITE_PAUSE 1884 wr %l4, PSR_ET, %psr 1885 WRITE_PAUSE 1886 1887 call pcic_nmi 1888 add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs 1889 RESTORE_ALL 1890 1891 .globl pcic_nmi_trap_patch 1892pcic_nmi_trap_patch: 1893 sethi %hi(linux_trap_ipi15_pcic), %l3 1894 jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0 1895 rd %psr, %l0 1896 .word 0 1897 1898#endif /* CONFIG_PCI */ 1899 1900/* End of entry.S */ 1901