/* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
 *
 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
 */

#include <linux/errno.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/kgdb.h>
#include <asm/contregs.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/memreg.h>
#include <asm/page.h>
#ifdef CONFIG_SUN4
#include <asm/pgtsun4.h>
#else
#include <asm/pgtsun4c.h>
#endif
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
#include <asm/mxcc.h>
#include <asm/thread_info.h>
#include <asm/param.h>
#include <asm/unistd.h>

#include <asm/asmmacro.h>

/* %g6 holds the current thread_info pointer throughout kernel mode. */
#define curptr      g6

/* These are just handy. */
#define _SV	save	%sp, -STACKFRAME_SZ, %sp
#define _RS     restore 

/* Seven save/restore pairs force every register window belonging to
 * the kernel out to the stack (the window count is patched at boot
 * where fewer windows exist -- see the flush_patch_* labels below).
 */
#define FLUSH_ALL_KERNEL_WINDOWS \
	_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
	_RS; _RS; _RS; _RS; _RS; _RS; _RS;

/* First, KGDB low level things.  This is a rewrite
 * of the routines found in the sparc-stub.c asm() statement
 * from the gdb distribution.  This is also dual-purpose
 * as a software trap for userlevel programs.
 */
	.data
	.align	4

in_trap_handler:
	.word	0

	.text
	.align	4

#if 0 /* kgdb is dropped from 2.5.33 */
! This function is called when any SPARC trap (except window overflow or
! underflow) occurs.  It makes sure that the invalid register window is still
! available before jumping into C code.  It will also restore the world if you
! return from handle_exception.

	.globl	trap_low
trap_low:
	rd	%wim, %l3
	SAVE_ALL

	sethi	%hi(in_trap_handler), %l4
	ld	[%lo(in_trap_handler) + %l4], %l5
	inc	%l5
	st	%l5, [%lo(in_trap_handler) + %l4]

	/* Make sure kgdb sees the same state we just saved. */
	LOAD_PT_GLOBALS(sp)
	LOAD_PT_INS(sp)
	ld	[%sp + STACKFRAME_SZ + PT_Y], %l4
	ld	[%sp + STACKFRAME_SZ + PT_WIM], %l3
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %l0
	ld	[%sp + STACKFRAME_SZ + PT_PC], %l1
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l2
	rd	%tbr, %l5	/* Never changes... */

	/* Make kgdb exception frame. */
	sub	%sp, (16+1+6+1+72)*4, %sp	! Make room for input & locals
						! + hidden arg + arg spill
						! + doubleword alignment
						! + registers[72] local var
	SAVE_KGDB_GLOBALS(sp)
	SAVE_KGDB_INS(sp)
	SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)

	/* We are increasing PIL, so two writes. */
	or	%l0, PSR_PIL, %l0
	wr	%l0, 0, %psr
	WRITE_PAUSE
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	call	handle_exception
	 add	%sp, STACKFRAME_SZ, %o0	! Pass address of registers

	/* Load new kgdb register set. */
	LOAD_KGDB_GLOBALS(sp)
	LOAD_KGDB_INS(sp)
	LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
	wr	%l4, 0x0, %y

	sethi	%hi(in_trap_handler), %l4
	ld	[%lo(in_trap_handler) + %l4], %l5
	dec	%l5
	st	%l5, [%lo(in_trap_handler) + %l4]

	add	%sp, (16+1+6+1+72)*4, %sp	! Undo the kgdb trap frame.

	/* Now take what kgdb did and place it into the pt_regs
	 * frame which SparcLinux RESTORE_ALL understands.
	 */
	STORE_PT_INS(sp)
	STORE_PT_GLOBALS(sp)
	STORE_PT_YREG(sp, g2)
	STORE_PT_PRIV(sp, l0, l1, l2)

	RESTORE_ALL
#endif

#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
	.text
	.align	4
	.globl	floppy_hardint
floppy_hardint:
	/*
	 * Fast-path floppy pseudo-DMA interrupt.  Entered with traps
	 * disabled; %l0/%l1/%l2 hold the saved %psr/%pc/%npc which the
	 * return sequence below (wr %psr; jmp %l1; rett %l2) restores.
	 *
	 * This code cannot touch registers %l0 %l1 and %l2
	 * because SAVE_ALL depends on their values.  It depends
	 * on %l3 also, but we regenerate it before a call.
	 * Other registers are:
	 * %l3 -- base address of fdc registers
	 * %l4 -- pdma_vaddr
	 * %l5 -- scratch for ld/st address
	 * %l6 -- pdma_size
	 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
	 */

	/* Do we have work to do? */
	sethi	%hi(doing_pdma), %l7
	ld	[%l7 + %lo(doing_pdma)], %l7
	cmp	%l7, 0
	be	floppy_dosoftint
	 nop

	/* Load fdc register base */
	sethi	%hi(fdc_status), %l3
	ld	[%l3 + %lo(fdc_status)], %l3

	/* Setup register addresses */
	sethi	%hi(pdma_vaddr), %l5	! transfer buffer
	ld	[%l5 + %lo(pdma_vaddr)], %l4
	sethi	%hi(pdma_size), %l5	! bytes to go
	ld	[%l5 + %lo(pdma_size)], %l6
next_byte:
	ldub	[%l3], %l7

	andcc	%l7, 0x80, %g0		! Does fifo still have data
	bz	floppy_fifo_emptied	! fifo has been emptied...
	 andcc	%l7, 0x20, %g0		! in non-dma mode still?
	bz	floppy_overrun		! nope, overrun
	 andcc	%l7, 0x40, %g0		! 0=write 1=read
	bz	floppy_write
	 sub	%l6, 0x1, %l6

	/* Ok, actually read this byte */
	ldub	[%l3 + 1], %l7
	orcc	%g0, %l6, %g0
	stb	%l7, [%l4]
	bne	next_byte
	 add	%l4, 0x1, %l4

	b	floppy_tdone
	 nop

floppy_write:
	/* Ok, actually write this byte */
	ldub	[%l4], %l7
	orcc	%g0, %l6, %g0
	stb	%l7, [%l3 + 1]
	bne	next_byte
	 add	%l4, 0x1, %l4

	/* fall through... */
floppy_tdone:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Flip terminal count pin */
	set	auxio_register, %l7
	ld	[%l7], %l7

	set	sparc_cpu_model, %l5
	ld	[%l5], %l5
	subcc	%l5, 1, %g0		/* enum { sun4c = 1 }; */
	be	1f
	 ldub	[%l7], %l5

	/* Not sun4c: different AUXIO bit layout for the TC pin. */
	or	%l5, 0xc2, %l5
	stb	%l5, [%l7]
	andn	%l5, 0x02, %l5
	b	2f
	 nop

1:
	or	%l5, 0xf4, %l5
	stb	%l5, [%l7]
	andn	%l5, 0x04, %l5

2:
	/* Kill some time so the bits set */
	WRITE_PAUSE
	WRITE_PAUSE

	stb	%l5, [%l7]

	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	b	floppy_dosoftint
	 st	%g0, [%l7 + %lo(doing_pdma)]

	/* We emptied the FIFO, but we haven't read everything
	 * as of yet.  Store the current transfer address and
	 * bytes left to read so we can continue when the next
	 * fast IRQ comes in.
	 */
floppy_fifo_emptied:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l7
	st	%l6, [%l7 + %lo(pdma_size)]

	/* Restore condition codes */
	wr	%l0, 0x0, %psr
	WRITE_PAUSE

	jmp	%l1
	rett	%l2

floppy_overrun:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	st	%g0, [%l7 + %lo(doing_pdma)]

	/* fall through... */
floppy_dosoftint:
	rd	%wim, %l3
	SAVE_ALL

	/* Set all IRQs off. */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE

	mov	11, %o0			! floppy irq level (unused anyway)
	mov	%g0, %o1		! devid is not used in fast interrupts
	call	sparc_floppy_irq
	 add	%sp, STACKFRAME_SZ, %o2	! struct pt_regs *regs

	RESTORE_ALL

#endif /* (CONFIG_BLK_DEV_FD) */

	/* Bad trap handler: catch-all for traps with no dedicated
	 * entry; hands the trap number in %l7 off to C code.
	 */
	.globl	bad_trap_handler
bad_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	call	do_hw_interrupt
	 mov	%l7, %o1		! trap number

	RESTORE_ALL

/* For now all IRQ's not registered get sent here. handler_irq() will
 * see if a routine is registered to handle this interrupt and if not
 * it will say so on the console.
 */

	.align	4
	.globl	real_irq_entry, patch_handler_irq
real_irq_entry:
	SAVE_ALL

#ifdef CONFIG_SMP
	.globl	patchme_maybe_smp_msg

	cmp	%l7, 12
patchme_maybe_smp_msg:
	bgu	maybe_smp4m_msg
	 nop
#endif

real_irq_continue:
	or	%l0, PSR_PIL, %g2
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	mov	%l7, %o0		! irq level
patch_handler_irq:
	call	handler_irq
	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr
	or	%l0, PSR_PIL, %g2	! restore PIL after handler_irq
	wr	%g2, PSR_ET, %psr	! keep ET up
	WRITE_PAUSE

	RESTORE_ALL

#ifdef CONFIG_SMP
	/* SMP per-cpu ticker interrupts are handled specially. */
smp4m_ticker:
	bne	real_irq_continue+4
	 or	%l0, PSR_PIL, %g2
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4m_percpu_timer_interrupt
	 add	%sp, STACKFRAME_SZ, %o0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
	RESTORE_ALL

	/* Here is where we check for possible SMP IPI passed to us
	 * on some level other than 15 which is the NMI and only used
	 * for cross calls.  That has a separate entry point below.
	 */
maybe_smp4m_msg:
	GET_PROCESSOR4M_ID(o3)
	set	sun4m_interrupts, %l5
	ld	[%l5], %o5
	sethi	%hi(0x40000000), %o2
	sll	%o3, 12, %o3
	ld	[%o5 + %o3], %o1
	andcc	%o1, %o2, %g0
	be,a	smp4m_ticker
	 cmp	%l7, 14
	st	%o2, [%o5 + 0x4]	! ack the soft interrupt
	WRITE_PAUSE
	ld	[%o5], %g0		! read back to flush the store
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp_reschedule_irq
	 nop

	RESTORE_ALL

	.align	4
	.globl	linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
	SAVE_ALL
	sethi	%hi(0x80000000), %o2
	GET_PROCESSOR4M_ID(o0)
	set	sun4m_interrupts, %l5
	ld	[%l5], %o5
	sll	%o0, 12, %o0
	add	%o5, %o0, %o5
	ld	[%o5], %o3
	andcc	%o3, %o2, %g0
	be	1f			! Must be an NMI async memory error
	 st	%o2, [%o5 + 4]
	WRITE_PAUSE
	ld	[%o5], %g0
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4m_cross_call_irq
	 nop
	b	ret_trap_lockless_ipi
	 clr	%l6
1:
	/* NMI async memory error handling. */
	sethi	%hi(0x80000000), %l4
	sethi	%hi(0x4000), %o3
	sub	%o5, %o0, %o5
	add	%o5, %o3, %l5
	st	%l4, [%l5 + 0xc]
	WRITE_PAUSE
	ld	[%l5], %g0
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	sun4m_nmi
	 nop
	st	%l4, [%l5 + 0x8]
	WRITE_PAUSE
	ld	[%l5], %g0
	WRITE_PAUSE
	RESTORE_ALL

	.globl	smp4d_ticker
	/* SMP per-cpu ticker interrupts are handled specially. */
smp4d_ticker:
	SAVE_ALL
	or	%l0, PSR_PIL, %g2
	sethi	%hi(CC_ICLR), %o0
	sethi	%hi(1 << 14), %o1
	or	%o0, %lo(CC_ICLR), %o0
	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 14 in MXCC's ICLR */
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4d_percpu_timer_interrupt
	 add	%sp, STACKFRAME_SZ, %o0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
	RESTORE_ALL

	.align	4
	.globl	linux_trap_ipi15_sun4d
linux_trap_ipi15_sun4d:
	SAVE_ALL
	sethi	%hi(CC_BASE), %o4
	sethi	%hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
	or	%o4, (CC_EREG - CC_BASE), %o0
	ldda	[%o0] ASI_M_MXCC, %o0
	andcc	%o0, %o2, %g0
	bne	1f
	 sethi	%hi(BB_STAT2), %o2
	lduba	[%o2] ASI_M_CTL, %o2
	andcc	%o2, BB_STAT2_MASK, %g0
	bne	2f
	 or	%o4, (CC_ICLR - CC_BASE), %o0
	sethi	%hi(1 << 15), %o1
	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 15 in MXCC's ICLR */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4d_cross_call_irq
	 nop
	b	ret_trap_lockless_ipi
	 clr	%l6

1:	/* MXCC error */
2:	/* BB error */
	/* Disable PIL 15 */
	set	CC_IMSK, %l4
	lduha	[%l4] ASI_M_MXCC, %l5
	sethi	%hi(1 << 15), %l7
	or	%l5, %l7, %l5
	stha	%l5, [%l4] ASI_M_MXCC
	/* FIXME */
1:	b,a	1b

#endif /* CONFIG_SMP */

	/* This routine handles illegal instructions and privileged
	 * instruction attempts from user code.
	 */
	.align	4
	.globl	bad_instruction
bad_instruction:
	sethi	%hi(0xc1f80000), %l4	! mask/opcode pair recognizing the
	ld	[%l1], %l5		! "flush" instruction at the fault PC
	sethi	%hi(0x81d80000), %l7
	and	%l5, %l4, %l5
	cmp	%l5, %l7
	be	1f
	 SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_illegal_instruction
	 mov	%l0, %o3

	RESTORE_ALL

1:	/* unimplemented flush - just skip */
	jmpl	%l2, %g0
	 rett	%l2 + 4

	.align	4
	.globl	priv_instruction
priv_instruction:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_priv_instruction
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles unaligned data accesses. */
	.align	4
	.globl	mna_handler
mna_handler:
	andcc	%l0, PSR_PS, %g0	! fault from kernel or user mode?
	be	mna_fromuser
	 nop

	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	ld	[%l1], %o1		! faulting instruction word
	call	kernel_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL

mna_fromuser:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	ld	[%l1], %o1
	call	user_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL

	/* This routine handles floating point disabled traps. */
	.align	4
	.globl	fpd_trap_handler
fpd_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_fpd_trap
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Floating Point Exceptions.
	 * An FP exception raised while fpsave itself is storing %fsr
	 * (see fpsave/fpsave_magic below) is redirected to the fpsave
	 * catch trampolines instead of the normal C handler.
	 */
	.align	4
	.globl	fpe_trap_handler
fpe_trap_handler:
	set	fpsave_magic, %l5
	cmp	%l1, %l5
	be	1f
	 sethi	%hi(fpsave), %l5
	or	%l5, %lo(fpsave), %l5
	cmp	%l1, %l5
	bne	2f
	 sethi	%hi(fpsave_catch2), %l5
	or	%l5, %lo(fpsave_catch2), %l5
	wr	%l0, 0x0, %psr
	WRITE_PAUSE
	jmp	%l5
	 rett	%l5 + 4
1:
	sethi	%hi(fpsave_catch), %l5
	or	%l5, %lo(fpsave_catch), %l5
	wr	%l0, 0x0, %psr
	WRITE_PAUSE
	jmp	%l5
	 rett	%l5 + 4

2:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_fpe_trap
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Tag Overflow Exceptions. */
	.align	4
	.globl	do_tag_overflow
do_tag_overflow:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_tag_overflow
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Watchpoint Exceptions. */
	.align	4
	.globl	do_watchpoint
do_watchpoint:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_watchpoint
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Register Access Exceptions. */
	.align	4
	.globl	do_reg_access
do_reg_access:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_reg_access
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Co-Processor Disabled Exceptions. */
	.align	4
	.globl	do_cp_disabled
do_cp_disabled:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_cp_disabled
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Co-Processor Exceptions. */
	.align	4
	.globl	do_cp_exception
do_cp_exception:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_cp_exception
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Hardware Divide By Zero Exceptions. */
	.align	4
	.globl	do_hw_divzero
do_hw_divzero:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_hw_divzero
	 mov	%l0, %o3

	RESTORE_ALL

	.align	4
	.globl	do_flush_windows
do_flush_windows:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	andcc	%l0, PSR_PS, %g0
	bne	dfw_kernel
	 nop

	call	flush_user_windows
	 nop

	/* Advance over the trap instruction. */
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
	add	%l1, 0x4, %l2
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	RESTORE_ALL

	.globl	flush_patch_one

	/* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
flush_patch_one:
	FLUSH_ALL_KERNEL_WINDOWS

	/* Advance over the trap instruction. */
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
	add	%l1, 0x4, %l2
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	RESTORE_ALL

	/* The getcc software trap.  The user wants the condition codes from
	 * the %psr in register %g1.
	 */

	.align	4
	.globl	getcc_trap_handler
getcc_trap_handler:
	srl	%l0, 20, %g1	! give user
	and	%g1, 0xf, %g1	! only ICC bits in %psr
	jmp	%l2		! advance over trap instruction
	rett	%l2 + 0x4	! like this...

	/* The setcc software trap.  The user has condition codes in %g1
	 * that it would like placed in the %psr.  Be careful not to flip
	 * any unintentional bits!
	 */

	.align	4
	.globl	setcc_trap_handler
setcc_trap_handler:
	sll	%g1, 0x14, %l4
	set	PSR_ICC, %l5
	andn	%l0, %l5, %l0	! clear ICC bits in %psr
	and	%l4, %l5, %l4	! clear non-ICC bits in user value
	or	%l4, %l0, %l4	! or them in... mix mix mix

	wr	%l4, 0x0, %psr	! set new %psr
	WRITE_PAUSE		! TI scumbags...

	jmp	%l2		! advance over trap instruction
	rett	%l2 + 0x4	! like this...
	.align	4
	.globl	linux_trap_nmi_sun4c
linux_trap_nmi_sun4c:
	SAVE_ALL

	/* Ugh, we need to clear the IRQ line.  This is now
	 * a very sun4c specific trap handler...
	 */
	sethi	%hi(interrupt_enable), %l5
	ld	[%l5 + %lo(interrupt_enable)], %l5
	ldub	[%l5], %l6
	andn	%l6, INTS_ENAB, %l6
	stb	%l6, [%l5]

	/* Now it is safe to re-enable traps without recursion. */
	or	%l0, PSR_PIL, %l0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	/* Now call the c-code with the pt_regs frame ptr and the
	 * memory error registers as arguments.  The ordering chosen
	 * here is due to unlatching semantics.
	 */
	sethi	%hi(AC_SYNC_ERR), %o0
	add	%o0, 0x4, %o0
	lda	[%o0] ASI_CONTROL, %o2	! sync vaddr
	sub	%o0, 0x4, %o0
	lda	[%o0] ASI_CONTROL, %o1	! sync error
	add	%o0, 0xc, %o0
	lda	[%o0] ASI_CONTROL, %o4	! async vaddr
	sub	%o0, 0x4, %o0
	lda	[%o0] ASI_CONTROL, %o3	! async error
	call	sparc_lvl15_nmi
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL

	/* Boot-time patch instructions: the sun4c fault path below is
	 * rewritten in place with the variant matching the machine's
	 * segment-map width, context count and cache configuration.
	 */
	.align	4
	.globl	invalid_segment_patch1_ff
	.globl	invalid_segment_patch2_ff
invalid_segment_patch1_ff:	cmp	%l4, 0xff
invalid_segment_patch2_ff:	mov	0xff, %l3

	.align	4
	.globl	invalid_segment_patch1_1ff
	.globl	invalid_segment_patch2_1ff
invalid_segment_patch1_1ff:	cmp	%l4, 0x1ff
invalid_segment_patch2_1ff:	mov	0x1ff, %l3

	.align	4
	.globl	num_context_patch1_16, num_context_patch2_16
num_context_patch1_16:		mov	0x10, %l7
num_context_patch2_16:		mov	0x10, %l7

	.align	4
	.globl	vac_linesize_patch_32
vac_linesize_patch_32:		subcc	%l7, 32, %l7

	.align	4
	.globl	vac_hwflush_patch1_on, vac_hwflush_patch2_on

/*
 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
 * two instructions (Anton)
 */
#ifdef CONFIG_SUN4
vac_hwflush_patch1_on:		nop
#else
vac_hwflush_patch1_on:		addcc	%l7, -PAGE_SIZE, %l7
#endif

vac_hwflush_patch2_on:		sta	%g0, [%l3 + %l7] ASI_HWFLUSHSEG

	.globl	invalid_segment_patch1, invalid_segment_patch2
	.globl	num_context_patch1
	.globl	vac_linesize_patch, vac_hwflush_patch1
	.globl	vac_hwflush_patch2

	.align	4
	.globl	sun4c_fault

! %l0 = %psr
! %l1 = %pc
! %l2 = %npc
! %l3 = %wim
! %l7 = 1 for textfault
! We want error in %l5, vaddr in %l6
sun4c_fault:
#ifdef CONFIG_SUN4
	sethi	%hi(sun4c_memerr_reg), %l4
	ld	[%l4+%lo(sun4c_memerr_reg)], %l4  ! memerr ctrl reg addr
	ld	[%l4], %l6		! memerr ctrl reg
	ld	[%l4 + 4], %l5		! memerr vaddr reg
	andcc	%l6, 0x80, %g0		! check for error type
	st	%g0, [%l4 + 4]		! clear the error
	be	0f			! normal error
	 sethi	%hi(AC_BUS_ERROR), %l4	! bus err reg addr

	call	prom_halt		! something weird happened
					! what exactly did happen?
					! what should we do here?

0:	or	%l4, %lo(AC_BUS_ERROR), %l4	! bus err reg addr
	lduba	[%l4] ASI_CONTROL, %l6	! bus err reg

	cmp	%l7, 1			! text fault?
	be	1f			! yes
	 nop

	ld	[%l1], %l4		! load instruction that caused fault
	srl	%l4, 21, %l4
	andcc	%l4, 1, %g0		! store instruction?

	be	1f			! no
	 sethi	%hi(SUN4C_SYNC_BADWRITE), %l4	! yep
					! %lo(SUN4C_SYNC_BADWRITE) = 0
	or	%l4, %l6, %l6		! set write bit to emulate sun4c
1:
#else
	sethi	%hi(AC_SYNC_ERR), %l4
	add	%l4, 0x4, %l6		! AC_SYNC_VA in %l6
	lda	[%l6] ASI_CONTROL, %l5	! Address
	lda	[%l4] ASI_CONTROL, %l6	! Error, retained for a bit
#endif

	andn	%l5, 0xfff, %l5		! Encode all info into l7
	srl	%l6, 14, %l4

	and	%l4, 2, %l4
	or	%l5, %l4, %l4

	or	%l4, %l7, %l7		! l7 = [addr,write,txtfault]

	andcc	%l0, PSR_PS, %g0
	be	sun4c_fault_fromuser
	 andcc	%l7, 1, %g0		! Text fault?

	be	1f
	 sethi	%hi(KERNBASE), %l4

	mov	%l1, %l5		! PC

1:
	cmp	%l5, %l4
	blu	sun4c_fault_fromuser
	 sethi	%hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4

	/* If the kernel references a bum kernel pointer, or a pte which
	 * points to a non-existent page in ram, we will run this code
	 * _forever_ and lock up the machine!!!!! So we must check for
	 * this condition, the AC_SYNC_ERR bits are what we must examine.
	 * Also a parity error would make this happen as well.  So we just
	 * check that we are in fact servicing a tlb miss and not some
	 * other type of fault for the kernel.
	 */
	andcc	%l6, 0x80, %g0
	be	sun4c_fault_fromuser
	 and	%l5, %l4, %l5

	/* Test for NULL pte_t * in vmalloc area. */
	sethi	%hi(VMALLOC_START), %l4
	cmp	%l5, %l4
	blu,a	invalid_segment_patch1
	 lduXa	[%l5] ASI_SEGMAP, %l4

	sethi	%hi(swapper_pg_dir), %l4
	srl	%l5, SUN4C_PGDIR_SHIFT, %l6
	or	%l4, %lo(swapper_pg_dir), %l4
	sll	%l6, 2, %l6
	ld	[%l4 + %l6], %l4
#ifdef CONFIG_SUN4
	sethi	%hi(PAGE_MASK), %l6
	andcc	%l4, %l6, %g0
#else
	andcc	%l4, PAGE_MASK, %g0
#endif
	be	sun4c_fault_fromuser
	 lduXa	[%l5] ASI_SEGMAP, %l4

invalid_segment_patch1:
	cmp	%l4, 0x7f
	bne	1f
	 sethi	%hi(sun4c_kfree_ring), %l4
	or	%l4, %lo(sun4c_kfree_ring), %l4
	ld	[%l4 + 0x18], %l3
	deccc	%l3			! do we have a free entry?
	bcs,a	2f			! no, unmap one.
	 sethi	%hi(sun4c_kernel_ring), %l4

	st	%l3, [%l4 + 0x18]	! sun4c_kfree_ring.num_entries--

	ld	[%l4 + 0x00], %l6	! entry = sun4c_kfree_ring.ringhd.next
	st	%l5, [%l6 + 0x08]	! entry->vaddr = address

	ld	[%l6 + 0x00], %l3	! next = entry->next
	ld	[%l6 + 0x04], %l7	! entry->prev

	st	%l7, [%l3 + 0x04]	! next->prev = entry->prev
	st	%l3, [%l7 + 0x00]	! entry->prev->next = next

	sethi	%hi(sun4c_kernel_ring), %l4
	or	%l4, %lo(sun4c_kernel_ring), %l4
					! head = &sun4c_kernel_ring.ringhd

	ld	[%l4 + 0x00], %l7	! head->next

	st	%l4, [%l6 + 0x04]	! entry->prev = head
	st	%l7, [%l6 + 0x00]	! entry->next = head->next
	st	%l6, [%l7 + 0x04]	! head->next->prev = entry

	st	%l6, [%l4 + 0x00]	! head->next = entry

	ld	[%l4 + 0x18], %l3
	inc	%l3			! sun4c_kernel_ring.num_entries++
	st	%l3, [%l4 + 0x18]
	b	4f
	 ld	[%l6 + 0x08], %l5

2:
	or	%l4, %lo(sun4c_kernel_ring), %l4
					! head = &sun4c_kernel_ring.ringhd

	ld	[%l4 + 0x04], %l6	! entry = head->prev

	ld	[%l6 + 0x08], %l3	! tmp = entry->vaddr

	! Flush segment from the cache.
#ifdef CONFIG_SUN4
	sethi	%hi((128 * 1024)), %l7
#else
	sethi	%hi((64 * 1024)), %l7
#endif
9:
vac_hwflush_patch1:
vac_linesize_patch:
	subcc	%l7, 16, %l7
	bne	9b
vac_hwflush_patch2:
	 sta	%g0, [%l3 + %l7] ASI_FLUSHSEG

	st	%l5, [%l6 + 0x08]	! entry->vaddr = address

	ld	[%l6 + 0x00], %l5	! next = entry->next
	ld	[%l6 + 0x04], %l7	! entry->prev

	st	%l7, [%l5 + 0x04]	! next->prev = entry->prev
	st	%l5, [%l7 + 0x00]	! entry->prev->next = next
	st	%l4, [%l6 + 0x04]	! entry->prev = head

	ld	[%l4 + 0x00], %l7	! head->next

	st	%l7, [%l6 + 0x00]	! entry->next = head->next
	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
	st	%l6, [%l4 + 0x00]	! head->next = entry

	mov	%l3, %l5		! address = tmp

4:
num_context_patch1:
	mov	0x08, %l7

	ld	[%l6 + 0x08], %l4
	ldub	[%l6 + 0x0c], %l3
	or	%l4, %l3, %l4		! encode new vaddr/pseg into l4

	sethi	%hi(AC_CONTEXT), %l3
	lduba	[%l3] ASI_CONTROL, %l6

	/* Invalidate old mapping, instantiate new mapping,
	 * for each context.  Registers l6/l7 are live across
	 * this loop.
	 */
3:	deccc	%l7
	sethi	%hi(AC_CONTEXT), %l3
	stba	%l7, [%l3] ASI_CONTROL
invalid_segment_patch2:
	mov	0x7f, %l3
	stXa	%l3, [%l5] ASI_SEGMAP
	andn	%l4, 0x1ff, %l3
	bne	3b
	 stXa	%l4, [%l3] ASI_SEGMAP

	sethi	%hi(AC_CONTEXT), %l3
	stba	%l6, [%l3] ASI_CONTROL	! restore original context

	andn	%l4, 0x1ff, %l5

1:
	sethi	%hi(VMALLOC_START), %l4
	cmp	%l5, %l4

	bgeu	1f
	 mov	1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7

	/* Below VMALLOC_START: linear kernel mapping, ptes computed
	 * directly from the KERNBASE offset.
	 */
	sethi	%hi(KERNBASE), %l6

	sub	%l5, %l6, %l4
	srl	%l4, PAGE_SHIFT, %l4
	sethi	%hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
	or	%l3, %l4, %l3

	sethi	%hi(PAGE_SIZE), %l4

2:
	sta	%l3, [%l5] ASI_PTE
	deccc	%l7
	inc	%l3
	bne	2b
	 add	%l5, %l4, %l5

	b	7f
	 sethi	%hi(sun4c_kernel_faults), %l4

1:
	/* vmalloc area: walk swapper_pg_dir for the pte page. */
	srl	%l5, SUN4C_PGDIR_SHIFT, %l3
	sethi	%hi(swapper_pg_dir), %l4
	or	%l4, %lo(swapper_pg_dir), %l4
	sll	%l3, 2, %l3
	ld	[%l4 + %l3], %l4
#ifndef CONFIG_SUN4
	and	%l4, PAGE_MASK, %l4
#else
	sethi	%hi(PAGE_MASK), %l6
	and	%l4, %l6, %l4
#endif

	srl	%l5, (PAGE_SHIFT - 2), %l6
	and	%l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
	add	%l6, %l4, %l6

	sethi	%hi(PAGE_SIZE), %l4

2:
	ld	[%l6], %l3
	deccc	%l7
	sta	%l3, [%l5] ASI_PTE
	add	%l6, 0x4, %l6
	bne	2b
	 add	%l5, %l4, %l5

	sethi	%hi(sun4c_kernel_faults), %l4
7:
	ld	[%l4 + %lo(sun4c_kernel_faults)], %l3
	inc	%l3
	st	%l3, [%l4 + %lo(sun4c_kernel_faults)]

	/* Restore condition codes */
	wr	%l0, 0x0, %psr
	WRITE_PAUSE
	jmp	%l1
	 rett	%l2

sun4c_fault_fromuser:
	SAVE_ALL
	nop

	mov	%l7, %o1		! Decode the info from %l7
	mov	%l7, %o2
	and	%o1, 1, %o1		! arg2 = text_faultp
	mov	%l7, %o3
	and	%o2, 2, %o2		! arg3 = writep
	andn	%o3, 0xfff, %o3		! arg4 = faulting address

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	call	do_sun4c_fault
	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr

	RESTORE_ALL

	.align	4
	.globl	srmmu_fault
srmmu_fault:
	mov	0x400, %l5
	mov	0x300, %l4

	lda	[%l5] ASI_M_MMUREGS, %l6	! read sfar first
	lda	[%l4] ASI_M_MMUREGS, %l5	! read sfsr last

	andn	%l6, 0xfff, %l6
	srl	%l5, 6, %l5			! and encode all info into l7

	and	%l5, 2, %l5
	or	%l5, %l6, %l6

	or	%l6, %l7, %l7			! l7 = [addr,write,txtfault]

	SAVE_ALL

	mov	%l7, %o1
	mov	%l7, %o2
	and	%o1, 1, %o1		! arg2 = text_faultp
	mov	%l7, %o3
	and	%o2, 2, %o2		! arg3 = writep
	andn	%o3, 0xfff, %o3		! arg4 = faulting address

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	call	do_sparc_fault
	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr

	RESTORE_ALL

	/* Syscall wrappers below save %o7 around the call and restore
	 * it in the delay slot, so the C routine returns directly to
	 * our caller (a tail call).
	 */
	.align	4
	.globl	sys_nis_syscall
sys_nis_syscall:
	mov	%o7, %l5
	add	%sp, STACKFRAME_SZ, %o0	! pt_regs *regs arg
	call	c_sys_nis_syscall
	 mov	%l5, %o7

	.align	4
	.globl	sys_execve
sys_execve:
	mov	%o7, %l5
	add	%sp, STACKFRAME_SZ, %o0	! pt_regs *regs arg
	call	sparc_execve
	 mov	%l5, %o7

	.globl	sunos_execv
sunos_execv:
	st	%g0, [%sp + STACKFRAME_SZ + PT_I2]

	call	sparc_execve
	 add	%sp, STACKFRAME_SZ, %o0

	b	ret_sys_call
	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0

	.align	4
	.globl	sys_pipe
sys_pipe:
	mov	%o7, %l5
	add	%sp, STACKFRAME_SZ, %o0	! pt_regs *regs arg
	call	sparc_pipe
	 mov	%l5, %o7

	.align	4
	.globl	sys_sigaltstack
sys_sigaltstack:
	mov	%o7, %l5
	mov	%fp, %o2
	call	do_sigaltstack
	 mov	%l5, %o7

	.align	4
	.globl	sys_sigstack
sys_sigstack:
	mov	%o7, %l5
	mov	%fp, %o2
	call	do_sys_sigstack
	 mov	%l5, %o7

	.align	4
	.globl	sys_sigreturn
sys_sigreturn:
	call	do_sigreturn
	 add	%sp, STACKFRAME_SZ, %o0

	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	 nop

	call	syscall_trace
	 nop

1:
	/* We don't want to muck with user registers like a
	 * normal syscall, just return.
	 */
	RESTORE_ALL

	.align	4
	.globl	sys_rt_sigreturn
sys_rt_sigreturn:
	call	do_rt_sigreturn
	 add	%sp, STACKFRAME_SZ, %o0

	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	 nop

	call	syscall_trace
	 nop

1:
	/* We are returning to a signal handler. */
	RESTORE_ALL

	/* Now that we have a real sys_clone, sys_fork() is
	 * implemented in terms of it.  Our _real_ implementation
	 * of SunOS vfork() will use sys_vfork().
	 *
	 * XXX These three should be consolidated into mostly shared
	 * XXX code just like on sparc64... -DaveM
	 */
	.align	4
	.globl	sys_fork, flush_patch_two
sys_fork:
	mov	%o7, %l5
flush_patch_two:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE
	mov	SIGCHLD, %o0			! arg0:	clone flags
	rd	%wim, %g5
	WRITE_PAUSE
	mov	%fp, %o1			! arg1:	usp
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
	mov	0, %o3
	call	sparc_do_fork
	 mov	%l5, %o7

	/* Whee, kernel threads! */
	.globl	sys_clone, flush_patch_three
sys_clone:
	mov	%o7, %l5
flush_patch_three:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE

	/* arg0,1: flags,usp  -- loaded already */
	cmp	%o1, 0x0			! Is new_usp NULL?
	rd	%wim, %g5
	WRITE_PAUSE
	be,a	1f
	 mov	%fp, %o1			! yes, use callers usp
	andn	%o1, 7, %o1			! no, align to 8 bytes
1:
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
	mov	0, %o3
	call	sparc_do_fork
	 mov	%l5, %o7

	/* Whee, real vfork! */
	.globl	sys_vfork, flush_patch_four
sys_vfork:
flush_patch_four:
	FLUSH_ALL_KERNEL_WINDOWS;
	ld	[%curptr + TI_TASK], %o4
	rd	%psr, %g4
	WRITE_PAUSE
	rd	%wim, %g5
	WRITE_PAUSE
	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0	! CLONE_VFORK|CLONE_VM|SIGCHLD
	mov	%fp, %o1
	or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
	sethi	%hi(sparc_do_fork), %l1
	mov	0, %o3
	jmpl	%l1 + %lo(sparc_do_fork), %g0
	 add	%sp, STACKFRAME_SZ, %o2

	.align	4
linux_sparc_ni_syscall:
	sethi	%hi(sys_ni_syscall), %l7
	b	syscall_is_too_hard
	 or	%l7, %lo(sys_ni_syscall), %l7

linux_fast_syscall:
	andn	%l7, 3, %l7
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	jmpl	%l7 + %g0, %g0
	 mov	%i3, %o3

linux_syscall_trace:
	call	syscall_trace
	 nop
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
	b	2f
	 mov	%i4, %o4

	.globl	ret_from_fork
ret_from_fork:
	call	schedule_tail
	 mov	%g3, %o0
	b	ret_sys_call
	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0

	/* Linux native system calls enter here... */
	.align	4
	.globl	linux_sparc_syscall
linux_sparc_syscall:
	/* Direct access to user regs, much faster. */
	cmp	%g1, NR_SYSCALLS
	bgeu	linux_sparc_ni_syscall
	 sll	%g1, 2, %l4
	ld	[%l7 + %l4], %l7
	andcc	%l7, 1, %g0
	bne	linux_fast_syscall
	/* Just do first insn from SAVE_ALL in the delay slot */

	.globl	syscall_is_too_hard
syscall_is_too_hard:
	SAVE_ALL_HEAD
	 rd	%wim, %l3

	wr	%l0, PSR_ET, %psr
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2

	ld	[%curptr + TI_FLAGS], %l5
	mov	%i3, %o3
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	mov	%i4, %o4
	bne	linux_syscall_trace
	 mov	%i0, %l5
2:
	call	%l7
	 mov	%i5, %o5

	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]

	/* Common syscall exit: error is signalled to userspace via the
	 * carry bit in the saved %psr (cleared on success, set on failure
	 * with abs(errno) in %o0).
	 */
ret_sys_call:
	ld	[%curptr + TI_FLAGS], %l6
	cmp	%o0, -ERESTART_RESTARTBLOCK
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
	set	PSR_C, %g2
	bgeu	1f
	 andcc	%l6, _TIF_SYSCALL_TRACE, %g0

	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	clr	%l6
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
	bne	linux_syscall_trace2
	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc = npc */
	add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub	%g0, %o0, %o0
	or	%g3, %g2, %g3
	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
	mov	1, %l6
	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
	bne	linux_syscall_trace2
	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc = npc */
	add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

linux_syscall_trace2:
	call	syscall_trace
	 add	%l1, 0x4, %l2				/* npc = npc+4 */
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	b	ret_trap_entry
	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]


/* Saving and restoring the FPU state is best done from lowlevel code.
 *
 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
 *             void *fpqueue, unsigned long *fpqdepth)
 */

	.globl	fpsave
fpsave:
	st	%fsr, [%o1]	! this can trap on us if fpu is in bogon state
	ld	[%o1], %g1
	set	0x2000, %g4	! FSR queue-not-empty bit
	andcc	%g1, %g4, %g0
	be	2f
	 mov	0, %g2

	/* We have an fpqueue to save. */
1:
	std	%fq, [%o2]
fpsave_magic:
	st	%fsr, [%o1]
	ld	[%o1], %g3
	andcc	%g3, %g4, %g0
	add	%g2, 1, %g2
	bne	1b
	 add	%o2, 8, %o2

2:
	st	%g2, [%o3]	! *fpqdepth = number of queue entries saved

	std	%f0, [%o0 + 0x00]
	std	%f2, [%o0 + 0x08]
	std	%f4, [%o0 + 0x10]
	std	%f6, [%o0 + 0x18]
	std	%f8, [%o0 + 0x20]
	std	%f10, [%o0 + 0x28]
	std	%f12, [%o0 + 0x30]
	std	%f14, [%o0 + 0x38]
	std	%f16, [%o0 + 0x40]
	std	%f18, [%o0 + 0x48]
	std	%f20, [%o0 + 0x50]
	std	%f22, [%o0 + 0x58]
	std	%f24, [%o0 + 0x60]
	std	%f26, [%o0 + 0x68]
	std	%f28, [%o0 + 0x70]
	retl
	 std	%f30, [%o0 + 0x78]

	/* Thanks to Theo Deraadt and the authors of the Sprite/netbsd/openbsd
	 * code for pointing out this possible deadlock, while we save state
	 * above we could trap on the fsr store so our low level fpu trap
	 * code has to know how to deal with this.
1505 */ 1506fpsave_catch: 1507 b fpsave_magic + 4 1508 st %fsr, [%o1] 1509 1510fpsave_catch2: 1511 b fpsave + 4 1512 st %fsr, [%o1] 1513 1514 /* void fpload(unsigned long *fpregs, unsigned long *fsr); */ 1515 1516 .globl fpload 1517fpload: 1518 ldd [%o0 + 0x00], %f0 1519 ldd [%o0 + 0x08], %f2 1520 ldd [%o0 + 0x10], %f4 1521 ldd [%o0 + 0x18], %f6 1522 ldd [%o0 + 0x20], %f8 1523 ldd [%o0 + 0x28], %f10 1524 ldd [%o0 + 0x30], %f12 1525 ldd [%o0 + 0x38], %f14 1526 ldd [%o0 + 0x40], %f16 1527 ldd [%o0 + 0x48], %f18 1528 ldd [%o0 + 0x50], %f20 1529 ldd [%o0 + 0x58], %f22 1530 ldd [%o0 + 0x60], %f24 1531 ldd [%o0 + 0x68], %f26 1532 ldd [%o0 + 0x70], %f28 1533 ldd [%o0 + 0x78], %f30 1534 ld [%o1], %fsr 1535 retl 1536 nop 1537 1538 /* __ndelay and __udelay take two arguments: 1539 * 0 - nsecs or usecs to delay 1540 * 1 - per_cpu udelay_val (loops per jiffy) 1541 * 1542 * Note that ndelay gives HZ times higher resolution but has a 10ms 1543 * limit. udelay can handle up to 1s. 1544 */ 1545 .globl __ndelay 1546__ndelay: 1547 save %sp, -STACKFRAME_SZ, %sp 1548 mov %i0, %o0 1549 call .umul ! round multiplier up so large ns ok 1550 mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ) 1551 call .umul 1552 mov %i1, %o1 ! udelay_val 1553 ba delay_continue 1554 mov %o1, %o0 ! >>32 later for better resolution 1555 1556 .globl __udelay 1557__udelay: 1558 save %sp, -STACKFRAME_SZ, %sp 1559 mov %i0, %o0 1560 sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok 1561 call .umul 1562 or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000 1563 call .umul 1564 mov %i1, %o1 ! udelay_val 1565 sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32, 1566 or %g0, %lo(0x028f4b62), %l0 1567 addcc %o0, %l0, %o0 ! 2**32 * 0.009 999 1568 bcs,a 3f 1569 add %o1, 0x01, %o1 15703: 1571 call .umul 1572 mov HZ, %o0 ! 
>>32 earlier for wider range 1573 1574delay_continue: 1575 cmp %o0, 0x0 15761: 1577 bne 1b 1578 subcc %o0, 1, %o0 1579 1580 ret 1581 restore 1582 1583 /* Handle a software breakpoint */ 1584 /* We have to inform parent that child has stopped */ 1585 .align 4 1586 .globl breakpoint_trap 1587breakpoint_trap: 1588 rd %wim,%l3 1589 SAVE_ALL 1590 wr %l0, PSR_ET, %psr 1591 WRITE_PAUSE 1592 1593 st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls 1594 call sparc_breakpoint 1595 add %sp, STACKFRAME_SZ, %o0 1596 1597 RESTORE_ALL 1598 1599 .align 4 1600 .globl __handle_exception, flush_patch_exception 1601__handle_exception: 1602flush_patch_exception: 1603 FLUSH_ALL_KERNEL_WINDOWS; 1604 ldd [%o0], %o6 1605 jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h 1606 mov 1, %g1 ! signal EFAULT condition 1607 1608 .align 4 1609 .globl kill_user_windows, kuw_patch1_7win 1610 .globl kuw_patch1 1611kuw_patch1_7win: sll %o3, 6, %o3 1612 1613 /* No matter how much overhead this routine has in the worst 1614 * case scenerio, it is several times better than taking the 1615 * traps with the old method of just doing flush_user_windows(). 1616 */ 1617kill_user_windows: 1618 ld [%g6 + TI_UWINMASK], %o0 ! get current umask 1619 orcc %g0, %o0, %g0 ! if no bits set, we are done 1620 be 3f ! nothing to do 1621 rd %psr, %o5 ! must clear interrupts 1622 or %o5, PSR_PIL, %o4 ! or else that could change 1623 wr %o4, 0x0, %psr ! the uwinmask state 1624 WRITE_PAUSE ! burn them cycles 16251: 1626 ld [%g6 + TI_UWINMASK], %o0 ! get consistent state 1627 orcc %g0, %o0, %g0 ! did an interrupt come in? 1628 be 4f ! yep, we are done 1629 rd %wim, %o3 ! get current wim 1630 srl %o3, 1, %o4 ! simulate a save 1631kuw_patch1: 1632 sll %o3, 7, %o3 ! compute next wim 1633 or %o4, %o3, %o3 ! result 1634 andncc %o0, %o3, %o0 ! clean this bit in umask 1635 bne kuw_patch1 ! not done yet 1636 srl %o3, 1, %o4 ! begin another save simulation 1637 wr %o3, 0x0, %wim ! 
set the new wim 1638 st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask 16394: 1640 wr %o5, 0x0, %psr ! re-enable interrupts 1641 WRITE_PAUSE ! burn baby burn 16423: 1643 retl ! return 1644 st %g0, [%g6 + TI_W_SAVED] ! no windows saved 1645 1646 .align 4 1647 .globl restore_current 1648restore_current: 1649 LOAD_CURRENT(g6, o0) 1650 retl 1651 nop 1652 1653#ifdef CONFIG_PCI 1654#include <asm/pcic.h> 1655 1656 .align 4 1657 .globl linux_trap_ipi15_pcic 1658linux_trap_ipi15_pcic: 1659 rd %wim, %l3 1660 SAVE_ALL 1661 1662 /* 1663 * First deactivate NMI 1664 * or we cannot drop ET, cannot get window spill traps. 1665 * The busy loop is necessary because the PIO error 1666 * sometimes does not go away quickly and we trap again. 1667 */ 1668 sethi %hi(pcic_regs), %o1 1669 ld [%o1 + %lo(pcic_regs)], %o2 1670 1671 ! Get pending status for printouts later. 1672 ld [%o2 + PCI_SYS_INT_PENDING], %o0 1673 1674 mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1 1675 stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR] 16761: 1677 ld [%o2 + PCI_SYS_INT_PENDING], %o1 1678 andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0 1679 bne 1b 1680 nop 1681 1682 or %l0, PSR_PIL, %l4 1683 wr %l4, 0x0, %psr 1684 WRITE_PAUSE 1685 wr %l4, PSR_ET, %psr 1686 WRITE_PAUSE 1687 1688 call pcic_nmi 1689 add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs 1690 RESTORE_ALL 1691 1692 .globl pcic_nmi_trap_patch 1693pcic_nmi_trap_patch: 1694 sethi %hi(linux_trap_ipi15_pcic), %l3 1695 jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0 1696 rd %psr, %l0 1697 .word 0 1698 1699#endif /* CONFIG_PCI */ 1700 1701/* End of entry.S */ 1702