/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text

| fork/clone/vfork: push the extra call-saved registers with
| SAVE_SWITCH_STACK, then pass the C implementation a pointer to the
| exception frame that sits just above that switch stack.  The pushed
| argument is popped again before the switch stack is restored.
ENTRY(sys_fork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| &pt_regs above the switch stack
	jbsr	m68k_fork
	addql	#4,%sp			| pop the frame-pointer argument
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| &pt_regs above the switch stack
	jbsr	m68k_clone
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_vfork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| &pt_regs above the switch stack
	jbsr	m68k_vfork
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

| sigreturn: the full switch stack is saved around the call so the C
| handler can rewrite the complete register set (presumably it locates
| the frame via the stack pointer -- do_sigreturn takes no explicit
| argument here).
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts
| rt_sigreturn: same convention as sys_sigreturn above -- save/restore
| the full switch stack around the C handler.
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts

| Bus-error entry: save the full interrupt frame and hand the C handler
| a pointer to it.
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

| Generic trap entry: same shape as buserr, dispatched to trap_c.
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| pass previous task to schedule_tail
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

| Tail-call schedule() with ret_from_exception as its return address.
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception	| fake return address for schedule
	jmp	schedule

| Userspace trampolines: re-enter the kernel via trap #0 with the
| sigreturn / rt_sigreturn syscall number in %d0.
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

| Syscall entry when tracing is active: report to the tracer first,
| then re-read the (possibly modified) syscall number and re-validate.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| number in range: dispatch it
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)	| NOTE(review): sign bit presumably
	jge	1f			| marks syscall tracing -- confirm
	jbsr	syscall_trace		| against the thread_info flag layout
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

| Main system-call entry (trap #0): %d0 holds the syscall number.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| out-of-range number -> -ENOSYS
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| call sys_call_table[%d0]
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| any work pending?
	jne	syscall_exit_work
1:	RESTORE_ALL

| Slow syscall-exit path: classify the pending-work flags by shifting
| them into the condition codes (which flag maps to which bit depends
| on the TINFO_FLAGS layout -- see thread_info).
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit		| shifted-out bit: syscall trace
	jmi	do_delayed_trace	| new sign bit: delayed trace
	lslw	#8,%d0
	jne	do_signal_return	| signal work pending
	pea	resume_userspace	| else: reschedule, then resume
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low byte of the work flags
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace	| fake return address for schedule
	jra	schedule


| Deliver pending signals / notifications, then retry the userspace
| return (flags are re-checked at resume_userspace).
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop the dummy return address
	jbra	resume_userspace
| Single-step trace was postponed: clear the trace bit in the saved SR
| and send SIGTRAP to the current task via send_sig(LSIGTRAP, current, 1).
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp			| pop the three arguments
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0		| vector # -> IRQ number

	movel	%sp,%sp@-		| pt_regs argument
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2		| operand of the jsr, presumably
	jsr	do_IRQ			| patched at runtime (fixup) --
	addql	#8,%sp			| pop parameters off stack

ret_from_interrupt:
	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)	| drop nesting count
	jeq	ret_from_last_interrupt
2:	RESTORE_ALL

	ALIGN
| Last (outermost) interrupt on this stack: maybe run softirqs, but
| only if the interrupted context had interrupts enabled.
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0	| interrupts masked in old SR?
	jne	2b			| yes -> just restore and return

	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception	| fake return address for do_softirq
	jra	do_softirq

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2		| immediate of the subw below,
	subw	#VEC_USER,%d0		| presumably patched at runtime

	movel	%sp,%sp@-		| pt_regs argument
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)	| drop nesting count
	jeq	ret_from_last_interrupt
	RESTORE_ALL

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count

	movel	%sp,%sp@-		| pt_regs argument
	jsr	handle_badint
	addql	#4,%sp

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)	| drop nesting count
	jeq	ret_from_last_interrupt
	RESTORE_ALL


| Task switch: save prev's (%a0) SR/fs/usp/registers/FP state, switch
| %curptr to next (%a1), then restore next's state and return on its
| kernel stack.  Returns prev in %d1 (consumed by ret_from_fork).
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1,so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> skip FP save
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: no FP regs to save
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: no FP regs to save
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> skip FP restore
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: nothing to reload
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: nothing to reload
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */