/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k so that all functions modifying srr0/srr1 fit into one
 * page, to avoid a TLB miss between the modification of srr0/srr1 and
 * the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

.macro	clr_ri trash
#ifndef CONFIG_BOOKE
#ifdef CONFIG_PPC_8xx
	mtspr	SPRN_NRI, \trash
#else
	li	\trash, MSR_KERNEL & ~MSR_RI
	mtmsr	\trash
#endif
#endif
.endm
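
/*
 * Usage sketch for clr_ri (illustrative only, mirroring
 * syscall_exit_finish below):
 *
 *	clr_ri	r4		# r4 is only scratch here
 *	mtspr	SPRN_SRR0,r7
 *	mtspr	SPRN_SRR1,r8
 *	rfi
 *
 * With MSR_RI clear, an exception taken between the mtspr pair and the
 * rfi (which would overwrite SRR0/SRR1) is flagged as unrecoverable
 * instead of silently corrupting the return context. On the 8xx this
 * is done by writing to SPRN_NRI, where the value written is
 * irrelevant (hence the \trash argument); on Book E the macro
 * intentionally expands to nothing.
 */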

	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha	/* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

ret_from_syscall:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	clr_ri	r4
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b
_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)

#ifdef CONFIG_44x
.L44x_icache_flush:
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif /* CONFIG_44x */

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0		/* fork() return value */
	b	ret_from_syscall

	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process, which is what the kernel
	 * thread should have done; that may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

	.globl	fast_exception_return
fast_exception_return:
#ifndef CONFIG_BOOKE
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	clr_ri	r10
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(10, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	bl	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */
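
/*
 * Sketch of the C-side contract assumed by the return paths above and
 * below. The prototypes are paraphrased from
 * arch/powerpc/kernel/interrupt.c and may lag the current tree:
 *
 *	long system_call_exception(struct pt_regs *regs, unsigned long r0);
 *	unsigned long syscall_exit_prepare(unsigned long r3,
 *					   struct pt_regs *regs, long scv);
 *	unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
 *	unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
 *
 * syscall_exit_prepare() deposits the syscall return value in
 * regs->gpr[3] (hence the REST_GPR(3, r1) in ret_from_syscall) and
 * returns non-zero when the non-volatile GPRs must also be restored.
 * Likewise, a non-zero result from the interrupt_exit_*_prepare()
 * calls below selects extra exit work: restoring the non-volatile GPRs
 * on the user path, or emulating an interrupted stack store on the
 * kernel path.
 */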

	.globl	interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	clr_ri	r4
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	lwarx	r0,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	clr_ri	r4
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	lwarx	r0,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f			/* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

1:	/*
	 * Emulate stack store with update. New r1 value was already
	 * calculated and updated in our interrupt regs by
	 * emulate_loadstore, but we can't store the previous value of r1
	 * to the stack before re-loading our registers from it,
	 * otherwise they could be clobbered. Use SPRG Scratch0 as
	 * temporary storage to hold the store data, as interrupts are
	 * disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1)		/* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
_ASM_NOKPROBE_SYMBOL(interrupt_return)
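
/*
 * Worked example for the stack store emulation above (illustrative
 * only; the real displacement is whatever the interrupted code used).
 * Suppose the kernel was interrupted on
 *
 *	stwu	r1,-0x20(r1)
 *
 * emulate_loadstore() has already placed the decremented value
 * (old r1 - 0x20) in the saved GPR1 image, so REST_GPR(1, r1) loads
 * the post-update r1. The pre-update r1 is recomputed as "interrupt
 * frame address + INT_FRAME_SIZE", since the frame was pushed
 * immediately below the interrupted r1, and storing it at 0(r1)
 * completes the store half of the stwu.
 */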

#ifdef CONFIG_BOOKE

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)	\
	lwz	r9,_##exc_lvl_srr0(r1);			\
	lwz	r10,_##exc_lvl_srr1(r1);		\
	mtspr	SPRN_##exc_lvl_srr0,r9;			\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7			\
	lwz	r11,MAS7(r1);		\
	mtspr	SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS		\
	lwz	r9,MAS0(r1);		\
	lwz	r10,MAS1(r1);		\
	lwz	r11,MAS2(r1);		\
	mtspr	SPRN_MAS0,r9;		\
	lwz	r9,MAS3(r1);		\
	mtspr	SPRN_MAS1,r10;		\
	lwz	r10,MAS6(r1);		\
	mtspr	SPRN_MAS2,r11;		\
	mtspr	SPRN_MAS3,r9;		\
	mtspr	SPRN_MAS6,r10;		\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS		\
	lwz	r9,MMUCR(r1);		\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
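
/*
 * Note on the three entry points above (informational): each Book E
 * exception level returns with its own instruction and save/restore
 * pair: rfci uses CSRR0/CSRR1, rfdi uses DSRR0/DSRR1 and rfmci uses
 * MCSRR0/MCSRR1. A higher-level exception (or a TLB miss taken by its
 * handler) can clobber the lower-level save/restore SPRs while they
 * hold live values, which is why ret_from_debug_exc reloads SRR0/SRR1
 * and CSRR0/CSRR1, and ret_from_mcheck_exc additionally reloads
 * DSRR0/DSRR1, before issuing the level-specific return.
 */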