#ifndef _ASM_POWERPC_EXCEPTION_H
#define _ASM_POWERPC_EXCEPTION_H
/*
 * Extracted from head_64.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_DAR		48
#define EX_DSISR	56
#define EX_CCR		60
#define EX_R3		64
#define EX_LR		72
#define EX_CFAR		80

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal SET_REG_IMMEDIATE macro.  Normally we just need the
 * low halfword of the address, but for Kdump we need the whole low
 * word.
 */
#define LOAD_HANDLER(reg, label)	\
	addi	reg,reg,(label)-_stext;	/* virt addr of handler ... */

/* Exception register prefixes */
#define EXC_HV	H
#define EXC_STD

#define __EXCEPTION_PROLOG_1(area, extra, vec)	\
	GET_PACA(r13);	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */	\
	std	r10,area+EX_R10(r13);	\
	BEGIN_FTR_SECTION_NESTED(66);	\
	mfspr	r10,SPRN_CFAR;	\
	std	r10,area+EX_CFAR(r13);	\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	\
	mfcr	r9;	\
	extra(vec);	\
	std	r11,area+EX_R11(r13);	\
	std	r12,area+EX_R12(r13);	\
	GET_SCRATCH0(r10);	\
	std	r10,area+EX_R13(r13)
#define EXCEPTION_PROLOG_1(area, extra, vec)	\
	__EXCEPTION_PROLOG_1(area, extra, vec)

#define __EXCEPTION_PROLOG_PSERIES_1(label, h)	\
	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */	\
	LOAD_HANDLER(r12,label)	\
	mtspr	SPRN_##h##SRR0,r12;	\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */	\
	mtspr	SPRN_##h##SRR1,r10;	\
	h##rfid;	\
	b	.	/* prevent speculative execution */
#define EXCEPTION_PROLOG_PSERIES_1(label, h)	\
	__EXCEPTION_PROLOG_PSERIES_1(label, h)

#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec)	\
	EXCEPTION_PROLOG_1(area, extra, vec);	\
	EXCEPTION_PROLOG_PSERIES_1(label, h);
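/*
 * Illustrative sketch only (this invocation is not made in this header;
 * the real users are the exception vectors in exceptions-64s.S, and the
 * "foo_common" label and 0x600 vector below are hypothetical):
 *
 *	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, foo_common, EXC_STD, NOTEST, 0x600)
 *
 * first expands EXCEPTION_PROLOG_1, which saves r9-r13, CR and (on CPUs
 * that have it) CFAR into the PACA_EXGEN save area of the paca, then
 * EXCEPTION_PROLOG_PSERIES_1, which loads the kernel virtual address of
 * foo_common into SRR0, the kernel MSR into SRR1 and rfid's to the
 * handler, leaving the interrupted SRR0/SRR1 in r11/r12 for the common
 * prolog.  With EXC_HV the same code uses HSRR0/HSRR1 and hrfid.
 */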

#define __KVMTEST(n)	\
	lbz	r10,HSTATE_IN_GUEST(r13);	\
	cmpwi	r10,0;	\
	bne	do_kvm_##n

#define __KVM_HANDLER(area, h, n)	\
do_kvm_##n:	\
	ld	r10,area+EX_R10(r13);	\
	stw	r9,HSTATE_SCRATCH1(r13);	\
	ld	r9,area+EX_R9(r13);	\
	std	r12,HSTATE_SCRATCH0(r13);	\
	li	r12,n;	\
	b	kvmppc_interrupt

#define __KVM_HANDLER_SKIP(area, h, n)	\
do_kvm_##n:	\
	cmpwi	r10,KVM_GUEST_MODE_SKIP;	\
	ld	r10,area+EX_R10(r13);	\
	beq	89f;	\
	stw	r9,HSTATE_SCRATCH1(r13);	\
	ld	r9,area+EX_R9(r13);	\
	std	r12,HSTATE_SCRATCH0(r13);	\
	li	r12,n;	\
	b	kvmppc_interrupt;	\
89:	mtocrf	0x80,r9;	\
	ld	r9,area+EX_R9(r13);	\
	b	kvmppc_skip_##h##interrupt

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#define KVMTEST(n)			__KVMTEST(n)
#define KVM_HANDLER(area, h, n)		__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)

#else
#define KVMTEST(n)
#define KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)
#endif

#ifdef CONFIG_KVM_BOOK3S_PR
#define KVMTEST_PR(n)			__KVMTEST(n)
#define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)

#else
#define KVMTEST_PR(n)
#define KVM_HANDLER_PR(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n)
#endif

#define NOTEST(n)
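/*
 * Pairing sketch, with a hypothetical vector number for illustration:
 * a vector whose prolog passes KVMTEST_PR as the "extra" argument needs
 * a matching stub emitted elsewhere (exceptions-64s.S does this), e.g.
 *
 *	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
 *
 * When HSTATE_IN_GUEST is non-zero the prolog branches to do_kvm_0x700,
 * which reloads r9/r10 from the save area, stashes the saved CR and r12
 * in HSTATE_SCRATCH1/HSTATE_SCRATCH0, puts the vector number in r12 and
 * branches to kvmppc_interrupt.  NOTEST is for vectors that need no
 * extra test in the prolog.
 */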

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)	\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */	\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;	\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	blt+	cr1,3f;			/* abort if it is */	\
	li	r1,(n);			/* will be reloaded later */	\
	sth	r1,PACA_TRAP_SAVE(r13);	\
	std	r3,area+EX_R3(r13);	\
	addi	r3,r13,area;		/* r3 -> where regs are saved*/	\
	b	bad_stack;	\
3:	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	ACCOUNT_CPU_USER_ENTRY(r9, r10);	\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);	\
	std	r9,GPR9(r1);	\
	std	r10,GPR10(r1);	\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);	\
	ld	r11,area+EX_R13(r13);	\
	std	r9,GPR11(r1);	\
	std	r10,GPR12(r1);	\
	std	r11,GPR13(r1);	\
	BEGIN_FTR_SECTION_NESTED(66);	\
	ld	r10,area+EX_CFAR(r13);	\
	std	r10,ORIG_GPR3(r1);	\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	mflr	r9;			/* save LR in stackframe */	\
	std	r9,_LINK(r1);	\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);	\
	lbz	r10,PACASOFTIRQEN(r13);	\
	mfspr	r11,SPRN_XER;		/* save XER in stackframe */	\
	std	r10,SOFTE(r1);	\
	std	r11,_XER(r1);	\
	li	r9,(n)+1;	\
	std	r9,_TRAP(r1);		/* set trap number */	\
	li	r10,0;	\
	ld	r11,exception_marker@toc(r2);	\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	\
	ACCOUNT_STOLEN_TIME

/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(loc, vec, label)	\
	. = loc;	\
	.globl label##_pSeries;	\
label##_pSeries:	\
	HMT_MEDIUM;	\
	SET_SCRATCH0(r13);		/* save r13 */	\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
				 EXC_STD, KVMTEST_PR, vec)

#define STD_EXCEPTION_HV(loc, vec, label)	\
	. = loc;	\
	.globl label##_hv;	\
label##_hv:	\
	HMT_MEDIUM;	\
	SET_SCRATCH0(r13);		/* save r13 */	\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
				 EXC_HV, KVMTEST, vec)
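/*
 * Usage sketch (illustrative arguments; the actual vector layout lives
 * in exceptions-64s.S): a straightforward fixed-offset exception is
 * declared roughly as
 *
 *	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
 *	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
 *
 * which places the prolog at offset 0x600, defines alignment_pSeries
 * and transfers to alignment_common, whose body is generated separately
 * by the STD_EXCEPTION_COMMON* macros below.  The HV variant does the
 * same but defines label##_hv and uses HSRR0/HSRR1 and hrfid.
 */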

/* This associates vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
#define SOFTEN_VALUE_0x502	PACA_IRQ_EE
#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
#define SOFTEN_VALUE_0x982	PACA_IRQ_DEC

#define __SOFTEN_TEST(h, vec)	\
	lbz	r10,PACASOFTIRQEN(r13);	\
	cmpwi	r10,0;	\
	li	r10,SOFTEN_VALUE_##vec;	\
	beq	masked_##h##interrupt
#define _SOFTEN_TEST(h, vec)	__SOFTEN_TEST(h, vec)

#define SOFTEN_TEST_PR(vec)	\
	KVMTEST_PR(vec);	\
	_SOFTEN_TEST(EXC_STD, vec)

#define SOFTEN_TEST_HV(vec)	\
	KVMTEST(vec);	\
	_SOFTEN_TEST(EXC_HV, vec)

#define SOFTEN_TEST_HV_201(vec)	\
	KVMTEST(vec);	\
	_SOFTEN_TEST(EXC_STD, vec)

#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)	\
	HMT_MEDIUM;	\
	SET_SCRATCH0(r13);		/* save r13 */	\
	__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec);	\
	EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)	\
	__MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)

#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label)	\
	. = loc;	\
	.globl label##_pSeries;	\
label##_pSeries:	\
	_MASKABLE_EXCEPTION_PSERIES(vec, label,	\
				    EXC_STD, SOFTEN_TEST_PR)

#define MASKABLE_EXCEPTION_HV(loc, vec, label)	\
	. = loc;	\
	.globl label##_hv;	\
label##_hv:	\
	_MASKABLE_EXCEPTION_PSERIES(vec, label,	\
				    EXC_HV, SOFTEN_TEST_HV)

/*
 * Our exception common code can be passed various "additions"
 * to specify the behaviour of interrupts, whether to kick the
 * runlatch, etc...
 */

/* Exception addition: Hard disable interrupts */
#define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)

#define ADD_NVGPRS	\
	bl	.save_nvgprs

#define RUNLATCH_ON	\
BEGIN_FTR_SECTION	\
	CURRENT_THREAD_INFO(r3, r1);	\
	ld	r4,TI_LOCAL_FLAGS(r3);	\
	andi.	r0,r4,_TLF_RUNLATCH;	\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)

#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions)	\
	.align	7;	\
	.globl label##_common;	\
label##_common:	\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	additions;	\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	bl	hdlr;	\
	b	ret

#define STD_EXCEPTION_COMMON(trap, label, hdlr)	\
	EXCEPTION_COMMON(trap, label, hdlr, ret_from_except,	\
			 ADD_NVGPRS;DISABLE_INTS)

/*
 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
 * in the idle task and therefore need the special idle handling
 * (finish nap and runlatch)
 */
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)	\
	EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite,	\
			 FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
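/*
 * Illustrative use of the common-part generators (example arguments,
 * not definitions made here): the hypothetical alignment vector above
 * would get its second half from
 *
 *	STD_EXCEPTION_COMMON(0x600, alignment, .alignment_exception)
 *
 * which emits alignment_common: build the pt_regs frame with
 * EXCEPTION_PROLOG_COMMON, save the non-volatile GPRs, hard-disable
 * interrupts, call the C handler with r3 pointing at the register frame
 * and return through ret_from_except.  Interrupts that must also wake
 * a napping CPU use STD_EXCEPTION_COMMON_ASYNC instead.
 */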

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP	\
BEGIN_FTR_SECTION	\
	CURRENT_THREAD_INFO(r11, r1);	\
	ld	r9,TI_LOCAL_FLAGS(r11);	\
	andi.	r10,r9,_TLF_NAPPING;	\
	bnel	power4_fixup_nap;	\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

#endif	/* _ASM_POWERPC_EXCEPTION_H */