/*
 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
 */
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H

#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
#include <asm/feature-fixups.h>
#include <asm/extable.h>

#ifdef __ASSEMBLY__

#define SZL			(BITS_PER_LONG/8)

/*
 * This expands to a sequence of operations with reg incrementing from
 * start to end inclusive, of this form:
 *
 *	op	reg, (offset + (width * reg))(base)
 *
 * Note that offset is not the offset of the first operation unless start
 * is zero (or width is zero).
 */
.macro OP_REGS op, width, start, end, base, offset
	.Lreg=\start
	.rept (\end - \start + 1)
	\op	.Lreg, \offset + \width * .Lreg(\base)
	.Lreg=.Lreg+1
	.endr
.endm

/*
 * This expands to a sequence of register clears for regs start to end
 * inclusive, of the form:
 *
 *	li	rN, 0
 */
.macro ZEROIZE_REGS start, end
	.Lreg=\start
	.rept (\end - \start + 1)
	li	.Lreg, 0
	.Lreg=.Lreg+1
	.endr
.endm

/*
 * Macros for storing registers into and loading registers from
 * exception frames.
 */
#ifdef __powerpc64__
#define SAVE_GPRS(start, end, base)	OP_REGS std, 8, start, end, base, GPR0
#define REST_GPRS(start, end, base)	OP_REGS ld, 8, start, end, base, GPR0
#define SAVE_NVGPRS(base)		SAVE_GPRS(14, 31, base)
#define REST_NVGPRS(base)		REST_GPRS(14, 31, base)
#else
#define SAVE_GPRS(start, end, base)	OP_REGS stw, 4, start, end, base, GPR0
#define REST_GPRS(start, end, base)	OP_REGS lwz, 4, start, end, base, GPR0
#define SAVE_NVGPRS(base)		SAVE_GPRS(13, 31, base)
#define REST_NVGPRS(base)		REST_GPRS(13, 31, base)
#endif

#define ZEROIZE_GPRS(start, end)	ZEROIZE_REGS start, end
#ifdef __powerpc64__
#define ZEROIZE_NVGPRS()		ZEROIZE_GPRS(14, 31)
#else
#define ZEROIZE_NVGPRS()		ZEROIZE_GPRS(13, 31)
#endif
#define ZEROIZE_GPR(n)			ZEROIZE_GPRS(n, n)

#define SAVE_GPR(n, base)		SAVE_GPRS(n, n, base)
#define REST_GPR(n, base)		REST_GPRS(n, n, base)
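/*
 * For illustration only: with the 64-bit definitions above, a call such
 * as SAVE_GPRS(14, 15, r1) goes through OP_REGS and expands to
 *
 *	std	14, GPR0 + 8 * 14(r1)
 *	std	15, GPR0 + 8 * 15(r1)
 *
 * i.e. one store per register, assuming GPR0 is the asm-offsets constant
 * for the offset of the gpr[0] save slot from the base register.
 */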
#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)

#define SAVE_VR(n,b,base)	li b,16*(n); stvx n,base,b
#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)

#ifdef __BIG_ENDIAN__
#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
#else
#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
					STXVD2X(n,b,base);	\
					XXSWAPD(n,n)

#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
					XXSWAPD(n,n)
#endif
/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base)	li b,16*(n); STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)

/*
 * b = base register for addressing, o = base offset from register of 1st EVR
 * n = first EVR, s = scratch
 */
#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)

/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW	or	31,31,31	# very low priority
#define HMT_LOW		or	1,1,1
#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority
#define HMT_MEDIUM	or	2,2,2
#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority
#define HMT_HIGH	or	3,3,3
#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only

#ifdef CONFIG_PPC64
#define ULONG_SIZE	8
#else
#define ULONG_SIZE	4
#endif
#define __VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_GPR(n)		__VCPU_GPR(__REG_##n)
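/*
 * For illustration only: __REG_Rn (from ppc-opcode.h) resolves to the
 * plain register number n, so VCPU_GPR(R3) becomes
 * (VCPU_GPRS + (3 * ULONG_SIZE)), i.e. the byte offset of guest GPR3
 * within the vcpu structure (VCPU_GPRS being the asm-offsets constant
 * for the guest GPR array).  It is then suitable as a load/store
 * displacement, e.g.
 *
 *	PPC_STL	r3, VCPU_GPR(R3)(r9)	/# assuming r9 holds the vcpu pointer
 */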
#ifdef __KERNEL__

/*
 * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
 * version below in the else case of the ifdef.
 */
#ifdef __powerpc64__

#define STACKFRAMESIZE	256
#define __STK_REG(i)	(112 + ((i)-14)*8)
#define STK_REG(i)	__STK_REG(__REG_##i)

#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STK_GOT		24
#define __STK_PARAM(i)	(32 + ((i)-3)*8)
#else
#define STK_GOT		40
#define __STK_PARAM(i)	(48 + ((i)-3)*8)
#endif
#define STK_PARAM(i)	__STK_PARAM(__REG_##i)

#ifdef CONFIG_PPC64_ELF_ABI_V2

#define _GLOBAL(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define _GLOBAL_TOC(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name: \
0:	addis r2,r12,(.TOC.-0b)@ha; \
	addi r2,r2,(.TOC.-0b)@l; \
	.localentry name,.-name

#define DOTSYM(a)	a

#else

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#define _GLOBAL(name) \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.pushsection ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.popsection; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define DOTSYM(a)	GLUE(.,a)

#endif

#else /* 32-bit */

#define _GLOBAL(n) \
	.globl n; \
n:

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define DOTSYM(a)	a

#endif

/*
 * __kprobes (the C annotation) puts the symbol into the .kprobes.text
 * section, which gets emitted at the end of regular text.
 *
 * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
 * a blacklist.  The former approach (__kprobes) is for core kprobe
 * functions/data, the blacklist is for symbols that incidentally must
 * be excluded from probing, and it allows them to be linked at a more
 * optimal location within text.
 */
#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE_SYMBOL(entry)		\
	.pushsection "_kprobe_blacklist","aw";	\
	PPC_LONG (entry) ;			\
	.popsection
#else
#define _ASM_NOKPROBE_SYMBOL(entry)
#endif

#define FUNC_START(name)	_GLOBAL(name)
#define FUNC_END(name)
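/*
 * For illustration only (hypothetical routine name): a 64-bit assembly
 * function that needs the TOC and a non-volatile register might be
 * written with the helpers above as
 *
 *	_GLOBAL_TOC(my_helper)
 *		stdu	r1,-STACKFRAMESIZE(r1)
 *		std	r14,STK_REG(R14)(r1)
 *		...
 *		ld	r14,STK_REG(R14)(r1)
 *		addi	r1,r1,STACKFRAMESIZE
 *		blr
 *
 * On ELFv1, other assembly code calls such a routine via
 * "bl DOTSYM(my_helper)".
 */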
/*
 * LOAD_REG_IMMEDIATE(rn, expr)
 *   Loads the value of the constant expression 'expr' into register 'rn'
 *   using immediate instructions only.  Use this when it's important not
 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
 *   valid) and when 'expr' is a constant or absolute address.
 *
 * LOAD_REG_ADDR(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   you don't particularly need immediate instructions only, but you need
 *   the whole address in one register (e.g. it's a structure address and
 *   you want to access various offsets within it).  On ppc32 this is
 *   identical to LOAD_REG_IMMEDIATE.
 *
 * LOAD_REG_ADDR_PIC(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   the kernel doesn't run at the linked or relocated address.  Please
 *   note that this macro will clobber the lr register.
 *
 * LOAD_REG_ADDRBASE(rn, name)
 * ADDROFF(name)
 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
 *   in size, so is suitable for use directly as an offset in load and store
 *   instructions.  Use this when loading/storing a single word or less as:
 *      LOAD_REG_ADDRBASE(rX, name)
 *      ld	rY,ADDROFF(name)(rX)
 */

/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name)		\
	bcl	20,31,$+4;			\
0:	mflr	reg;				\
	addis	reg,reg,(name - 0b)@ha;		\
	addi	reg,reg,(name - 0b)@l;

#if defined(__powerpc64__) && defined(HAVE_AS_ATHIGH)
#define __AS_ATHIGH high
#else
#define __AS_ATHIGH h
#endif

.macro __LOAD_REG_IMMEDIATE_32 r, x
	.if (\x) >= 0x8000 || (\x) < -0x8000
		lis \r, (\x)@__AS_ATHIGH
		.if (\x) & 0xffff != 0
			ori \r, \r, (\x)@l
		.endif
	.else
		li \r, (\x)@l
	.endif
.endm

.macro __LOAD_REG_IMMEDIATE r, x
	.if (\x) >= 0x80000000 || (\x) < -0x80000000
		__LOAD_REG_IMMEDIATE_32 \r, (\x) >> 32
		sldi	\r, \r, 32
		.if (\x) & 0xffff0000 != 0
			oris \r, \r, (\x)@__AS_ATHIGH
		.endif
		.if (\x) & 0xffff != 0
			ori \r, \r, (\x)@l
		.endif
	.else
		__LOAD_REG_IMMEDIATE_32 \r, \x
	.endif
.endm

#ifdef __powerpc64__

#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr

#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr)	\
	lis	tmp, (expr)@highest;		\
	lis	reg, (expr)@__AS_ATHIGH;	\
	ori	tmp, tmp, (expr)@higher;	\
	ori	reg, reg, (expr)@l;		\
	rldimi	reg, tmp, 32, 0

#define LOAD_REG_ADDR(reg,name)			\
	ld	reg,name@got(r2)

#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
#define ADDROFF(name)			0

/* offsets for stack frame layout */
#define LRSAVE	16

#else /* 32-bit */

#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr

#define LOAD_REG_IMMEDIATE_SYM(reg,expr)	\
	lis	reg,(expr)@ha;			\
	addi	reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE_SYM(reg, name)

#define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
#define ADDROFF(name)			name@l

/* offsets for stack frame layout */
#define LRSAVE	4

#endif

/* various errata or part fixups */
#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)
#define MFTB(dest)			\
90:	mfspr dest, SPRN_TBRL;		\
BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#else
#define MFTB(dest)			MFTBL(dest)
#endif

#ifdef CONFIG_PPC_8xx
#define MFTBL(dest)			mftb dest
#define MFTBU(dest)			mftbu dest
#else
#define MFTBL(dest)			mfspr dest, SPRN_TBRL
#define MFTBU(dest)			mfspr dest, SPRN_TBRU
#endif

#ifndef CONFIG_SMP
#define TLBSYNC
#else
#define TLBSYNC		tlbsync; sync
#endif

#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS)			\
	BEGIN_FTR_SECTION_NESTED(848);	\
	mtcrf	(FXM), RS;		\
	FTR_SECTION_ELSE_NESTED(848);	\
	mtocrf	(FXM), RS;		\
	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
#endif
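/*
 * For illustration only: on 32-bit, reading a consistent 64-bit timebase
 * value needs the classic upper/lower/upper loop built from the macros
 * above (register choice here is arbitrary):
 *
 *	1:	MFTBU(r5)
 *		MFTBL(r4)
 *		MFTBU(r6)
 *		cmpw	r5,r6
 *		bne	1b
 */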
/*
 * This instruction is not implemented on the PPC 603 or 601; however, on
 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
 * All of these instructions exist in the 8xx, they have magical powers,
 * and they must be used.
 */

#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
#define tlbia					\
	li	r4,1024;			\
	mtctr	r4;				\
	lis	r4,KERNELBASE@h;		\
	.machine push;				\
	.machine "power4";			\
0:	tlbie	r4;				\
	.machine pop;				\
	addi	r4,r4,0x1000;			\
	bdnz	0b
#endif


#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
#define PPC440EP_ERR42
#endif

/*
 * The following stops all load and store data streams associated with stream
 * ID (ie. streams created explicitly).  The embedded and server mnemonics for
 * dcbt are different so this must only be used for server.
 */
#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch)	\
	lis	scratch,0x60000000@h;			\
	dcbt	0,scratch,0b01010

/*
 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
 * keep the address intact to be compatible with code shared with
 * 32-bit classic.
 *
 * On the other hand, I find it useful to have them behave as expected
 * by their name (ie always do the addition) on 64-bit BookE.
 */
#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)

/*
 * We use addis to ensure compatibility with the "classic" ppc versions of
 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
 * converting the address in r0, and so this version has to do that too
 * (i.e. set register rd to 0 when rs == 0).
 */
#define tophys(rd,rs)				\
	addis	rd,rs,0

#define tovirt(rd,rs)				\
	addis	rd,rs,0

#elif defined(CONFIG_PPC64)
#define toreal(rd)		/* we can access c000... in real mode */
#define fromreal(rd)

#define tophys(rd,rs)				\
	clrldi	rd,rs,2

#define tovirt(rd,rs)				\
	rotldi	rd,rs,16;			\
	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
	rotldi	rd,rd,48
#else
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)

#define tophys(rd, rs)	addis	rd, rs, -PAGE_OFFSET@h
#define tovirt(rd, rs)	addis	rd, rs, PAGE_OFFSET@h
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#define MTMSRD(r)	mtmsrd	r
#define MTMSR_EERI(reg)	mtmsrd	reg,1
#else
#define MTMSRD(r)	mtmsr	r
#define MTMSR_EERI(reg)	mtmsr	reg
#endif
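/*
 * For illustration only: on 32-bit classic, code that runs before the MMU
 * is enabled typically converts a kernel virtual address with tophys()
 * before dereferencing it (hypothetical symbol name):
 *
 *	lis	r3,early_flag@ha
 *	addi	r3,r3,early_flag@l
 *	tophys(r3,r3)
 *	lwz	r4,0(r3)
 */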
#endif /* __KERNEL__ */

/* The boring bits... */

/* Condition Register Bit Fields */

#define	cr0	0
#define	cr1	1
#define	cr2	2
#define	cr3	3
#define	cr4	4
#define	cr5	5
#define	cr6	6
#define	cr7	7


/*
 * General Purpose Registers (GPRs)
 *
 * The lower case r0-r31 should be used in preference to the upper
 * case R0-R31 as they provide more error checking in the assembler.
 * Use R0-31 only when really necessary.
 */

#define	r0	%r0
#define	r1	%r1
#define	r2	%r2
#define	r3	%r3
#define	r4	%r4
#define	r5	%r5
#define	r6	%r6
#define	r7	%r7
#define	r8	%r8
#define	r9	%r9
#define	r10	%r10
#define	r11	%r11
#define	r12	%r12
#define	r13	%r13
#define	r14	%r14
#define	r15	%r15
#define	r16	%r16
#define	r17	%r17
#define	r18	%r18
#define	r19	%r19
#define	r20	%r20
#define	r21	%r21
#define	r22	%r22
#define	r23	%r23
#define	r24	%r24
#define	r25	%r25
#define	r26	%r26
#define	r27	%r27
#define	r28	%r28
#define	r29	%r29
#define	r30	%r30
#define	r31	%r31


/* Floating Point Registers (FPRs) */

#define	fr0	0
#define	fr1	1
#define	fr2	2
#define	fr3	3
#define	fr4	4
#define	fr5	5
#define	fr6	6
#define	fr7	7
#define	fr8	8
#define	fr9	9
#define	fr10	10
#define	fr11	11
#define	fr12	12
#define	fr13	13
#define	fr14	14
#define	fr15	15
#define	fr16	16
#define	fr17	17
#define	fr18	18
#define	fr19	19
#define	fr20	20
#define	fr21	21
#define	fr22	22
#define	fr23	23
#define	fr24	24
#define	fr25	25
#define	fr26	26
#define	fr27	27
#define	fr28	28
#define	fr29	29
#define	fr30	30
#define	fr31	31

/* AltiVec Registers (VPRs) */

#define	v0	0
#define	v1	1
#define	v2	2
#define	v3	3
#define	v4	4
#define	v5	5
#define	v6	6
#define	v7	7
#define	v8	8
#define	v9	9
#define	v10	10
#define	v11	11
#define	v12	12
#define	v13	13
#define	v14	14
#define	v15	15
#define	v16	16
#define	v17	17
#define	v18	18
#define	v19	19
#define	v20	20
#define	v21	21
#define	v22	22
#define	v23	23
#define	v24	24
#define	v25	25
#define	v26	26
#define	v27	27
#define	v28	28
#define	v29	29
#define	v30	30
#define	v31	31

/* VSX Registers (VSRs) */

#define	vs0	0
#define	vs1	1
#define	vs2	2
#define	vs3	3
#define	vs4	4
#define	vs5	5
#define	vs6	6
#define	vs7	7
#define	vs8	8
#define	vs9	9
#define	vs10	10
#define	vs11	11
#define	vs12	12
#define	vs13	13
#define	vs14	14
#define	vs15	15
#define	vs16	16
#define	vs17	17
#define	vs18	18
#define	vs19	19
#define	vs20	20
#define	vs21	21
#define	vs22	22
#define	vs23	23
#define	vs24	24
#define	vs25	25
#define	vs26	26
#define	vs27	27
#define	vs28	28
#define	vs29	29
#define	vs30	30
#define	vs31	31
#define	vs32	32
#define	vs33	33
#define	vs34	34
#define	vs35	35
#define	vs36	36
#define	vs37	37
#define	vs38	38
#define	vs39	39
#define	vs40	40
#define	vs41	41
#define	vs42	42
#define	vs43	43
#define	vs44	44
#define	vs45	45
#define	vs46	46
#define	vs47	47
#define	vs48	48
#define	vs49	49
#define	vs50	50
#define	vs51	51
#define	vs52	52
#define	vs53	53
#define	vs54	54
#define	vs55	55
#define	vs56	56
#define	vs57	57
#define	vs58	58
#define	vs59	59
#define	vs60	60
#define	vs61	61
#define	vs62	62
#define	vs63	63

/* SPE Registers (EVPRs) */

#define	evr0	0
#define	evr1	1
#define	evr2	2
#define	evr3	3
#define	evr4	4
#define	evr5	5
#define	evr6	6
#define	evr7	7
#define	evr8	8
#define	evr9	9
#define	evr10	10
#define	evr11	11
#define	evr12	12
#define	evr13	13
#define	evr14	14
#define	evr15	15
#define	evr16	16
#define	evr17	17
#define	evr18	18
#define	evr19	19
#define	evr20	20
#define	evr21	21
#define	evr22	22
#define	evr23	23
#define	evr24	24
#define	evr25	25
#define	evr26	26
#define	evr27	27
#define	evr28	28
#define	evr29	29
#define	evr30	30
#define	evr31	31

#define RFSCV	.long 0x4c0000a4

/*
 * Create an endian fixup trampoline
 *
 * This starts with a "tdi 0,0,0x48" instruction which is
 * essentially a "trap never", and thus akin to a nop.
 *
 * The opcode for this instruction, read with the wrong endianness,
 * however results in a b . + 8.
 *
 * So essentially we use that trick to execute the following
 * trampoline in "reverse endian" if we are running with the
 * MSR_LE bit set the "wrong" way for whatever endianness the
 * kernel is built for.
 */

#ifdef CONFIG_PPC_BOOK3E_64
#define FIXUP_ENDIAN
#else
/*
 * This version may be used in HV or non-HV context.
 * MSR[EE] must be disabled.
 */
#define FIXUP_ENDIAN						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     191f;	  /* Skip trampoline if endian is good	*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0x00004039; /* li r10,0				*/ \
	.long 0x6401417d; /* mtmsrd r10,1			*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x14004a39; /* addi r10,r10,20			*/ \
	.long 0xa6035a7d; /* mtsrr0 r10				*/ \
	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
	.long 0x2400004c; /* rfid				*/ \
191:

/*
 * This version may only be used with MSR[HV]=1.
 * - Does not clear MSR[RI], so more robust.
 * - Slightly smaller and faster.
 */
#define FIXUP_ENDIAN_HV						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     191f;	  /* Skip trampoline if endian is good	*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x14004a39; /* addi r10,r10,20			*/ \
	.long 0xa64b5a7d; /* mthsrr0 r10			*/ \
	.long 0xa64b7b7d; /* mthsrr1 r11			*/ \
	.long 0x2402004c; /* hrfid				*/ \
191:

#endif /* !CONFIG_PPC_BOOK3E_64 */

#endif /* __ASSEMBLY__ */

#define SOFT_MASK_TABLE(_start, _end)		\
	stringify_in_c(.section __soft_mask_table,"a";)		\
	stringify_in_c(.balign 8;)		\
	stringify_in_c(.llong (_start);)	\
	stringify_in_c(.llong (_end);)		\
	stringify_in_c(.previous)

#define RESTART_TABLE(_start, _end, _target)	\
	stringify_in_c(.section __restart_table,"a";)		\
	stringify_in_c(.balign 8;)		\
	stringify_in_c(.llong (_start);)	\
	stringify_in_c(.llong (_end);)		\
	stringify_in_c(.llong (_target);)	\
	stringify_in_c(.previous)

#ifdef CONFIG_PPC_E500
#define BTB_FLUSH(reg)			\
	lis reg,BUCSR_INIT@h;		\
	ori reg,reg,BUCSR_INIT@l;	\
	mtspr SPRN_BUCSR,reg;		\
	isync;
#else
#define BTB_FLUSH(reg)
#endif /* CONFIG_PPC_E500 */

#endif /* _ASM_POWERPC_PPC_ASM_H */