#ifndef _ASM_POWERPC_PROCESSOR_H
#define _ASM_POWERPC_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/reg.h>

#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2

#ifdef __BIG_ENDIAN__
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#else
#define TS_FPROFFSET 1
#define TS_VSRLOWOFFSET 0
#endif

#else
#define TS_FPRWIDTH 1
#define TS_FPROFFSET 0
#endif

#ifdef CONFIG_PPC64
/* Default SMT priority is set to 3. The priority is stored in bits 11-13 of the PPR. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
#define DEFAULT_PPR (PPR_PRIORITY << 50)
#else
#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/hw_breakpoint.h>

/* We do _not_ want to define new machine types at all, those must die
 * in favor of using the device-tree
 * -- BenH.
 */

/* PREP sub-platform types. Unused */
#define _PREP_Motorola	0x01	/* motorola prep */
#define _PREP_Firm	0x02	/* firmworks prep */
#define _PREP_IBM	0x00	/* ibm prep */
#define _PREP_Bull	0x03	/* bull prep */

/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
#define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq	0x07	/* TotalImpact's briQ */

#if defined(__KERNEL__) && defined(CONFIG_PPC32)

extern int _chrp_type;

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low()		asm volatile("or 31,31,31	# very low priority")
#define HMT_low()		asm volatile("or 1,1,1		# low priority")
#define HMT_medium_low()	asm volatile("or 6,6,6		# medium low priority")
#define HMT_medium()		asm volatile("or 2,2,2		# medium priority")
#define HMT_medium_high()	asm volatile("or 5,5,5		# medium high priority")
#define HMT_high()		asm volatile("or 3,3,3		# high priority")

#ifdef __KERNEL__

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

#ifdef CONFIG_PPC32

#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
#error User TASK_SIZE overlaps with KERNEL_START address
#endif
#define TASK_SIZE	(CONFIG_TASK_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 8 * 3)
#endif
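
/*
 * Illustrative only: with the common 3G/1G split, CONFIG_TASK_SIZE is
 * 0xc0000000, so the formula above places TASK_UNMAPPED_BASE at
 * 0x48000000, i.e. 3/8 of the way into the user address space.
 */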

#ifdef CONFIG_PPC64
/*
 * A 64-bit user address space can have one of several size limits.
 * The currently supported values are:
 */
#define TASK_SIZE_64TB  (0x0000400000000000UL)
#define TASK_SIZE_128TB (0x0000800000000000UL)
#define TASK_SIZE_512TB (0x0002000000000000UL)
#define TASK_SIZE_1PB   (0x0004000000000000UL)
#define TASK_SIZE_2PB   (0x0008000000000000UL)
/*
 * With 52 bits in the address we can support
 * up to 4PB of range.
 */
#define TASK_SIZE_4PB   (0x0010000000000000UL)

/*
 * For now, 512TB is only supported with book3s and a 64K Linux page size.
 */
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Max value currently used:
 */
#define TASK_SIZE_USER64		TASK_SIZE_4PB
#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
#define TASK_CONTEXT_SIZE		TASK_SIZE_512TB
#else
#define TASK_SIZE_USER64		TASK_SIZE_64TB
#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
/*
 * We don't need to allocate extended context ids for 4K page size, because
 * we limit the max effective address on this config to 64TB.
 */
#define TASK_CONTEXT_SIZE		TASK_SIZE_64TB
#endif

/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT).
 */
#define TASK_SIZE_USER32	(0x0000000100000000UL - (1 * PAGE_SIZE))

#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				 TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_SIZE		TASK_SIZE_OF(current)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32	(PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64	(PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))

#define TASK_UNMAPPED_BASE	(is_32bit_task() ? \
				 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64)
#endif

/*
 * Initial task size value for user applications. For book3s 64 we start
 * with 128TB and conditionally enable up to 512TB.
 */
#ifdef CONFIG_PPC_BOOK3S_64
#define DEFAULT_MAP_WINDOW	(is_32bit_task() ? \
				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
#else
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#endif

#ifdef __powerpc64__

#define STACK_TOP_USER64	DEFAULT_MAP_WINDOW_USER64
#define STACK_TOP_USER32	TASK_SIZE_USER32

#define STACK_TOP	(is_32bit_task() ? \
			 STACK_TOP_USER32 : STACK_TOP_USER64)

#define STACK_TOP_MAX	TASK_SIZE_USER64

#else /* __powerpc64__ */

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

#endif /* __powerpc64__ */

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define TS_FPR(i)	fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i)	ckfp_state.fpr[i][TS_FPROFFSET]
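
/*
 * Illustrative note: with CONFIG_VSX each fpr[] entry below is two
 * doublewords wide.  TS_FPROFFSET selects the doubleword holding the
 * classic FP register value and TS_VSRLOWOFFSET the low half of the
 * corresponding VSR, independent of endianness; e.g. (hypothetical)
 * tsk->thread.TS_FPR(3) always reads the FPR3 value.
 */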

/* FP and VSX 0-31 register set */
struct thread_fp_state {
	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	u64	fpscr;		/* Floating point status */
};

/* Complete AltiVec register set including VSCR */
struct thread_vr_state {
	vector128	vr[32] __attribute__((aligned(16)));
	vector128	vscr __attribute__((aligned(16)));
};

struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * The following help to manage the use of Debug Control Registers
	 * on the BookE platforms.
	 */
	uint32_t	dbcr0;
	uint32_t	dbcr1;
#ifdef CONFIG_BOOKE
	uint32_t	dbcr2;
#endif
	/*
	 * The stored value of the DBSR register is its value at the last
	 * debug interrupt. It can only be read by user space (it is never
	 * written from user space) and helps describe the reason for the
	 * last debug trap.
	 */
	uint32_t	dbsr;
	/*
	 * The following will contain addresses used by debug applications
	 * to help trace and trap on particular address locations.
	 * The bits in the Debug Control Registers above help define which
	 * of the following registers will contain valid data and/or addresses.
	 */
	unsigned long	iac1;
	unsigned long	iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	unsigned long	iac3;
	unsigned long	iac4;
#endif
	unsigned long	dac1;
	unsigned long	dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	unsigned long	dvc1;
	unsigned long	dvc2;
#endif
#endif
};

struct thread_struct {
	unsigned long	ksp;		/* Kernel stack pointer */

#ifdef CONFIG_PPC64
	unsigned long	ksp_vsid;
#endif
	struct pt_regs	*regs;		/* Pointer to saved register state */
	mm_segment_t	addr_limit;	/* for get_fs() validation */
#ifdef CONFIG_BOOKE
	/* BookE base exception scratch space; align on cacheline */
	unsigned long	normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
	void		*pgdir;		/* root of page-table tree */
	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
#endif
	/* Debug Registers */
	struct debug_reg debug;
	struct thread_fp_state	fp_state;
	struct thread_fp_state	*fp_save_area;
	int		fpexc_mode;	/* floating-point exception mode */
	unsigned int	align_ctl;	/* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM];
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
	unsigned long	trap_nr;	/* last trap # on this thread */
	u8		load_slb;	/* Ages out SLB preload cache entries */
	u8		load_fp;
#ifdef CONFIG_ALTIVEC
	u8		load_vec;
	struct thread_vr_state vr_state;
	struct thread_vr_state *vr_save_area;
	unsigned long	vrsave;
	int		used_vr;	/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* VSR status */
	int		used_vsr;	/* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
	u64		acc;		/* Accumulator */
	unsigned long	spefscr;	/* SPE & eFP status */
	unsigned long	spefscr_last;	/* SPEFSCR value on last prctl
					   call or trap return */
	int		used_spe;	/* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u8		load_tm;
	u64		tm_tfhar;	/* Transaction fail handler addr */
	u64		tm_texasr;	/* Transaction exception & summary */
	u64		tm_tfiar;	/* Transaction fail instr address reg */
	struct pt_regs	ckpt_regs;	/* Checkpointed registers */

	unsigned long	tm_tar;
	unsigned long	tm_ppr;
	unsigned long	tm_dscr;

	/*
	 * Checkpointed FP and VSX 0-31 register set.
	 *
	 * When a transaction is active/signalled/scheduled etc., *regs is the
	 * most recent set of (speculated) GPRs, with ckpt_regs being the older
	 * checkpointed regs to which we roll back if the transaction aborts.
	 *
	 * ckfp_state and ckvr_state relate to fp_state and vr_state in the
	 * same way that ckpt_regs relates to *regs.
	 */
	struct thread_fp_state ckfp_state; /* Checkpointed FP state */
	struct thread_vr_state ckvr_state; /* Checkpointed VR state */
	unsigned long	ckvrsave;	/* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
	unsigned long	amr;
	unsigned long	iamr;
	unsigned long	uamor;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	void		*kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu	*kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
	unsigned long	dscr;
	unsigned long	fscr;
	/*
	 * dscr_inherit indicates that the process has explicitly changed
	 * the DSCR value for itself, so the kernel no longer uses the
	 * default CPU DSCR value from the PACA structure on context switch.
	 * Once set, this behaviour is also inherited by all children of the
	 * process from that point onwards.
	 */
	int		dscr_inherit;
	unsigned long	tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long	tar;
	unsigned long	ebbrr;
	unsigned long	ebbhr;
	unsigned long	bescr;
	unsigned long	siar;
	unsigned long	sdar;
	unsigned long	sier;
	unsigned long	mmcr2;
	unsigned	mmcr0;

	unsigned	used_ebb;
	unsigned int	used_vas;
#endif
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP		(sizeof(init_stack) + (unsigned long)&init_stack)
#define INIT_SP_LIMIT \
	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long)&init_stack)

#ifdef CONFIG_SPE
#define SPEFSCR_INIT \
	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.addr_limit = KERNEL_DS, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
	SPEFSCR_INIT \
}
#else
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
	.addr_limit = KERNEL_DS, \
	.fpexc_mode = 0, \
	.fscr = FSCR_TAR | FSCR_EBB \
}
#endif

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)	((tsk)->thread.regs ? (tsk)->thread.regs->nip : 0)
#define KSTK_ESP(tsk)	((tsk)->thread.regs ? (tsk)->thread.regs->gpr[1] : 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr)	get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val)	set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

#define GET_ENDIAN(tsk, adr)	get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val)	set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
extern void store_vr_state(struct thread_vr_state *vr);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}

#ifdef CONFIG_PPC64

#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)

#define spin_begin()	HMT_low()

#define spin_cpu_relax()	barrier()

#define spin_cpu_yield()	spin_cpu_relax()

#define spin_end()	HMT_medium()

#define spin_until_cond(cond)					\
do {								\
	if (unlikely(!(cond))) {				\
		spin_begin();					\
		do {						\
			spin_cpu_relax();			\
		} while (!(cond));				\
		spin_end();					\
	}							\
} while (0)

#else
#define cpu_relax()	barrier()
#endif
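
/*
 * Illustrative use of the spin_* primitives above: poll a condition at
 * low SMT priority and restore medium priority once it becomes true,
 * e.g. spin_until_cond(READ_ONCE(*flag)); which is equivalent to an
 * open-coded spin_begin()/spin_cpu_relax()/spin_end() loop around the
 * condition.
 */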

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	if (is_32)
		return sp & 0x0ffffffffUL;
	return sp;
}
#else
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	return sp;
}
#endif

extern unsigned long cpuidle_disable;
enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF };

extern int powersave_nap;	/* set if nap mode can be used in idle loop */
extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc */
extern void power7_idle_type(unsigned long type);
extern unsigned long power9_idle_stop(unsigned long psscr_val);
extern unsigned long power9_offline_stop(unsigned long psscr_val);
extern void power9_idle_type(unsigned long stop_psscr_val,
			     unsigned long stop_psscr_mask);

extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand,
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers-of-2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
#endif

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */