/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
#include <asm/system.h>

/*
 * Return current instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/*
 * System setup and hardware flags.
 */
extern void (*cpu_wait)(void);

extern unsigned int vced_count, vcei_count;

/*
 * A special page (the vdso) is mapped into all processes at the very
 * top of the virtual memory space.
 */
#define SPECIAL_PAGES_SIZE PAGE_SIZE

#ifdef CONFIG_32BIT
/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	0x7fff8000UL
#define STACK_TOP	((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	((TASK_SIZE / 3) & ~(PAGE_SIZE))
#endif
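/*
 * Illustrative arithmetic (an editorial sketch, not part of the original
 * header): assuming 4 KiB pages on a 32-bit kernel, the definitions above
 * work out to
 *
 *	TASK_SIZE          = 0x7fff8000
 *	STACK_TOP          = (0x7fff8000 & PAGE_MASK) - 0x1000 = 0x7fff7000
 *	TASK_UNMAPPED_BASE = (0x7fff8000 / 3) & ~0x1000        = 0x2aaa8000
 *
 * i.e. the initial user stack sits just below the vdso page at the top of
 * the address space, and mmap() starts searching for free space at roughly
 * one third of the way up.
 */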
#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.  TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32	0x7fff8000UL
#define TASK_SIZE	0x10000000000UL
#define STACK_TOP \
	(((test_thread_flag(TIF_32BIT_ADDR) ? \
	   TASK_SIZE32 : TASK_SIZE) & PAGE_MASK) - SPECIAL_PAGES_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
	(test_thread_flag(TIF_32BIT_ADDR) ? \
		PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
#define TASK_SIZE_OF(tsk) \
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE)
#endif

#ifdef __KERNEL__
#define STACK_TOP_MAX	TASK_SIZE
#endif

#define NUM_FPU_REGS	32

typedef __u64 fpureg_t;

/*
 * It would be nice to add some more fields for emulator statistics, but there
 * are a number of fixed offsets in offset.h and elsewhere that would have to
 * be recalculated by hand.  So the additional information will be private to
 * the FPU emulator for now.  See asm-mips/fpu_emulator.h.
 */

struct mips_fpu_struct {
	fpureg_t	fpr[NUM_FPU_REGS];
	unsigned int	fcr31;
};

#define NUM_DSP_REGS	6

typedef __u32 dspreg_t;

struct mips_dsp_state {
	dspreg_t	dspr[NUM_DSP_REGS];
	unsigned int	dspcontrol;
};

#define INIT_CPUMASK { \
	{0,} \
}

struct mips3264_watch_reg_state {
	/* The width of watchlo is 32 in a 32 bit kernel and 64 in a
	   64 bit kernel.  We use unsigned long as it has the same
	   property. */
	unsigned long watchlo[NUM_WATCH_REGS];
	/* Only the mask and IRW bits from watchhi. */
	u16 watchhi[NUM_WATCH_REGS];
};

union mips_watch_reg_state {
	struct mips3264_watch_reg_state mips3264;
};

#ifdef CONFIG_CPU_CAVIUM_OCTEON

struct octeon_cop2_state {
	/* DMFC2 rt, 0x0201 */
	unsigned long	cop2_crc_iv;
	/* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
	unsigned long	cop2_crc_length;
	/* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
	unsigned long	cop2_crc_poly;
	/* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
	unsigned long	cop2_llm_dat[2];
	/* DMFC2 rt, 0x0084 */
	unsigned long	cop2_3des_iv;
	/* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
	unsigned long	cop2_3des_key[3];
	/* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
	unsigned long	cop2_3des_result;
	/* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
	unsigned long	cop2_aes_inp0;
	/* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
	unsigned long	cop2_aes_iv[2];
	/* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
	 * rt, 0x0107 */
	unsigned long	cop2_aes_key[4];
	/* DMFC2 rt, 0x0110 */
	unsigned long	cop2_aes_keylen;
	/* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
	unsigned long	cop2_aes_result[2];
	/* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
	 * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
	 * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
	 * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
	 * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
	unsigned long	cop2_hsh_datw[15];
	/* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
	 * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
	 * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
	unsigned long	cop2_hsh_ivw[8];
	/* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
	unsigned long	cop2_gfm_mult[2];
	/* DMFC2 rt, 0x025E - Pass2 */
	unsigned long	cop2_gfm_poly;
	/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
	unsigned long	cop2_gfm_result[2];
};
#define INIT_OCTEON_COP2 {0,}

struct octeon_cvmseg_state {
	unsigned long	cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
			       [cpu_dcache_line_size() / sizeof(unsigned long)];
};

#endif

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define ARCH_MIN_TASKALIGN	8

struct mips_abi;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Saved main processor registers. */
	unsigned long reg16;
	unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
	unsigned long reg29, reg30, reg31;

	/* Saved cp0 stuff. */
	unsigned long cp0_status;

	/* Saved fpu/fpu emulator stuff. */
	struct mips_fpu_struct fpu;
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Emulated instruction count */
	unsigned long	emulated_fp;
	/* Saved per-thread scheduler affinity mask */
	cpumask_t	user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* Saved state of the DSP ASE, if available. */
	struct mips_dsp_state dsp;

	/* Saved watch register state, if available. */
	union mips_watch_reg_state watch;

	/* Other stuff associated with the thread. */
	unsigned long cp0_badvaddr;	/* Last user fault */
	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
	unsigned long error_code;
	unsigned long trap_no;
	unsigned long irix_trampoline;	/* Wheee... */
	unsigned long irix_oldctx;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
#endif
	struct mips_abi *abi;
};

#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT \
	.emulated_fp = 0, \
	.user_cpus_allowed = INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define OCTEON_INIT \
	.cp2 = INIT_OCTEON_COP2,
#else
#define OCTEON_INIT
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#define INIT_THREAD { \
	/* \
	 * Saved main processor registers \
	 */ \
	.reg16 = 0, \
	.reg17 = 0, \
	.reg18 = 0, \
	.reg19 = 0, \
	.reg20 = 0, \
	.reg21 = 0, \
	.reg22 = 0, \
	.reg23 = 0, \
	.reg29 = 0, \
	.reg30 = 0, \
	.reg31 = 0, \
	/* \
	 * Saved cp0 stuff \
	 */ \
	.cp0_status = 0, \
	/* \
	 * Saved FPU/FPU emulator stuff \
	 */ \
	.fpu = { \
		.fpr = {0,}, \
		.fcr31 = 0, \
	}, \
	/* \
	 * FPU affinity state (null if not FPAFF) \
	 */ \
	FPAFF_INIT \
	/* \
	 * Saved DSP stuff \
	 */ \
	.dsp = { \
		.dspr = {0, }, \
		.dspcontrol = 0, \
	}, \
	/* \
	 * saved watch register stuff \
	 */ \
	.watch = {{{0,},},}, \
	/* \
	 * Other stuff associated with the process \
	 */ \
	.cp0_badvaddr = 0, \
	.cp0_baduaddr = 0, \
	.error_code = 0, \
	.trap_no = 0, \
	.irix_trampoline = 0, \
	.irix_oldctx = 0, \
	/* \
	 * Cavium Octeon specifics (null if not Octeon) \
	 */ \
	OCTEON_INIT \
}

struct task_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);

unsigned long get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - 32 - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
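/*
 * Illustrative use (an editorial sketch, not part of the original header):
 * the saved user-mode register frame of a task sits at the top of its
 * kernel stack, below 32 reserved bytes.  Assuming, for illustration only,
 * a THREAD_SIZE of 8 KiB (0x2000), __KSTK_TOS(tsk) evaluates to
 *
 *	(unsigned long)task_stack_page(tsk) + 0x2000 - 32 - sizeof(struct pt_regs)
 *
 * and a ptrace- or /proc-style reader could use the accessors above as
 *
 *	struct pt_regs *regs = task_pt_regs(tsk);
 *	unsigned long pc = regs->cp0_epc;	(same value as KSTK_EIP(tsk))
 *	unsigned long sp = regs->regs[29];	(same value as KSTK_ESP(tsk))
 */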
#define cpu_relax()	barrier()

/*
 * Return_address is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs.  It's simply not possible to unwind
 * some CPUs' stackframes.
 *
 * __builtin_return_address works only for non-leaf functions.  We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
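/*
 * Illustrative use (an editorial sketch, not part of the original header):
 * return_address() is intended for debugging or tracing paths that want to
 * report their caller, for example
 *
 *	printk("%s: called from %p\n", __func__, return_address());
 *
 * The empty asm clobbering $31 makes the compiler save the return address
 * register on the stack, as the comment above explains, so
 * __builtin_return_address(0) gives a usable answer even here.
 */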
#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif

#endif /* _ASM_PROCESSOR_H */