#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>

#define HBP_NUM 4

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:" : "=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
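/*
 * Illustrative use of the helper above (a sketch, not a kernel API
 * contract): the asm stores the address of the local "1:" label, so
 * the returned pointer is a text address just past the mov itself:
 *
 *	void *ip = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", ip);
 */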
/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
	unsigned int		x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

#define X86_HYPER_VENDOR_NONE	0
#define X86_HYPER_VENDOR_VMWARE	1

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;
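/*
 * Typical (hypothetical) consumer of cpuinfo_x86: vendor/family checks
 * against boot_cpu_data, e.g.:
 *
 *	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 *	    boot_cpu_data.x86 == 0xf)
 *		pr_info("K8-class CPU, model %u\n", boot_cpu_data.x86_model);
 */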
extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}
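/*
 * Example (sketch only): fetching the 12-byte vendor string, which
 * CPUID leaf 0 returns in ebx/edx/ecx, in that order.  "vendor" is a
 * hypothetical local:
 *
 *	unsigned int eax = 0, ebx, ecx, edx;
 *	char vendor[13];
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = 0;
 *
 * "vendor" ends up as e.g. "GenuineIntel"; cpu_detect() above does the
 * real version of this dance when filling x86_vendor_id.
 */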
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
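/*
 * Worked numbers (illustration): 65536 ports / 8 bits per byte gives
 * IO_BITMAP_BYTES = 8192; with 8-byte longs that is IO_BITMAP_LONGS =
 * 1024 on a 64-bit kernel (2048 on 32-bit, where a long is 4 bytes).
 */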
struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd;	/* Control Word			*/
	u16			swd;	/* Status Word			*/
	u16			twd;	/* Tag Word			*/
	u16			fop;	/* Last Instruction Opcode	*/
	union {
		struct {
			u64	rip;	/* Instruction Pointer		*/
			u64	rdp;	/* Data Pointer			*/
		};
		struct {
			u32	fip;	/* FPU IP Offset		*/
			u32	fcs;	/* FPU IP Selector		*/
			u32	foo;	/* FPU Operand Offset		*/
			u32	fos;	/* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};
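/*
 * Layout arithmetic (illustration): in the fsave image each of the 8
 * x87 registers occupies 10 bytes, hence st_space[20] = 80 bytes; the
 * fxsave image pads each register to 16 bytes (st_space[32] = 128
 * bytes) and stores 16 XMM registers at 16 bytes each (xmm_space[64]
 * = 256 bytes).
 */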
struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
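/*
 * A build-time cross-check one could add here (hypothetical, not part
 * of this header): the canary member must land exactly at %gs:40 for
 * GCC's generated stack-protector code to find it:
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */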
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	union thread_xstate	*xstate;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
	unsigned long		debugctlmsr;
	/* Debug Store context; see asm/ds.h */
	struct ds_context	*ds_ctx;
};
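/*
 * Context-switch sketch (illustrative; the real code lives in
 * arch/x86/kernel/process*.c): on a task switch, the per-thread IO
 * bitmap above is copied into the shared per-cpu TSS, roughly:
 *
 *	memcpy(tss->io_bitmap, next->io_bitmap_ptr,
 *	       max(prev->io_bitmap_max, next->io_bitmap_max));
 *
 * "tss", "prev" and "next" here name the per-cpu TSS and the outgoing
 * and incoming thread_structs.
 */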
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" : "=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" : "=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" : "=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" : "=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" : "=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" :: "r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" :: "r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" :: "r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" :: "r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" :: "r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" :: "r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
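/*
 * Example (sketch): arming an address in DR0 and reading the hit
 * status back from DR6 with the raw accessors.  "addr" and "dr6" are
 * hypothetical locals:
 *
 *	native_set_debugreg(0, (unsigned long)addr);
 *	dr6 = native_get_debugreg(6);
 *
 * Most code should use the get_debugreg()/set_debugreg() wrappers
 * below, which respect paravirt.
 */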
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)	\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)	\
	native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long		mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
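/*
 * Example (illustrative): enabling global pages early in boot so that
 * later-booting CPUs inherit the setting via mmu_cr4_features:
 *
 *	set_in_cr4(X86_CR4_PGE);
 */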
typedef struct {
	unsigned long		seg;
} mm_segment_t;


/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
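/*
 * Usage sketch: leaf 4 (deterministic cache parameters) takes a cache
 * index in ecx, so it goes through cpuid_count(); single-register
 * queries can use the cpuid_eax()-style helpers.  "max_basic_leaf" is
 * a hypothetical local:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 *	max_basic_leaf = cpuid_eax(0);
 */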
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern unsigned long		idle_halt;
extern unsigned long		idle_nomwait;
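/*
 * Idle-loop sketch (illustrative only): arm the monitor on a flag's
 * cache line, re-check the condition, then mwait until it is written.
 * "need_wakeup" is a hypothetical variable:
 *
 *	__monitor(&need_wakeup, 0, 0);
 *	if (!need_wakeup)
 *		__mwait(0, 0);
 *
 * mwait_idle_with_hints() above is the real entry point for idle code.
 */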
/*
 * On systems with caches, caches must be flushed as the absolute
 * last instruction before going into a suspended halt.  Otherwise,
 * dirty data can linger in the cache and become stale on resume,
 * leading to strange errors.
 *
 * Perform a variety of operations to guarantee that the compiler
 * will not reorder instructions.  wbinvd itself is serializing
 * so the processor will not reorder.
 *
 * Systems without cache can just go into halt.
 */
static inline void wbinvd_halt(void)
{
	mb();
	/* check for clflush to determine if wbinvd is legal */
	if (cpu_has_clflush)
		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
	else
		while (1)
			halt();
}

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
{
	u64 debugctlmsr = 0;
	u32 val1, val2;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
	debugctlmsr = val1 | ((u64)val2 << 32);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

static inline void update_debugctlmsr_on_cpu(int cpu,
					     unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
		     (u32)((u64)debugctlmsr),
		     (u32)((u64)debugctlmsr >> 32));
}
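/*
 * Example (sketch): setting the BTF bit so the CPU traps on branches
 * rather than on every instruction when single-stepping:
 *
 *	unsigned long val = get_debugctlmsr();
 *	update_debugctlmsr(val | DEBUGCTLMSR_BTF);
 *
 * DEBUGCTLMSR_BTF is defined in asm/msr-index.h.
 */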
/*
 * from system description table in BIOS.  Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
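/*
 * Usage sketch (hypothetical list walk): issue the prefetch one node
 * ahead so the memory load overlaps with processing the current node.
 * "node", "head" and "process" are illustrative only:
 *
 *	for (node = head; node; node = node->next) {
 *		prefetch(node->next);
 *		process(node);
 *	}
 */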
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD { \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL, \
	.sysenter_cs		= __KERNEL_CS, \
	.io_bitmap_ptr		= NULL, \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS { \
	.x86_tss = { \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS, \
		.ss1		= __KERNEL_CS, \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET, \
	}, \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
	unsigned long *__ptr = (unsigned long *)(info); \
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({ \
	struct pt_regs *__regs__; \
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task)) - 8); \
	__regs__ - 1; \
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
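/*
 * Worked number (illustration): (1UL << 47) - PAGE_SIZE evaluates to
 * 0x00007ffffffff000, i.e. user space ends one 4K guard page below
 * the 47-bit canonical boundary.
 */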
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

/*
 * Ratio of the APERF and MPERF deltas, scaled by 2^APERFMPERF_SHIFT
 * for integer precision: a result of 1024 means the CPU ran at its
 * reference frequency over the sampling interval.
 */
static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}

#endif /* _ASM_X86_PROCESSOR_H */