#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
#endif
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
	unsigned int		x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

#define X86_HYPER_VENDOR_NONE	0
#define X86_HYPER_VENDOR_VMWARE	1

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;
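/*
 * Illustrative only (editor's sketch, not part of the original header):
 * the identification fields above are typically consumed like this,
 * e.g. to gate a vendor-specific workaround:
 *
 *	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 *	    boot_cpu_data.x86 == 0xf)
 *		... apply a K8-specific quirk ...
 */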
extern struct tss_struct	doublefault_tss;
extern __u32			cleared_cpu_caps[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
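/*
 * Worked numbers (editor's note): IO_BITMAP_BITS covers the full 65536
 * I/O ports at one bit per port, so IO_BITMAP_BYTES is 65536 / 8 = 8192
 * bytes, i.e. IO_BITMAP_LONGS is 2048 longs on 32-bit kernels and 1024
 * on 64-bit ones.
 */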
struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define	MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd;	/* Control Word			*/
	u16			swd;	/* Status Word			*/
	u16			twd;	/* Tag Word			*/
	u16			fop;	/* Last Instruction Opcode	*/
	union {
		struct {
			u64	rip;	/* Instruction Pointer		*/
			u64	rdp;	/* Data Pointer			*/
		};
		struct {
			u32	fip;	/* FPU IP Offset		*/
			u32	fcs;	/* FPU IP Selector		*/
			u32	foo;	/* FPU Operand Offset		*/
			u32	fos;	/* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};
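/*
 * Editorial note (informal, not authoritative): exactly one member of
 * union thread_xstate is live for a given task, depending on the save
 * mechanism in use -- fsave for plain FSAVE/FRSTOR, fxsave once the CPU
 * has FXSR, xsave when the XSAVE feature set is enabled, and soft when
 * CONFIG_MATH_EMULATION emulates the FPU entirely in software.
 */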
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
	unsigned long		ip;
	unsigned long		fs;
	unsigned long		gs;
	/* Hardware debugging registers: */
	unsigned long		debugreg0;
	unsigned long		debugreg1;
	unsigned long		debugreg2;
	unsigned long		debugreg3;
	unsigned long		debugreg6;
	unsigned long		debugreg7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	union thread_xstate	*xstate;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
	unsigned long		debugctlmsr;
#ifdef CONFIG_X86_DS
	/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
	struct ds_context	*ds_ctx;
#endif /* CONFIG_X86_DS */
#ifdef CONFIG_X86_PTRACE_BTS
	/* the signal to send on a bts buffer overflow */
	unsigned int		bts_ovfl_signal;
#endif /* CONFIG_X86_PTRACE_BTS */
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" ::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" ::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" ::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" ::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" ::"r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" ::"r" (value));
		break;
	default:
		BUG();
	}
}
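/*
 * Usage sketch (editor's illustration, not part of the original header):
 * DR7 arms hardware breakpoints and DR6 reports which one fired, so a
 * debug-trap handler would typically do something like:
 *
 *	unsigned long dr6 = native_get_debugreg(6);
 *
 *	if (dr6 & 0xf)				(one of DR0..DR3 matched)
 *		native_set_debugreg(6, 0);	(clear the status bits)
 */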
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)			\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)			\
	native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
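/*
 * Illustrative call (editor's sketch; X86_CR4_OSFXSR comes from
 * <asm/processor-flags.h>, not from this file): enabling FXSAVE/FXRSTOR
 * support during FPU setup both flips the bit in %cr4 and records it in
 * mmu_cr4_features, so that later-booting CPUs inherit the same setting:
 *
 *	set_in_cr4(X86_CR4_OSFXSR);
 */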
typedef struct {
	unsigned long		seg;
} mm_segment_t;


/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
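/*
 * Usage sketch (editor's illustration): CPUID leaf 0 reports the highest
 * standard leaf in EAX and leaf 0x80000000 the highest extended leaf, so
 * callers range-check before issuing deeper queries, e.g. reading the
 * physical address width:
 *
 *	if (cpuid_eax(0x80000000) >= 0x80000008)
 *		phys_bits = cpuid_eax(0x80000008) & 0xff;
 */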
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

/* Stop speculative execution: */
static inline void sync_core(void)
{
	int tmp;

	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long		boot_option_idle_override;
extern unsigned long		idle_halt;
extern unsigned long		idle_nomwait;

/*
 * On systems with caches, the caches must be flushed as the absolute
 * last instruction before going into a suspended halt. Otherwise,
 * dirty data can linger in the cache and become stale on resume,
 * leading to strange errors.
 *
 * Perform a variety of operations to guarantee that the compiler
 * will not reorder instructions. wbinvd itself is serializing
 * so the processor will not reorder.
 *
 * Systems without caches can just go into halt.
 */
static inline void wbinvd_halt(void)
{
	mb();
	/* check for clflush to determine if wbinvd is legal */
	if (cpu_has_clflush)
		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
	else
		while (1)
			halt();
}

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
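/*
 * Editorial note (a sketch of the mechanism, assuming the usual semantics
 * of alternative_input() from <asm/alternative.h>): the two helpers above
 * emit BASE_PREFETCH and are patched at boot to the SSE or 3DNow! variant
 * once X86_FEATURE_XMM or X86_FEATURE_3DNOW is detected, so a call such as
 *
 *	prefetchw(&lock->slock);	(the field name is hypothetical)
 *
 * costs only the fallback (ASM_NOP4 on 32-bit, prefetcht0 on 64-bit) on
 * CPUs without the corresponding instruction.
 */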
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.fs			= __KERNEL_PERCPU,			  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	},								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)						  \
({									  \
	struct pt_regs *__regs__;					  \
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							  \
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX		((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

#endif /* _ASM_X86_PROCESSOR_H */