xref: /linux/arch/x86/include/asm/processor.h (revision a0b54e256d513ed99e456bea6e4e188ff92e7c46)
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

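	/* "$1f" is the address of the local label "1:" defined right after the mov. */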
	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* CPUs sharing the last level cache: */
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* Number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
	unsigned int		x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

#define X86_HYPER_VENDOR_NONE	0
#define X86_HYPER_VENDOR_VMWARE	1

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}
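
/*
 * Illustrative use (a sketch, not kernel code): reading the vendor
 * string from basic leaf 0.  All four words must be initialized,
 * since %eax and %ecx are inputs as well as outputs:
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *	char vendor[13];
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';	// e.g. "GenuineIntel"
 */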

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
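
/*
 * For example (sketch), switching to the initial kernel page tables:
 *
 *	load_cr3(swapper_pg_dir);
 */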

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define	MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]:		*/
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};
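
/*
 * Which member of the union is valid depends on the save mechanism in
 * use.  Illustrative access (a sketch, assuming the state was saved
 * with fxsave/xsave):
 *
 *	u32 mxcsr = tsk->thread.xstate->fxsave.mxcsr;
 */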

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Hardware debugging registers: */
	unsigned long		debugreg0;
	unsigned long		debugreg1;
	unsigned long		debugreg2;
	unsigned long		debugreg3;
	unsigned long		debugreg6;
	unsigned long		debugreg7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	union thread_xstate	*xstate;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set: */
	unsigned long		debugctlmsr;
	/* Debug Store context; see asm/ds.h */
	struct ds_context	*ds_ctx;
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	::"r" (value));
		break;
	case 7:
		asm("mov %0, %%db7"	::"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
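
/*
 * IOPL lives in EFLAGS bits 12-13, so callers pass the privilege level
 * pre-shifted.  E.g. (sketch, in the style of sys_iopl()):
 *
 *	set_iopl_mask(level << 12);
 */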

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
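
/*
 * E.g. (sketch), a #DB trap handler reads the debug status register
 * with:
 *
 *	get_debugreg(condition, 6);
 */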

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long		mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
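
/*
 * For example (sketch), turning on global pages when the CPU supports
 * them:
 *
 *	if (cpu_has_pge)
 *		set_in_cr4(X86_CR4_PGE);
 */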

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
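
/*
 * Illustrative use (sketch): walking the deterministic cache parameter
 * leaves (CPUID leaf 4), where %ecx selects the sub-leaf:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	int i;
 *
 *	for (i = 0; ; i++) {
 *		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
 *		if ((eax & 0x1f) == 0)	// cache type 0: no more leaves
 *			break;
 *	}
 */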

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
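
/*
 * E.g. (sketch), probing the highest extended CPUID leaf:
 *
 *	if (cpuid_eax(0x80000000) >= 0x80000008)
 *		... the address-size leaf is available ...
 */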

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
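
/*
 * The usual pairing (sketch): arm the monitor on a flag word, re-check
 * it, then idle until it is written or an interrupt arrives:
 *
 *	__monitor(&current_thread_info()->flags, 0, 0);
 *	if (!need_resched())
 *		__mwait(0, 0);
 */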

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern unsigned long		idle_halt;
extern unsigned long		idle_nomwait;

/*
 * On systems with caches, caches must be flushed as the absolute
 * last instruction before going into a suspended halt.  Otherwise,
 * dirty data can linger in the cache and become stale on resume,
 * leading to strange errors.
 *
 * Perform a variety of operations to guarantee that the compiler
 * will not reorder instructions.  wbinvd itself is serializing
 * so the processor will not reorder.
 *
 * Systems without cache can just go into halt.
 */
static inline void wbinvd_halt(void)
{
	mb();
	/* Check for clflush to determine if wbinvd is legal */
	if (cpu_has_clflush)
		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
	else
		while (1)
			halt();
}

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
{
	u64 debugctlmsr = 0;
	u32 val1, val2;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
	debugctlmsr = val1 | ((u64)val2 << 32);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
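
/*
 * E.g. (sketch), enabling single-stepping on branches rather than
 * on instructions (BTF) for the current CPU:
 *
 *	update_debugctlmsr(get_debugctlmsr() | DEBUGCTLMSR_BTF);
 */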

static inline void update_debugctlmsr_on_cpu(int cpu,
					     unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
		     (u32)((u64)debugctlmsr),
		     (u32)((u64)debugctlmsr >> 32));
}

/*
 * From the system description table in the BIOS. Mostly for MCA use,
 * but others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                 \
({                                                                     \
       unsigned long *__ptr = (unsigned long *)(info);                 \
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
       struct pt_regs *__regs__;                                       \
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

#endif /* _ASM_X86_PROCESSOR_H */