#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
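/*
 * Illustrative sketch (generic driver pattern, not part of this header):
 * network drivers reserve NET_IP_ALIGN bytes of headroom in receive
 * buffers so the IP header ends up naturally aligned.  On x86 this is
 * a no-op because the value is 0:
 *
 *	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 */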

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			 x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it does not, we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
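/*
 * Worked out: 65536 bits is 8192 bytes, i.e. 1024 longs on 64-bit and
 * 2048 longs on 32-bit kernels.
 */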

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Space for the temporary SYSENTER stack:
	 */
	unsigned long		SYSENTER_stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32                     stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int xstate_size;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long           debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long           ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
	/* sp0 on x86_32 is special in and around vm86 mode. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
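/*
 * Illustrative sketch: querying the vendor string and the highest
 * supported standard leaf with the helpers above.  Leaf 0 returns the
 * maximum standard leaf in EAX and the vendor string in EBX, EDX, ECX
 * (in that order):
 *
 *	unsigned int max_leaf, vendor[4] = { 0 };
 *
 *	cpuid(0, &max_leaf, &vendor[0], &vendor[2], &vendor[1]);
 *	pr_info("CPU vendor: %s, max leaf: %u\n",
 *		(char *)vendor, max_leaf);
 */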

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

#define cpu_relax_lowlatency() cpu_relax()

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump.  The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}
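/*
 * Illustrative sketch (an assumption, not an interface defined here):
 * callers that cross-modify kernel text typically have every CPU
 * execute a serializing instruction afterwards, e.g.:
 *
 *	static void do_serialize(void *unused)
 *	{
 *		sync_core();
 *	}
 *	...
 *	on_each_cpu(do_serialize, NULL, 1);
 */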

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
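/*
 * Illustrative sketch: the usual pattern is a read-modify-write of
 * DEBUGCTL, e.g. toggling branch-trap single stepping (DEBUGCTLMSR_BTF
 * is defined in <asm/msr-index.h>):
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	if (enable)
 *		debugctl |= DEBUGCTLMSR_BTF;
 *	else
 *		debugctl &= ~DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */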

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
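/*
 * Illustrative sketch ('process' and the node type are placeholders):
 * prefetch() pays off when the next access is known well in advance,
 * e.g. fetching the next list node while the current one is processed:
 *
 *	for (node = head; node; node = node->next) {
 *		prefetch(node->next);
 *		process(node);
 *	}
 */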

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.  The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously.  We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
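/*
 * Worked out: (1UL << 47) - PAGE_SIZE = 0x00007ffffffff000, which
 * leaves the top 4k page of the canonical userspace range (ending at
 * 0x00007fffffffffff) unmapped as the guard page.
 */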

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = TOP_OF_INIT_STACK \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
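/*
 * Illustrative sketch: guest code typically uses this to locate a
 * hypervisor's CPUID leaf range by signature; for example, KVM
 * advertises "KVMKVMKVM\0\0\0" starting at 0x40000000:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		pr_info("running as a KVM guest\n");
 */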

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */