xref: /linux/arch/x86/include/asm/processor.h (revision 2a41aa4feb25af3ead60b740c43df80c576efea2)
11965aae3SH. Peter Anvin #ifndef _ASM_X86_PROCESSOR_H
21965aae3SH. Peter Anvin #define _ASM_X86_PROCESSOR_H
3bb898558SAl Viro 
4bb898558SAl Viro #include <asm/processor-flags.h>
5bb898558SAl Viro 
6bb898558SAl Viro /* Forward declaration, a strange C thing */
7bb898558SAl Viro struct task_struct;
8bb898558SAl Viro struct mm_struct;
99fda6a06SBrian Gerst struct vm86;
10bb898558SAl Viro 
11bb898558SAl Viro #include <asm/math_emu.h>
12bb898558SAl Viro #include <asm/segment.h>
13bb898558SAl Viro #include <asm/types.h>
14decb4c41SIngo Molnar #include <uapi/asm/sigcontext.h>
15bb898558SAl Viro #include <asm/current.h>
16cd4d09ecSBorislav Petkov #include <asm/cpufeatures.h>
17bb898558SAl Viro #include <asm/page.h>
1854321d94SJeremy Fitzhardinge #include <asm/pgtable_types.h>
19bb898558SAl Viro #include <asm/percpu.h>
20bb898558SAl Viro #include <asm/msr.h>
21bb898558SAl Viro #include <asm/desc_defs.h>
22bb898558SAl Viro #include <asm/nops.h>
23f05e798aSDavid Howells #include <asm/special_insns.h>
2414b9675aSIngo Molnar #include <asm/fpu/types.h>
25bb898558SAl Viro 
26bb898558SAl Viro #include <linux/personality.h>
27bb898558SAl Viro #include <linux/cache.h>
28bb898558SAl Viro #include <linux/threads.h>
295cbc19a9SPeter Zijlstra #include <linux/math64.h>
30faa4602eSPeter Zijlstra #include <linux/err.h>
31f05e798aSDavid Howells #include <linux/irqflags.h>
32f05e798aSDavid Howells 
33f05e798aSDavid Howells /*
34f05e798aSDavid Howells  * We handle most unaligned accesses in hardware.  On the other hand
35f05e798aSDavid Howells  * unaligned DMA can be quite expensive on some Nehalem processors.
36f05e798aSDavid Howells  *
37f05e798aSDavid Howells  * Based on this we disable the IP header alignment in network drivers.
38f05e798aSDavid Howells  */
39f05e798aSDavid Howells #define NET_IP_ALIGN	0
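
/*
 * Illustrative sketch (an addition, not from the original header): network
 * drivers honor NET_IP_ALIGN when reserving skb headroom, so with the
 * value 0 the call below is a no-op on x86:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 */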
40bb898558SAl Viro 
41b332828cSK.Prasad #define HBP_NUM 4
42bb898558SAl Viro /*
43bb898558SAl Viro  * Default implementation of the helper that returns the current
44bb898558SAl Viro  * instruction pointer ("program counter").
45bb898558SAl Viro  */
46bb898558SAl Viro static inline void *current_text_addr(void)
47bb898558SAl Viro {
48bb898558SAl Viro 	void *pc;
49bb898558SAl Viro 
50bb898558SAl Viro 	asm volatile("mov $1f, %0; 1:":"=r" (pc));
51bb898558SAl Viro 
52bb898558SAl Viro 	return pc;
53bb898558SAl Viro }
54bb898558SAl Viro 
55b8c1b8eaSIngo Molnar /*
56b8c1b8eaSIngo Molnar  * These alignment constraints are for performance in the vSMP case,
57b8c1b8eaSIngo Molnar  * but in the task_struct case we must also meet hardware imposed
58b8c1b8eaSIngo Molnar  * alignment requirements of the FPU state:
59b8c1b8eaSIngo Molnar  */
60bb898558SAl Viro #ifdef CONFIG_X86_VSMP
61bb898558SAl Viro # define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
62bb898558SAl Viro # define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
63bb898558SAl Viro #else
64b8c1b8eaSIngo Molnar # define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
65bb898558SAl Viro # define ARCH_MIN_MMSTRUCT_ALIGN	0
66bb898558SAl Viro #endif
67bb898558SAl Viro 
68e0ba94f1SAlex Shi enum tlb_infos {
69e0ba94f1SAlex Shi 	ENTRIES,
70e0ba94f1SAlex Shi 	NR_INFO
71e0ba94f1SAlex Shi };
72e0ba94f1SAlex Shi 
73e0ba94f1SAlex Shi extern u16 __read_mostly tlb_lli_4k[NR_INFO];
74e0ba94f1SAlex Shi extern u16 __read_mostly tlb_lli_2m[NR_INFO];
75e0ba94f1SAlex Shi extern u16 __read_mostly tlb_lli_4m[NR_INFO];
76e0ba94f1SAlex Shi extern u16 __read_mostly tlb_lld_4k[NR_INFO];
77e0ba94f1SAlex Shi extern u16 __read_mostly tlb_lld_2m[NR_INFO];
78e0ba94f1SAlex Shi extern u16 __read_mostly tlb_lld_4m[NR_INFO];
79dd360393SKirill A. Shutemov extern u16 __read_mostly tlb_lld_1g[NR_INFO];
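
/*
 * Usage sketch (illustrative, not from the original header): these arrays
 * are filled during CPU detection; the ENTRIES slot holds the size of the
 * corresponding TLB, e.g. the 4K last-level data-TLB entry count:
 *
 *	unsigned int dtlb_4k_entries = tlb_lld_4k[ENTRIES];
 */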
80c4211f42SAlex Shi 
81bb898558SAl Viro /*
82bb898558SAl Viro  *  CPU type and hardware bug flags. Kept separately for each CPU.
83bb898558SAl Viro  *  Members of this structure are referenced in head.S, so think twice
84bb898558SAl Viro  *  before touching them. [mj]
85bb898558SAl Viro  */
86bb898558SAl Viro 
87bb898558SAl Viro struct cpuinfo_x86 {
88bb898558SAl Viro 	__u8			x86;		/* CPU family */
89bb898558SAl Viro 	__u8			x86_vendor;	/* CPU vendor */
90bb898558SAl Viro 	__u8			x86_model;
91bb898558SAl Viro 	__u8			x86_mask;
92bb898558SAl Viro #ifdef CONFIG_X86_32
93bb898558SAl Viro 	char			wp_works_ok;	/* It doesn't on 386's */
94bb898558SAl Viro 
95bb898558SAl Viro 	/* Problems on some 486Dx4's and old 386's: */
96bb898558SAl Viro 	char			rfu;
97bb898558SAl Viro 	char			pad0;
9860e019ebSH. Peter Anvin 	char			pad1;
99bb898558SAl Viro #else
100bb898558SAl Viro 	/* Number of 4K pages in DTLB/ITLB combined: */
101bb898558SAl Viro 	int			x86_tlbsize;
10213c6c532SJan Beulich #endif
103bb898558SAl Viro 	__u8			x86_virt_bits;
104bb898558SAl Viro 	__u8			x86_phys_bits;
105bb898558SAl Viro 	/* CPUID returned core id bits: */
106bb898558SAl Viro 	__u8			x86_coreid_bits;
107bb898558SAl Viro 	/* Max extended CPUID function supported: */
108bb898558SAl Viro 	__u32			extended_cpuid_level;
109bb898558SAl Viro 	/* Maximum supported CPUID level, -1=no CPUID: */
110bb898558SAl Viro 	int			cpuid_level;
11165fc985bSBorislav Petkov 	__u32			x86_capability[NCAPINTS + NBUGINTS];
112bb898558SAl Viro 	char			x86_vendor_id[16];
113bb898558SAl Viro 	char			x86_model_id[64];
114bb898558SAl Viro 	/* In KB - valid for CPUs which support this call: */
115bb898558SAl Viro 	int			x86_cache_size;
116bb898558SAl Viro 	int			x86_cache_alignment;	/* In bytes */
117cbc82b17SPeter P Waskiewicz Jr 	/* Cache QoS architectural values: */
118cbc82b17SPeter P Waskiewicz Jr 	int			x86_cache_max_rmid;	/* max index */
119cbc82b17SPeter P Waskiewicz Jr 	int			x86_cache_occ_scale;	/* scale to bytes */
120bb898558SAl Viro 	int			x86_power;
121bb898558SAl Viro 	unsigned long		loops_per_jiffy;
122bb898558SAl Viro 	/* CPUID returned max cores value: */
123bb898558SAl Viro 	u16			x86_max_cores;
124bb898558SAl Viro 	u16			apicid;
125bb898558SAl Viro 	u16			initial_apicid;
126bb898558SAl Viro 	u16			x86_clflush_size;
127bb898558SAl Viro 	/* number of cores as seen by the OS: */
128bb898558SAl Viro 	u16			booted_cores;
129bb898558SAl Viro 	/* Physical processor id: */
130bb898558SAl Viro 	u16			phys_proc_id;
131bb898558SAl Viro 	/* Core id: */
132bb898558SAl Viro 	u16			cpu_core_id;
1336057b4d3SAndreas Herrmann 	/* Compute unit id */
1346057b4d3SAndreas Herrmann 	u8			compute_unit_id;
135bb898558SAl Viro 	/* Index into per_cpu list: */
136bb898558SAl Viro 	u16			cpu_index;
137506ed6b5SAndi Kleen 	u32			microcode;
1382c773dd3SJan Beulich };
139bb898558SAl Viro 
140bb898558SAl Viro #define X86_VENDOR_INTEL	0
141bb898558SAl Viro #define X86_VENDOR_CYRIX	1
142bb898558SAl Viro #define X86_VENDOR_AMD		2
143bb898558SAl Viro #define X86_VENDOR_UMC		3
144bb898558SAl Viro #define X86_VENDOR_CENTAUR	5
145bb898558SAl Viro #define X86_VENDOR_TRANSMETA	7
146bb898558SAl Viro #define X86_VENDOR_NSC		8
147bb898558SAl Viro #define X86_VENDOR_NUM		9
148bb898558SAl Viro 
149bb898558SAl Viro #define X86_VENDOR_UNKNOWN	0xff
150bb898558SAl Viro 
151bb898558SAl Viro /*
152bb898558SAl Viro  * capabilities of CPUs
153bb898558SAl Viro  */
154bb898558SAl Viro extern struct cpuinfo_x86	boot_cpu_data;
155bb898558SAl Viro extern struct cpuinfo_x86	new_cpu_data;
156bb898558SAl Viro 
157bb898558SAl Viro extern struct tss_struct	doublefault_tss;
1583e0c3737SYinghai Lu extern __u32			cpu_caps_cleared[NCAPINTS];
1593e0c3737SYinghai Lu extern __u32			cpu_caps_set[NCAPINTS];
160bb898558SAl Viro 
161bb898558SAl Viro #ifdef CONFIG_SMP
1622c773dd3SJan Beulich DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
163bb898558SAl Viro #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
164bb898558SAl Viro #else
1657b543a53STejun Heo #define cpu_info		boot_cpu_data
166bb898558SAl Viro #define cpu_data(cpu)		boot_cpu_data
167bb898558SAl Viro #endif
168bb898558SAl Viro 
169bb898558SAl Viro extern const struct seq_operations cpuinfo_op;
170bb898558SAl Viro 
171bb898558SAl Viro #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
172bb898558SAl Viro 
173bb898558SAl Viro extern void cpu_detect(struct cpuinfo_x86 *c);
174bb898558SAl Viro 
175bb898558SAl Viro extern void early_cpu_init(void);
176bb898558SAl Viro extern void identify_boot_cpu(void);
177bb898558SAl Viro extern void identify_secondary_cpu(struct cpuinfo_x86 *);
178bb898558SAl Viro extern void print_cpu_info(struct cpuinfo_x86 *);
17921c3fcf3SYinghai Lu void print_cpu_msr(struct cpuinfo_x86 *);
180bb898558SAl Viro extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
181bb898558SAl Viro extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
18204a15418SAndreas Herrmann extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
183bb898558SAl Viro 
184bb898558SAl Viro extern void detect_extended_topology(struct cpuinfo_x86 *c);
185bb898558SAl Viro extern void detect_ht(struct cpuinfo_x86 *c);
186bb898558SAl Viro 
187d288e1cfSFenghua Yu #ifdef CONFIG_X86_32
188d288e1cfSFenghua Yu extern int have_cpuid_p(void);
189d288e1cfSFenghua Yu #else
190d288e1cfSFenghua Yu static inline int have_cpuid_p(void)
191d288e1cfSFenghua Yu {
192d288e1cfSFenghua Yu 	return 1;
193d288e1cfSFenghua Yu }
194d288e1cfSFenghua Yu #endif
195bb898558SAl Viro static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
196bb898558SAl Viro 				unsigned int *ecx, unsigned int *edx)
197bb898558SAl Viro {
198bb898558SAl Viro 	/* ecx is often an input as well as an output. */
19945a94d7cSSuresh Siddha 	asm volatile("cpuid"
200bb898558SAl Viro 	    : "=a" (*eax),
201bb898558SAl Viro 	      "=b" (*ebx),
202bb898558SAl Viro 	      "=c" (*ecx),
203bb898558SAl Viro 	      "=d" (*edx)
204506ed6b5SAndi Kleen 	    : "0" (*eax), "2" (*ecx)
205506ed6b5SAndi Kleen 	    : "memory");
206bb898558SAl Viro }
207bb898558SAl Viro 
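/* CR3 holds a physical address, hence the __pa() conversion of the PGD: */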
208bb898558SAl Viro static inline void load_cr3(pgd_t *pgdir)
209bb898558SAl Viro {
210bb898558SAl Viro 	write_cr3(__pa(pgdir));
211bb898558SAl Viro }
212bb898558SAl Viro 
213bb898558SAl Viro #ifdef CONFIG_X86_32
214bb898558SAl Viro /* This is the TSS defined by the hardware. */
215bb898558SAl Viro struct x86_hw_tss {
216bb898558SAl Viro 	unsigned short		back_link, __blh;
217bb898558SAl Viro 	unsigned long		sp0;
218bb898558SAl Viro 	unsigned short		ss0, __ss0h;
219cf9328ccSAndy Lutomirski 	unsigned long		sp1;
22076e4c490SAndy Lutomirski 
22176e4c490SAndy Lutomirski 	/*
222cf9328ccSAndy Lutomirski 	 * We don't use ring 1, so ss1 is a convenient scratch space in
223cf9328ccSAndy Lutomirski 	 * the same cacheline as sp0.  We use ss1 to cache the value in
224cf9328ccSAndy Lutomirski 	 * MSR_IA32_SYSENTER_CS.  When we context switch
225cf9328ccSAndy Lutomirski 	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
226cf9328ccSAndy Lutomirski 	 * written matches ss1, and, if it's not, then we wrmsr the new
227cf9328ccSAndy Lutomirski 	 * value and update ss1.
22876e4c490SAndy Lutomirski 	 *
229cf9328ccSAndy Lutomirski 	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
230cf9328ccSAndy Lutomirski 	 * that we set it to zero in vm86 tasks to avoid corrupting the
231cf9328ccSAndy Lutomirski 	 * stack if we were to go through the sysenter path from vm86
232cf9328ccSAndy Lutomirski 	 * mode.
23376e4c490SAndy Lutomirski 	 */
23476e4c490SAndy Lutomirski 	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */
23576e4c490SAndy Lutomirski 
23676e4c490SAndy Lutomirski 	unsigned short		__ss1h;
237bb898558SAl Viro 	unsigned long		sp2;
238bb898558SAl Viro 	unsigned short		ss2, __ss2h;
239bb898558SAl Viro 	unsigned long		__cr3;
240bb898558SAl Viro 	unsigned long		ip;
241bb898558SAl Viro 	unsigned long		flags;
242bb898558SAl Viro 	unsigned long		ax;
243bb898558SAl Viro 	unsigned long		cx;
244bb898558SAl Viro 	unsigned long		dx;
245bb898558SAl Viro 	unsigned long		bx;
246bb898558SAl Viro 	unsigned long		sp;
247bb898558SAl Viro 	unsigned long		bp;
248bb898558SAl Viro 	unsigned long		si;
249bb898558SAl Viro 	unsigned long		di;
250bb898558SAl Viro 	unsigned short		es, __esh;
251bb898558SAl Viro 	unsigned short		cs, __csh;
252bb898558SAl Viro 	unsigned short		ss, __ssh;
253bb898558SAl Viro 	unsigned short		ds, __dsh;
254bb898558SAl Viro 	unsigned short		fs, __fsh;
255bb898558SAl Viro 	unsigned short		gs, __gsh;
256bb898558SAl Viro 	unsigned short		ldt, __ldth;
257bb898558SAl Viro 	unsigned short		trace;
258bb898558SAl Viro 	unsigned short		io_bitmap_base;
259bb898558SAl Viro 
260bb898558SAl Viro } __attribute__((packed));
261bb898558SAl Viro #else
262bb898558SAl Viro struct x86_hw_tss {
263bb898558SAl Viro 	u32			reserved1;
264bb898558SAl Viro 	u64			sp0;
265bb898558SAl Viro 	u64			sp1;
266bb898558SAl Viro 	u64			sp2;
267bb898558SAl Viro 	u64			reserved2;
268bb898558SAl Viro 	u64			ist[7];
269bb898558SAl Viro 	u32			reserved3;
270bb898558SAl Viro 	u32			reserved4;
271bb898558SAl Viro 	u16			reserved5;
272bb898558SAl Viro 	u16			io_bitmap_base;
273bb898558SAl Viro 
274bb898558SAl Viro } __attribute__((packed)) ____cacheline_aligned;
275bb898558SAl Viro #endif
276bb898558SAl Viro 
277bb898558SAl Viro /*
278bb898558SAl Viro  * IO-bitmap sizes:
279bb898558SAl Viro  */
280bb898558SAl Viro #define IO_BITMAP_BITS			65536
281bb898558SAl Viro #define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
282bb898558SAl Viro #define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
283bb898558SAl Viro #define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
284bb898558SAl Viro #define INVALID_IO_BITMAP_OFFSET	0x8000
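
/*
 * Worked out (illustrative): 65536 bits cover the whole 16-bit I/O port
 * space, so IO_BITMAP_BYTES is 65536/8 = 8192, and IO_BITMAP_LONGS is
 * 8192/sizeof(long), i.e. 1024 longs on 64-bit and 2048 on 32-bit.
 */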
285bb898558SAl Viro 
286bb898558SAl Viro struct tss_struct {
287bb898558SAl Viro 	/*
288bb898558SAl Viro 	 * The hardware state:
289bb898558SAl Viro 	 */
290bb898558SAl Viro 	struct x86_hw_tss	x86_tss;
291bb898558SAl Viro 
292bb898558SAl Viro 	/*
293bb898558SAl Viro 	 * The extra 1 is there because the CPU will access an
294bb898558SAl Viro 	 * additional byte beyond the end of the IO permission
295bb898558SAl Viro 	 * bitmap. The extra byte must be all 1 bits, and must
296bb898558SAl Viro 	 * be within the limit.
297bb898558SAl Viro 	 */
298bb898558SAl Viro 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
299bb898558SAl Viro 
3006dcc9414SAndy Lutomirski #ifdef CONFIG_X86_32
301bb898558SAl Viro 	/*
302*2a41aa4fSAndy Lutomirski 	 * Space for the temporary SYSENTER stack.
303bb898558SAl Viro 	 */
304*2a41aa4fSAndy Lutomirski 	unsigned long		SYSENTER_stack_canary;
305d828c71fSDenys Vlasenko 	unsigned long		SYSENTER_stack[64];
3066dcc9414SAndy Lutomirski #endif
307bb898558SAl Viro 
308bb898558SAl Viro } ____cacheline_aligned;
309bb898558SAl Viro 
31024933b82SAndy Lutomirski DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
311bb898558SAl Viro 
312a7fcf28dSAndy Lutomirski #ifdef CONFIG_X86_32
313a7fcf28dSAndy Lutomirski DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
314a7fcf28dSAndy Lutomirski #endif
315a7fcf28dSAndy Lutomirski 
316bb898558SAl Viro /*
317bb898558SAl Viro  * Save the original ist values for checking stack pointers during debugging
318bb898558SAl Viro  */
319bb898558SAl Viro struct orig_ist {
320bb898558SAl Viro 	unsigned long		ist[7];
321bb898558SAl Viro };
322bb898558SAl Viro 
323bb898558SAl Viro #ifdef CONFIG_X86_64
324bb898558SAl Viro DECLARE_PER_CPU(struct orig_ist, orig_ist);
32526f80bd6SBrian Gerst 
326947e76cdSBrian Gerst union irq_stack_union {
327947e76cdSBrian Gerst 	char irq_stack[IRQ_STACK_SIZE];
328947e76cdSBrian Gerst 	/*
329947e76cdSBrian Gerst 	 * GCC hardcodes the stack canary as %gs:40.  Since the
330947e76cdSBrian Gerst 	 * irq_stack is the object at %gs:0, we reserve the bottom
331947e76cdSBrian Gerst 	 * 48 bytes of the irq stack for the canary.
332947e76cdSBrian Gerst 	 */
333947e76cdSBrian Gerst 	struct {
334947e76cdSBrian Gerst 		char gs_base[40];
335947e76cdSBrian Gerst 		unsigned long stack_canary;
336947e76cdSBrian Gerst 	};
337947e76cdSBrian Gerst };
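
/*
 * Sanity-check sketch (an illustration, not from the original header):
 * gs_base[40] pads the canary to exactly %gs:40, which could be asserted
 * at build time with:
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */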
338947e76cdSBrian Gerst 
339277d5b40SAndi Kleen DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
3402add8e23SBrian Gerst DECLARE_INIT_PER_CPU(irq_stack_union);
3412add8e23SBrian Gerst 
34226f80bd6SBrian Gerst DECLARE_PER_CPU(char *, irq_stack_ptr);
3439766cdbcSJaswinder Singh Rajput DECLARE_PER_CPU(unsigned int, irq_count);
3449766cdbcSJaswinder Singh Rajput extern asmlinkage void ignore_sysret(void);
34560a5317fSTejun Heo #else	/* X86_64 */
34660a5317fSTejun Heo #ifdef CONFIG_CC_STACKPROTECTOR
3471ea0d14eSJeremy Fitzhardinge /*
3481ea0d14eSJeremy Fitzhardinge  * Make sure the stack canary segment base is cache-line aligned:
3491ea0d14eSJeremy Fitzhardinge  *   "For Intel Atom processors, avoid non zero segment base address
3501ea0d14eSJeremy Fitzhardinge  *    that is not aligned to cache line boundary at all cost."
3511ea0d14eSJeremy Fitzhardinge  * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
3521ea0d14eSJeremy Fitzhardinge  */
3531ea0d14eSJeremy Fitzhardinge struct stack_canary {
3541ea0d14eSJeremy Fitzhardinge 	char __pad[20];		/* canary at %gs:20 */
3551ea0d14eSJeremy Fitzhardinge 	unsigned long canary;
3561ea0d14eSJeremy Fitzhardinge };
35753f82452SJeremy Fitzhardinge DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
358bb898558SAl Viro #endif
359198d208dSSteven Rostedt /*
360198d208dSSteven Rostedt  * per-CPU IRQ handling stacks
361198d208dSSteven Rostedt  */
362198d208dSSteven Rostedt struct irq_stack {
363198d208dSSteven Rostedt 	u32                     stack[THREAD_SIZE/sizeof(u32)];
364198d208dSSteven Rostedt } __aligned(THREAD_SIZE);
365198d208dSSteven Rostedt 
366198d208dSSteven Rostedt DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
367198d208dSSteven Rostedt DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
36860a5317fSTejun Heo #endif	/* X86_64 */
369bb898558SAl Viro 
370bb898558SAl Viro extern unsigned int xstate_size;
371bb898558SAl Viro 
37224f1e32cSFrederic Weisbecker struct perf_event;
37324f1e32cSFrederic Weisbecker 
374bb898558SAl Viro struct thread_struct {
375bb898558SAl Viro 	/* Cached TLS descriptors: */
376bb898558SAl Viro 	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
377bb898558SAl Viro 	unsigned long		sp0;
378bb898558SAl Viro 	unsigned long		sp;
379bb898558SAl Viro #ifdef CONFIG_X86_32
380bb898558SAl Viro 	unsigned long		sysenter_cs;
381bb898558SAl Viro #else
382bb898558SAl Viro 	unsigned short		es;
383bb898558SAl Viro 	unsigned short		ds;
384bb898558SAl Viro 	unsigned short		fsindex;
385bb898558SAl Viro 	unsigned short		gsindex;
386bb898558SAl Viro #endif
3870c23590fSAlexey Dobriyan #ifdef CONFIG_X86_32
388bb898558SAl Viro 	unsigned long		ip;
3890c23590fSAlexey Dobriyan #endif
390d756f4adSAlexey Dobriyan #ifdef CONFIG_X86_64
391bb898558SAl Viro 	unsigned long		fs;
392d756f4adSAlexey Dobriyan #endif
393bb898558SAl Viro 	unsigned long		gs;
394c5bedc68SIngo Molnar 
39524f1e32cSFrederic Weisbecker 	/* Saved state of ptrace hardware breakpoints */
39624f1e32cSFrederic Weisbecker 	struct perf_event	*ptrace_bps[HBP_NUM];
39724f1e32cSFrederic Weisbecker 	/* Debug status used for traps, single steps, etc.: */
398bb898558SAl Viro 	unsigned long           debugreg6;
399326264a0SFrederic Weisbecker 	/* Keep track of the exact dr7 value set by the user */
400326264a0SFrederic Weisbecker 	unsigned long           ptrace_dr7;
401bb898558SAl Viro 	/* Fault info: */
402bb898558SAl Viro 	unsigned long		cr2;
40351e7dc70SSrikar Dronamraju 	unsigned long		trap_nr;
404bb898558SAl Viro 	unsigned long		error_code;
4059fda6a06SBrian Gerst #ifdef CONFIG_VM86
406bb898558SAl Viro 	/* Virtual 86 mode info */
4079fda6a06SBrian Gerst 	struct vm86		*vm86;
408bb898558SAl Viro #endif
409bb898558SAl Viro 	/* IO permissions: */
410bb898558SAl Viro 	unsigned long		*io_bitmap_ptr;
411bb898558SAl Viro 	unsigned long		iopl;
412bb898558SAl Viro 	/* Max allowed port in the bitmap, in bytes: */
413bb898558SAl Viro 	unsigned		io_bitmap_max;
4140c8c0f03SDave Hansen 
4150c8c0f03SDave Hansen 	/* Floating point and extended processor state */
4160c8c0f03SDave Hansen 	struct fpu		fpu;
4170c8c0f03SDave Hansen 	/*
4180c8c0f03SDave Hansen 	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
4190c8c0f03SDave Hansen 	 * the end.
4200c8c0f03SDave Hansen 	 */
421bb898558SAl Viro };
422bb898558SAl Viro 
423bb898558SAl Viro /*
424bb898558SAl Viro  * Set IOPL bits in EFLAGS from given mask
425bb898558SAl Viro  */
426bb898558SAl Viro static inline void native_set_iopl_mask(unsigned mask)
427bb898558SAl Viro {
428bb898558SAl Viro #ifdef CONFIG_X86_32
429bb898558SAl Viro 	unsigned int reg;
430bb898558SAl Viro 
431bb898558SAl Viro 	asm volatile ("pushfl;"
432bb898558SAl Viro 		      "popl %0;"
433bb898558SAl Viro 		      "andl %1, %0;"
434bb898558SAl Viro 		      "orl %2, %0;"
435bb898558SAl Viro 		      "pushl %0;"
436bb898558SAl Viro 		      "popfl"
437bb898558SAl Viro 		      : "=&r" (reg)
438bb898558SAl Viro 		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
439bb898558SAl Viro #endif
440bb898558SAl Viro }
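
/*
 * Usage sketch (illustrative): X86_EFLAGS_IOPL covers both IOPL bits in
 * EFLAGS, so passing it raises IOPL to 3 (full port I/O access for ring 3)
 * and a zero mask lowers IOPL back to 0:
 *
 *	native_set_iopl_mask(X86_EFLAGS_IOPL);
 *	native_set_iopl_mask(0);
 */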
441bb898558SAl Viro 
442bb898558SAl Viro static inline void
443bb898558SAl Viro native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
444bb898558SAl Viro {
445bb898558SAl Viro 	tss->x86_tss.sp0 = thread->sp0;
446bb898558SAl Viro #ifdef CONFIG_X86_32
447bb898558SAl Viro 	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
448bb898558SAl Viro 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
449bb898558SAl Viro 		tss->x86_tss.ss1 = thread->sysenter_cs;
450bb898558SAl Viro 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
451bb898558SAl Viro 	}
452bb898558SAl Viro #endif
453bb898558SAl Viro }
454bb898558SAl Viro 
455bb898558SAl Viro static inline void native_swapgs(void)
456bb898558SAl Viro {
457bb898558SAl Viro #ifdef CONFIG_X86_64
458bb898558SAl Viro 	asm volatile("swapgs" ::: "memory");
459bb898558SAl Viro #endif
460bb898558SAl Viro }
461bb898558SAl Viro 
462a7fcf28dSAndy Lutomirski static inline unsigned long current_top_of_stack(void)
4638ef46a67SAndy Lutomirski {
464a7fcf28dSAndy Lutomirski #ifdef CONFIG_X86_64
46524933b82SAndy Lutomirski 	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
466a7fcf28dSAndy Lutomirski #else
467a7fcf28dSAndy Lutomirski 	/* sp0 on x86_32 is special in and around vm86 mode. */
468a7fcf28dSAndy Lutomirski 	return this_cpu_read_stable(cpu_current_top_of_stack);
469a7fcf28dSAndy Lutomirski #endif
4708ef46a67SAndy Lutomirski }
4718ef46a67SAndy Lutomirski 
472bb898558SAl Viro #ifdef CONFIG_PARAVIRT
473bb898558SAl Viro #include <asm/paravirt.h>
474bb898558SAl Viro #else
475bb898558SAl Viro #define __cpuid			native_cpuid
476bb898558SAl Viro #define paravirt_enabled()	0
477d8c98a1dSDavid Vrabel #define paravirt_has(x) 	0
478bb898558SAl Viro 
479bb898558SAl Viro static inline void load_sp0(struct tss_struct *tss,
480bb898558SAl Viro 			    struct thread_struct *thread)
481bb898558SAl Viro {
482bb898558SAl Viro 	native_load_sp0(tss, thread);
483bb898558SAl Viro }
484bb898558SAl Viro 
485bb898558SAl Viro #define set_iopl_mask native_set_iopl_mask
486bb898558SAl Viro #endif /* CONFIG_PARAVIRT */
487bb898558SAl Viro 
488bb898558SAl Viro typedef struct {
489bb898558SAl Viro 	unsigned long		seg;
490bb898558SAl Viro } mm_segment_t;
491bb898558SAl Viro 
492bb898558SAl Viro 
493bb898558SAl Viro /* Free all resources held by a thread. */
494bb898558SAl Viro extern void release_thread(struct task_struct *);
495bb898558SAl Viro 
496bb898558SAl Viro unsigned long get_wchan(struct task_struct *p);
497bb898558SAl Viro 
498bb898558SAl Viro /*
499bb898558SAl Viro  * Generic CPUID function.
500bb898558SAl Viro  * Clears %ecx first, since some CPUs (Cyrix MII) leave %ecx untouched,
501bb898558SAl Viro  * which would result in stale register contents being returned.
502bb898558SAl Viro  */
503bb898558SAl Viro static inline void cpuid(unsigned int op,
504bb898558SAl Viro 			 unsigned int *eax, unsigned int *ebx,
505bb898558SAl Viro 			 unsigned int *ecx, unsigned int *edx)
506bb898558SAl Viro {
507bb898558SAl Viro 	*eax = op;
508bb898558SAl Viro 	*ecx = 0;
509bb898558SAl Viro 	__cpuid(eax, ebx, ecx, edx);
510bb898558SAl Viro }
511bb898558SAl Viro 
512bb898558SAl Viro /* Some CPUID calls want 'count' to be placed in ecx */
513bb898558SAl Viro static inline void cpuid_count(unsigned int op, int count,
514bb898558SAl Viro 			       unsigned int *eax, unsigned int *ebx,
515bb898558SAl Viro 			       unsigned int *ecx, unsigned int *edx)
516bb898558SAl Viro {
517bb898558SAl Viro 	*eax = op;
518bb898558SAl Viro 	*ecx = count;
519bb898558SAl Viro 	__cpuid(eax, ebx, ecx, edx);
520bb898558SAl Viro }
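
/*
 * Usage sketch (illustrative, not from the original header): leaf 0 returns
 * the vendor string ("GenuineIntel" etc.) in %ebx, %edx, %ecx order, while
 * leaf 4 (Intel's deterministic cache parameters) takes a subleaf index and
 * hence goes through cpuid_count():
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	char vendor[13];
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';
 *
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 */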
521bb898558SAl Viro 
522bb898558SAl Viro /*
523bb898558SAl Viro  * CPUID functions returning a single datum
524bb898558SAl Viro  */
525bb898558SAl Viro static inline unsigned int cpuid_eax(unsigned int op)
526bb898558SAl Viro {
527bb898558SAl Viro 	unsigned int eax, ebx, ecx, edx;
528bb898558SAl Viro 
529bb898558SAl Viro 	cpuid(op, &eax, &ebx, &ecx, &edx);
530bb898558SAl Viro 
531bb898558SAl Viro 	return eax;
532bb898558SAl Viro }
533bb898558SAl Viro 
534bb898558SAl Viro static inline unsigned int cpuid_ebx(unsigned int op)
535bb898558SAl Viro {
536bb898558SAl Viro 	unsigned int eax, ebx, ecx, edx;
537bb898558SAl Viro 
538bb898558SAl Viro 	cpuid(op, &eax, &ebx, &ecx, &edx);
539bb898558SAl Viro 
540bb898558SAl Viro 	return ebx;
541bb898558SAl Viro }
542bb898558SAl Viro 
543bb898558SAl Viro static inline unsigned int cpuid_ecx(unsigned int op)
544bb898558SAl Viro {
545bb898558SAl Viro 	unsigned int eax, ebx, ecx, edx;
546bb898558SAl Viro 
547bb898558SAl Viro 	cpuid(op, &eax, &ebx, &ecx, &edx);
548bb898558SAl Viro 
549bb898558SAl Viro 	return ecx;
550bb898558SAl Viro }
551bb898558SAl Viro 
552bb898558SAl Viro static inline unsigned int cpuid_edx(unsigned int op)
553bb898558SAl Viro {
554bb898558SAl Viro 	unsigned int eax, ebx, ecx, edx;
555bb898558SAl Viro 
556bb898558SAl Viro 	cpuid(op, &eax, &ebx, &ecx, &edx);
557bb898558SAl Viro 
558bb898558SAl Viro 	return edx;
559bb898558SAl Viro }
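
/*
 * E.g. (sketch): cpuid_eax(0x80000000) returns the highest extended CPUID
 * leaf, the value cached in cpuinfo_x86.extended_cpuid_level above.
 */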
560bb898558SAl Viro 
561bb898558SAl Viro /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
5620b101e62SDenys Vlasenko static __always_inline void rep_nop(void)
563bb898558SAl Viro {
564bb898558SAl Viro 	asm volatile("rep; nop" ::: "memory");
565bb898558SAl Viro }
566bb898558SAl Viro 
5670b101e62SDenys Vlasenko static __always_inline void cpu_relax(void)
568bb898558SAl Viro {
569bb898558SAl Viro 	rep_nop();
570bb898558SAl Viro }
571bb898558SAl Viro 
5723a6bfbc9SDavidlohr Bueso #define cpu_relax_lowlatency() cpu_relax()
5733a6bfbc9SDavidlohr Bueso 
5745367b688SBen Hutchings /* Stop speculative execution and prefetching of modified code. */
575bb898558SAl Viro static inline void sync_core(void)
576bb898558SAl Viro {
577bb898558SAl Viro 	int tmp;
578bb898558SAl Viro 
579eb068e78SH. Peter Anvin #ifdef CONFIG_M486
58045c39fb0SH. Peter Anvin 	/*
58145c39fb0SH. Peter Anvin 	 * Do a CPUID if available, otherwise do a jump.  The jump
58245c39fb0SH. Peter Anvin 	 * can conveniently enough be the jump around CPUID.
58345c39fb0SH. Peter Anvin 	 */
58445c39fb0SH. Peter Anvin 	asm volatile("cmpl %2,%1\n\t"
58545c39fb0SH. Peter Anvin 		     "jl 1f\n\t"
58645c39fb0SH. Peter Anvin 		     "cpuid\n"
58745c39fb0SH. Peter Anvin 		     "1:"
58845c39fb0SH. Peter Anvin 		     : "=a" (tmp)
58945c39fb0SH. Peter Anvin 		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
590bb898558SAl Viro 		     : "ebx", "ecx", "edx", "memory");
59145c39fb0SH. Peter Anvin #else
59245c39fb0SH. Peter Anvin 	/*
59345c39fb0SH. Peter Anvin 	 * CPUID is a barrier to speculative execution.
59445c39fb0SH. Peter Anvin 	 * Prefetched instructions are automatically
59545c39fb0SH. Peter Anvin 	 * invalidated when modified.
59645c39fb0SH. Peter Anvin 	 */
59745c39fb0SH. Peter Anvin 	asm volatile("cpuid"
59845c39fb0SH. Peter Anvin 		     : "=a" (tmp)
59945c39fb0SH. Peter Anvin 		     : "0" (1)
60045c39fb0SH. Peter Anvin 		     : "ebx", "ecx", "edx", "memory");
60145c39fb0SH. Peter Anvin #endif
602bb898558SAl Viro }
603bb898558SAl Viro 
604bb898558SAl Viro extern void select_idle_routine(const struct cpuinfo_x86 *c);
60502c68a02SLen Brown extern void init_amd_e400_c1e_mask(void);
606bb898558SAl Viro 
607bb898558SAl Viro extern unsigned long		boot_option_idle_override;
60802c68a02SLen Brown extern bool			amd_e400_c1e_detected;
609bb898558SAl Viro 
610d1896049SThomas Renninger enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
61169fb3676SLen Brown 			 IDLE_POLL};
612d1896049SThomas Renninger 
613bb898558SAl Viro extern void enable_sep_cpu(void);
614bb898558SAl Viro extern int sysenter_setup(void);
615bb898558SAl Viro 
61629c84391SJan Kiszka extern void early_trap_init(void);
6178170e6beSH. Peter Anvin void early_trap_pf_init(void);
61829c84391SJan Kiszka 
619bb898558SAl Viro /* Defined in head.S */
620bb898558SAl Viro extern struct desc_ptr		early_gdt_descr;
621bb898558SAl Viro 
622bb898558SAl Viro extern void cpu_set_gdt(int);
623552be871SBrian Gerst extern void switch_to_new_gdt(int);
62411e3a840SJeremy Fitzhardinge extern void load_percpu_segment(int);
625bb898558SAl Viro extern void cpu_init(void);
626bb898558SAl Viro 
627c2724775SMarkus Metzger static inline unsigned long get_debugctlmsr(void)
628c2724775SMarkus Metzger {
629c2724775SMarkus Metzger 	unsigned long debugctlmsr = 0;
630c2724775SMarkus Metzger 
631c2724775SMarkus Metzger #ifndef CONFIG_X86_DEBUGCTLMSR
632c2724775SMarkus Metzger 	if (boot_cpu_data.x86 < 6)
633c2724775SMarkus Metzger 		return 0;
634c2724775SMarkus Metzger #endif
635c2724775SMarkus Metzger 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
636c2724775SMarkus Metzger 
637c2724775SMarkus Metzger 	return debugctlmsr;
638c2724775SMarkus Metzger }
639c2724775SMarkus Metzger 
640bb898558SAl Viro static inline void update_debugctlmsr(unsigned long debugctlmsr)
641bb898558SAl Viro {
642bb898558SAl Viro #ifndef CONFIG_X86_DEBUGCTLMSR
643bb898558SAl Viro 	if (boot_cpu_data.x86 < 6)
644bb898558SAl Viro 		return;
645bb898558SAl Viro #endif
646bb898558SAl Viro 	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
647bb898558SAl Viro }
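
/*
 * Read-modify-write sketch (illustrative; DEBUGCTLMSR_BTF is defined in
 * <asm/msr-index.h>): enabling branch-trap single-stepping, in the spirit
 * of set_task_blockstep() below:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */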
648bb898558SAl Viro 
6499bd1190aSOleg Nesterov extern void set_task_blockstep(struct task_struct *task, bool on);
6509bd1190aSOleg Nesterov 
651bb898558SAl Viro /* Boot loader type from the setup header: */
652bb898558SAl Viro extern int			bootloader_type;
6535031296cSH. Peter Anvin extern int			bootloader_version;
654bb898558SAl Viro 
655bb898558SAl Viro extern char			ignore_fpu_irq;
656bb898558SAl Viro 
657bb898558SAl Viro #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
658bb898558SAl Viro #define ARCH_HAS_PREFETCHW
659bb898558SAl Viro #define ARCH_HAS_SPINLOCK_PREFETCH
660bb898558SAl Viro 
661bb898558SAl Viro #ifdef CONFIG_X86_32
662a930dc45SBorislav Petkov # define BASE_PREFETCH		""
663bb898558SAl Viro # define ARCH_HAS_PREFETCH
664bb898558SAl Viro #else
665a930dc45SBorislav Petkov # define BASE_PREFETCH		"prefetcht0 %P1"
666bb898558SAl Viro #endif
667bb898558SAl Viro 
668bb898558SAl Viro /*
669bb898558SAl Viro  * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
670bb898558SAl Viro  *
671bb898558SAl Viro  * It's not worth caring about 3dnow prefetches for the K6
672bb898558SAl Viro  * because they are microcoded there and very slow.
673bb898558SAl Viro  */
674bb898558SAl Viro static inline void prefetch(const void *x)
675bb898558SAl Viro {
676a930dc45SBorislav Petkov 	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
677bb898558SAl Viro 			  X86_FEATURE_XMM,
678a930dc45SBorislav Petkov 			  "m" (*(const char *)x));
679bb898558SAl Viro }
680bb898558SAl Viro 
681bb898558SAl Viro /*
682bb898558SAl Viro  * 3dnow prefetch to get an exclusive cache line.
683bb898558SAl Viro  * Useful for spinlocks to avoid one state transition in the
684bb898558SAl Viro  * cache coherency protocol:
685bb898558SAl Viro  */
686bb898558SAl Viro static inline void prefetchw(const void *x)
687bb898558SAl Viro {
688a930dc45SBorislav Petkov 	alternative_input(BASE_PREFETCH, "prefetchw %P1",
689a930dc45SBorislav Petkov 			  X86_FEATURE_3DNOWPREFETCH,
690a930dc45SBorislav Petkov 			  "m" (*(const char *)x));
691bb898558SAl Viro }
692bb898558SAl Viro 
693bb898558SAl Viro static inline void spin_lock_prefetch(const void *x)
694bb898558SAl Viro {
695bb898558SAl Viro 	prefetchw(x);
696bb898558SAl Viro }
697bb898558SAl Viro 
698d9e05cc5SAndy Lutomirski #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
699d9e05cc5SAndy Lutomirski 			   TOP_OF_KERNEL_STACK_PADDING)
700d9e05cc5SAndy Lutomirski 
701bb898558SAl Viro #ifdef CONFIG_X86_32
702bb898558SAl Viro /*
703bb898558SAl Viro  * User space process size: 3GB (default).
704bb898558SAl Viro  */
705bb898558SAl Viro #define TASK_SIZE		PAGE_OFFSET
706d9517346SIngo Molnar #define TASK_SIZE_MAX		TASK_SIZE
707bb898558SAl Viro #define STACK_TOP		TASK_SIZE
708bb898558SAl Viro #define STACK_TOP_MAX		STACK_TOP
709bb898558SAl Viro 
710bb898558SAl Viro #define INIT_THREAD  {							  \
711d9e05cc5SAndy Lutomirski 	.sp0			= TOP_OF_INIT_STACK,			  \
712bb898558SAl Viro 	.sysenter_cs		= __KERNEL_CS,				  \
713bb898558SAl Viro 	.io_bitmap_ptr		= NULL,					  \
714bb898558SAl Viro }
715bb898558SAl Viro 
716bb898558SAl Viro extern unsigned long thread_saved_pc(struct task_struct *tsk);
717bb898558SAl Viro 
718bb898558SAl Viro /*
7195c39403eSDenys Vlasenko  * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
720bb898558SAl Viro  * This is necessary to guarantee that the entire "struct pt_regs"
721b595076aSUwe Kleine-König  * is accessible even if the CPU hasn't stored the SS/ESP registers
722bb898558SAl Viro  * on the stack (interrupt gate does not save these registers
723bb898558SAl Viro  * when switching to the same priv ring).
724bb898558SAl Viro  * Therefore beware: accessing the ss/esp fields of the
725bb898558SAl Viro  * "struct pt_regs" is possible, but they may contain the
726bb898558SAl Viro  * completely wrong values.
727bb898558SAl Viro  */
728bb898558SAl Viro #define task_pt_regs(task) \
729bb898558SAl Viro ({									\
7305c39403eSDenys Vlasenko 	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
7315c39403eSDenys Vlasenko 	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
7325c39403eSDenys Vlasenko 	((struct pt_regs *)__ptr) - 1;					\
733bb898558SAl Viro })
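
/*
 * Resulting layout, sketched (the ring0 stack grows down from the top of
 * the stack page):
 *
 *	task_stack_page(task) + THREAD_SIZE	<- top of kernel stack
 *	  - TOP_OF_KERNEL_STACK_PADDING		<- the 8 reserved bytes
 *	  - sizeof(struct pt_regs)		<- what task_pt_regs() returns
 */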
734bb898558SAl Viro 
735bb898558SAl Viro #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
736bb898558SAl Viro 
737bb898558SAl Viro #else
738bb898558SAl Viro /*
73907114f0fSAndy Lutomirski  * User space process size. 47bits minus one guard page.  The guard
74007114f0fSAndy Lutomirski  * page is necessary on Intel CPUs: if a SYSCALL instruction is at
74107114f0fSAndy Lutomirski  * the highest possible canonical userspace address, then that
74207114f0fSAndy Lutomirski  * syscall will enter the kernel with a non-canonical return
74307114f0fSAndy Lutomirski  * address, and SYSRET will explode dangerously.  We avoid this
74407114f0fSAndy Lutomirski  * particular problem by preventing anything from being mapped
74507114f0fSAndy Lutomirski  * at the maximum canonical address.
746bb898558SAl Viro  */
747d9517346SIngo Molnar #define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
748bb898558SAl Viro 
749bb898558SAl Viro /* This decides where the kernel will search for a free chunk of vm
750bb898558SAl Viro  * space during mmap() calls.
751bb898558SAl Viro  */
752bb898558SAl Viro #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
753bb898558SAl Viro 					0xc0000000 : 0xFFFFe000)
754bb898558SAl Viro 
7556bd33008SH. Peter Anvin #define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
756d9517346SIngo Molnar 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
7576bd33008SH. Peter Anvin #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
758d9517346SIngo Molnar 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
759bb898558SAl Viro 
760bb898558SAl Viro #define STACK_TOP		TASK_SIZE
761d9517346SIngo Molnar #define STACK_TOP_MAX		TASK_SIZE_MAX
762bb898558SAl Viro 
763bb898558SAl Viro #define INIT_THREAD  { \
764d9e05cc5SAndy Lutomirski 	.sp0 = TOP_OF_INIT_STACK \
765bb898558SAl Viro }
766bb898558SAl Viro 
767bb898558SAl Viro /*
768bb898558SAl Viro  * Return saved PC of a blocked thread.
769bb898558SAl Viro  * What is this good for? It will always be the scheduler or ret_from_fork.
770bb898558SAl Viro  */
77175edb54aSDmitry Vyukov #define thread_saved_pc(t)	READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
772bb898558SAl Viro 
773bb898558SAl Viro #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
77489240ba0SStefani Seibold extern unsigned long KSTK_ESP(struct task_struct *task);
775d046ff8bSH. J. Lu 
776bb898558SAl Viro #endif /* CONFIG_X86_64 */
777bb898558SAl Viro 
778bb898558SAl Viro extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
779bb898558SAl Viro 					       unsigned long new_sp);
780bb898558SAl Viro 
781bb898558SAl Viro /*
782bb898558SAl Viro  * This decides where the kernel will search for a free chunk of vm
783bb898558SAl Viro  * space during mmap() calls.
784bb898558SAl Viro  */
785bb898558SAl Viro #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
786bb898558SAl Viro 
787bb898558SAl Viro #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
788bb898558SAl Viro 
789bb898558SAl Viro /* Get/set a process' ability to use the timestamp counter instruction */
790bb898558SAl Viro #define GET_TSC_CTL(adr)	get_tsc_mode((adr))
791bb898558SAl Viro #define SET_TSC_CTL(val)	set_tsc_mode((val))
792bb898558SAl Viro 
793bb898558SAl Viro extern int get_tsc_mode(unsigned long adr);
794bb898558SAl Viro extern int set_tsc_mode(unsigned int val);
795bb898558SAl Viro 
796fe3d197fSDave Hansen /* Register/unregister a process' MPX related resource */
79746a6e0cfSDave Hansen #define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
79846a6e0cfSDave Hansen #define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
799fe3d197fSDave Hansen 
800fe3d197fSDave Hansen #ifdef CONFIG_X86_INTEL_MPX
80146a6e0cfSDave Hansen extern int mpx_enable_management(void);
80246a6e0cfSDave Hansen extern int mpx_disable_management(void);
803fe3d197fSDave Hansen #else
80446a6e0cfSDave Hansen static inline int mpx_enable_management(void)
805fe3d197fSDave Hansen {
806fe3d197fSDave Hansen 	return -EINVAL;
807fe3d197fSDave Hansen }
80846a6e0cfSDave Hansen static inline int mpx_disable_management(void)
809fe3d197fSDave Hansen {
810fe3d197fSDave Hansen 	return -EINVAL;
811fe3d197fSDave Hansen }
812fe3d197fSDave Hansen #endif /* CONFIG_X86_INTEL_MPX */
813fe3d197fSDave Hansen 
8148b84c8dfSDaniel J Blueman extern u16 amd_get_nb_id(int cpu);
815cc2749e4SAravind Gopalakrishnan extern u32 amd_get_nodes_per_socket(void);
8166a812691SAndreas Herrmann 
81796e39ac0SJason Wang static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
81896e39ac0SJason Wang {
81996e39ac0SJason Wang 	uint32_t base, eax, signature[3];
82096e39ac0SJason Wang 
82196e39ac0SJason Wang 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
82296e39ac0SJason Wang 		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
82396e39ac0SJason Wang 
82496e39ac0SJason Wang 		if (!memcmp(sig, signature, 12) &&
82596e39ac0SJason Wang 		    (leaves == 0 || ((eax - base) >= leaves)))
82696e39ac0SJason Wang 			return base;
82796e39ac0SJason Wang 	}
82896e39ac0SJason Wang 
82996e39ac0SJason Wang 	return 0;
83096e39ac0SJason Wang }
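
/*
 * Detection sketch (illustrative; the 12-byte signature below is the one
 * KVM advertises, cf. <asm/kvm_para.h>). A non-zero return means we are
 * running as a KVM guest and its CPUID leaves start at the returned base:
 *
 *	uint32_t kvm_base = hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
 */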
83196e39ac0SJason Wang 
832f05e798aSDavid Howells extern unsigned long arch_align_stack(unsigned long sp);
833f05e798aSDavid Howells extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
834f05e798aSDavid Howells 
835f05e798aSDavid Howells void default_idle(void);
8366a377ddcSLen Brown #ifdef	CONFIG_XEN
8376a377ddcSLen Brown bool xen_set_default_idle(void);
8386a377ddcSLen Brown #else
8396a377ddcSLen Brown #define xen_set_default_idle 0
8406a377ddcSLen Brown #endif
841f05e798aSDavid Howells 
842f05e798aSDavid Howells void stop_this_cpu(void *dummy);
8434d067d8eSBorislav Petkov void df_debug(struct pt_regs *regs, long error_code);
8441965aae3SH. Peter Anvin #endif /* _ASM_X86_PROCESSOR_H */
845