xref: /linux/arch/x86/include/asm/processor.h (revision 03b122da74b22fbe7cd98184fa5657a9ce13970c)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head_32.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	__u32			vmx_capability[NVMXINTS];
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	/*
	 * Align to size of unsigned long because the x86_capability array
	 * is passed to bitops which require the alignment. Use unnamed
	 * union to enforce that the array is aligned to size of unsigned long.
	 */
	union {
		__u32		x86_capability[NCAPINTS + NBUGINTS];
		unsigned long	x86_capability_alignment;
	};
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values, valid only on the BSP: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_cache_mbm_width_offset;
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	u16			cpu_die_id;
	u16			logical_die_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	/* Is SMT active on this core? */
	bool			smt_active;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_VORTEX	11
#define X86_VENDOR_NUM		12

#define X86_VENDOR_UNKNOWN	0xff

/*
 * Capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
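
/*
 * Worked example (illustrative; the numbers depend on the CPU): with
 * x86_cache_bits == 36 and PAGE_SHIFT == 12, the limit is
 * BIT_ULL(36 - 1 - 12) == BIT_ULL(23), i.e. only PFNs below 2^23
 * (the first 32GB of physical memory) are treated as safe for L1TF.
 */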

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
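
/*
 * Illustrative use of the generated helpers (a sketch, not an in-tree
 * call site): CPUID leaf 0 reports the maximum supported basic leaf
 * in EAX, so it can be read with:
 *
 *	unsigned int max_basic_leaf = native_cpuid_eax(0);
 */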

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware.  entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64			sp2;

	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES / sizeof(long))

#define IO_BITMAP_OFFSET_VALID_MAP				\
	(offsetof(struct tss_struct, io_bitmap.bitmap) -	\
	 offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL				\
	(offsetof(struct tss_struct, io_bitmap.mapall) -	\
	 offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * sizeof(unsigned long) coming from an extra "long" at the end of the
 * io bitmap. The limit is inclusive, i.e. the last valid byte.
 */
# define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
	 sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT	\
	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID	(__KERNEL_TSS_LIMIT + 1)
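
/*
 * Worked numbers (assuming BITS_PER_BYTE == 8 and 8-byte longs):
 * IO_BITMAP_BYTES = 65536 / 8 = 8192 and IO_BITMAP_LONGS =
 * 8192 / 8 = 1024, so each I/O bitmap occupies 8KB plus one extra
 * long to hold the all-ones terminator byte the CPU reads beyond
 * the end of the bitmap.
 */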

struct entry_stack {
	char	stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
	/* The sequence number of the last active bitmap. */
	u64			prev_sequence;

	/*
	 * Store the dirty size of the last io bitmap offender. The next
	 * one will have to do the cleanup as the switch out to a non io
	 * bitmap user will just set x86_tss.io_bitmap_base to a value
	 * outside of the TSS limit. So for sane tasks there is no need to
	 * actually touch the io_bitmap at all.
	 */
	unsigned int		prev_max;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
	 * except the additional byte at the end.
	 */
	unsigned long		mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
	/*
	 * The fixed hardware portion.  This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	struct x86_io_bitmap	io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/* Per CPU interrupt stacks */
struct irq_stack {
	char		stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 *
	 * Once we are willing to require -mstack-protector-guard-symbol=
	 * support for x86_64 stackprotector, we can get rid of this.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};
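
/*
 * Layout sanity check (an illustrative sketch): gs_base occupies
 * bytes 0-39 and no padding precedes the 8-byte-aligned canary, so
 * the canary lands exactly where GCC expects it:
 *
 *	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
 */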

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

DECLARE_PER_CPU(void *, hardirq_stack_ptr);
DECLARE_PER_CPU(bool, hardirq_stack_inuse);
extern asmlinkage void ignore_sysret(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
#endif	/* !X86_64 */

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
	unsigned long gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long           virtual_dr6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long           ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	struct io_bitmap	*io_bitmap;

	/*
	 * IOPL. Privilege level dependent I/O permission which is
	 * emulated via the I/O bitmap to prevent user space from disabling
	 * interrupts.
	 */
	unsigned long		iopl_emul;

	unsigned int		iopl_warn:1;
	unsigned int		sig_on_uaccess_err:1;

	/*
	 * Protection Keys Register for Userspace.  Loaded immediately on
	 * context switch. Store it in thread_struct to avoid a lookup in
	 * the task's FPU xstate buffer. This value is only valid when a
	 * task is scheduled out. For 'current' the authoritative source of
	 * PKRU is the hardware itself.
	 */
	u32			pkru;

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	fpu_thread_struct_whitelist(offset, size);
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
	/*
	 *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 *  and around vm86 mode and sp0 on x86_64 is special because of the
	 *  entry trampoline.
	 */
	return this_cpu_read_stable(cpu_current_top_of_stack);
}

static inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long __get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
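
/*
 * Illustrative use (a sketch, assuming a CPUID-capable CPU): leaf 0
 * returns the 12-byte vendor string in EBX, EDX, ECX order, e.g.
 * "GenuineIntel":
 *
 *	unsigned int eax, vendor[3];
 *
 *	cpuid(0, &eax, &vendor[0], &vendor[2], &vendor[1]);
 */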

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
extern void cpu_init_secondary(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
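
/*
 * Illustrative use (a sketch; 'lock' is a hypothetical spinlock):
 * prefetching a contended lock for ownership shortly before taking
 * it can hide one cache-coherency round-trip:
 *
 *	spin_lock_prefetch(&lock);
 *	... a little unrelated work ...
 *	spin_lock(&lock);
 */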

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
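
/*
 * Worked layout (illustrative): the user-mode pt_regs sit at the very
 * top of the kernel stack, just below the padding, so the following
 * identities hold:
 *
 *	(unsigned long)(task_pt_regs(task) + 1)
 *		== (unsigned long)task_stack_page(task) + THREAD_SIZE
 *		   - TOP_OF_KERNEL_STACK_PADDING
 *		== task_top_of_stack(task)
 */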

#ifdef CONFIG_X86_32
#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
#define INIT_THREAD { }

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

extern u16 get_llc_id(unsigned int cpu);

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
static inline u32 amd_get_highest_perf(void)		{ return 0; }
#endif

#define for_each_possible_hypervisor_cpuid_base(function) \
	for (function = 0x40000000; function < 0x40010000; function += 0x100)

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for_each_possible_hypervisor_cpuid_base(base) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
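
/*
 * Illustrative use (a sketch): a guest can probe for KVM by scanning
 * for its 12-byte signature in the hypervisor CPUID range:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		... running as a KVM guest ...
 */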

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void microcode_check(void);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

#ifdef CONFIG_X86_SGX
int arch_memory_failure(unsigned long pfn, int flags);
#define arch_memory_failure arch_memory_failure

bool arch_is_platform_page(u64 paddr);
#define arch_is_platform_page arch_is_platform_page
#endif

#endif /* _ASM_X86_PROCESSOR_H */