#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>

/*
 * Default implementation of the macro that returns the current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}
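
/*
 * Illustrative usage (a sketch, not part of the original header):
 * current_text_addr() just loads the address of a local label, so a
 * caller can record roughly where it is executing, e.g.:
 *
 *	void *here = current_text_addr();
 *
 *	printk(KERN_DEBUG "executing near %p\n", here);
 */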

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB; valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
	unsigned int		x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

#define X86_HYPER_VENDOR_NONE  0
#define X86_HYPER_VENDOR_VMWARE 1

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cleared_cpu_caps[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif
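
/*
 * Illustrative sketch (not part of the original header): walking the
 * per-CPU cpuinfo through the cpu_data() accessor defined above,
 * assuming a context where for_each_online_cpu() is usable:
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		struct cpuinfo_x86 *c = &cpu_data(cpu);
 *
 *		printk(KERN_INFO "CPU%d: family %d, model %d\n",
 *		       cpu, c->x86, c->x86_model);
 *	}
 */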

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
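
/*
 * Worked sizes (illustrative, derived from the constants above):
 * IO_BITMAP_BITS covers 65536 I/O ports, so IO_BITMAP_BYTES is
 * 65536/8 = 8192 bytes, and IO_BITMAP_LONGS is 8192/4 = 2048 longs
 * on 32-bit or 8192/8 = 1024 longs on 64-bit.
 */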

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define	MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]:		*/
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
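
/*
 * Illustrative check (an assumption, not in the original header): since
 * GCC expects the canary at %gs:40, setup code could assert the layout
 * at build time, e.g.:
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */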

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
	unsigned long		ip;
	unsigned long		fs;
	unsigned long		gs;
	/* Hardware debugging registers: */
	unsigned long		debugreg0;
	unsigned long		debugreg1;
	unsigned long		debugreg2;
	unsigned long		debugreg3;
	unsigned long		debugreg6;
	unsigned long		debugreg7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* Floating point and extended processor state: */
	union thread_xstate	*xstate;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set: */
	unsigned long		debugctlmsr;
#ifdef CONFIG_X86_DS
	/* Debug Store context; see asm/ds.h; goes into MSR_IA32_DS_AREA: */
	struct ds_context	*ds_ctx;
#endif /* CONFIG_X86_DS */
#ifdef CONFIG_X86_PTRACE_BTS
	/* The signal to send on a BTS buffer overflow: */
	unsigned int	bts_ovfl_signal;
#endif /* CONFIG_X86_PTRACE_BTS */
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	::"r" (value));
		break;
	case 7:
		asm("mov %0, %%db7"	::"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
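
/*
 * Illustrative usage (an assumption, not from the original header):
 * read DR7 and set the local-enable bit for breakpoint 0:
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7 | 0x1, 7);
 */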

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the CR4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long		mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
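
/*
 * Illustrative usage (an assumption, not from the original header):
 * boot code might enable a CR4 feature this way, so that CPUs brought
 * up later inherit the bit from mmu_cr4_features:
 *
 *	set_in_cr4(X86_CR4_OSFXSR);
 */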

typedef struct {
	unsigned long		seg;
} mm_segment_t;

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
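
/*
 * Illustrative sketch (not part of the original header): reading the
 * CPU vendor string with cpuid().  Leaf 0 returns the string in EBX,
 * EDX, ECX order:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	char vendor[13];
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';
 */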

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
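
/*
 * Illustrative sketch (an assumption, not in the original header):
 * enumerating the deterministic cache parameters of leaf 4, which
 * takes the cache index in ECX; a cache type of 0 ends the list:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	int index = 0;
 *
 *	do {
 *		cpuid_count(4, index++, &eax, &ebx, &ecx, &edx);
 *	} while (eax & 0x1f);
 */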

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
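
/*
 * Illustrative usage (an assumption, not from the original header):
 * busy-wait loops should include cpu_relax() so the PAUSE hint can
 * calm the pipeline and yield resources to an SMT sibling.  The
 * "done" flag is hypothetical:
 *
 *	while (!done)
 *		cpu_relax();
 */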

/* Stop speculative execution: */
static inline void sync_core(void)
{
	int tmp;

	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
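
/*
 * Illustrative idle sequence (a sketch, not the kernel's actual idle
 * loop): arm the monitor on a trigger address, re-check the wakeup
 * condition, then mwait until the monitored line is written.  The
 * "trigger" variable is hypothetical:
 *
 *	__monitor(&trigger, 0, 0);
 *	if (!need_resched())
 *		__mwait(0, 0);
 */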

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern unsigned long		idle_halt;
extern unsigned long		idle_nomwait;

/*
 * On systems with caches, the caches must be flushed as the absolute
 * last instruction before going into a suspended halt.  Otherwise,
 * dirty data can linger in the cache and become stale on resume,
 * leading to strange errors.
 *
 * We perform a variety of operations to guarantee that the compiler
 * will not reorder instructions.  wbinvd itself is serializing, so
 * the processor will not reorder.
 *
 * Systems without cache can just go into halt.
 */
static inline void wbinvd_halt(void)
{
	mb();
	/* check for clflush to determine if wbinvd is legal */
	if (cpu_has_clflush)
		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
	else
		while (1)
			halt();
}

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
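
/*
 * Illustrative usage (an assumption, not from the original header):
 * turning on the BTF bit (single-step on branches) via the helpers
 * above; DEBUGCTLMSR_BTF comes from <asm/msr-index.h>:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	update_debugctlmsr(debugctl | DEBUGCTLMSR_BTF);
 */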

/*
 * From the system description table in the BIOS.  Mostly for MCA use,
 * but others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6,
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
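
/*
 * Illustrative usage (a sketch, not from the original header):
 * prefetching the next node while walking a list, where "head" is a
 * hypothetical list_head:
 *
 *	struct list_head *pos;
 *
 *	list_for_each(pos, head) {
 *		prefetch(pos->next);
 *		...
 *	}
 */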

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.fs			= __KERNEL_PERCPU,			  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                 \
({                                                                     \
       unsigned long *__ptr = (unsigned long *)(info);                 \
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
       struct pt_regs *__regs__;                                       \
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

#endif /* _ASM_X86_PROCESSOR_H */