/* xref: /titanic_41/usr/src/grub/grub-0.97/netboot/cpu.h (revision 3f7d54a6b84904c8f4d8daa4c7b577bede7df8b9) */
#ifndef I386_BITS_CPU_H
#define I386_BITS_CPU_H


/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
#define CPU_FEATURE_P(CAP, FEATURE) \
	(!!(CAP[(X86_FEATURE_##FEATURE)/32] & (1 << ((X86_FEATURE_##FEATURE) & 0x1f))))
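/*
 * Each X86_FEATURE_* value below encodes word*32+bit: CPU_FEATURE_P divides
 * the index by 32 to pick the capability word and uses the low five bits to
 * form the mask it tests within that word.
 */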

#define NCAPINTS	4	/* Currently we have 4 32-bit words worth of info */

/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
#define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME		(0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE		(0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE		(0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC		(0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR		(0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
#define X86_FEATURE_PAE		(0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE		(0*32+ 7) /* Machine Check Architecture */
#define X86_FEATURE_CX8		(0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC	(0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP		(0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR	(0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE		(0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA		(0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV	(0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
#define X86_FEATURE_PAT		(0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36	(0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN		(0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH	(0*32+19) /* Supports the CLFLUSH instruction */
#define X86_FEATURE_DTES	(0*32+21) /* Debug Trace Store */
#define X86_FEATURE_ACPI	(0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
				          /* of FPU context), and CR4.OSFXSR available */
#define X86_FEATURE_XMM		(0*32+25) /* Streaming SIMD Extensions */
#define X86_FEATURE_XMM2	(0*32+26) /* Streaming SIMD Extensions-2 */
#define X86_FEATURE_SELFSNOOP	(0*32+27) /* CPU self snoop */
#define X86_FEATURE_HT		(0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC		(0*32+29) /* Automatic clock control */
#define X86_FEATURE_IA64	(0*32+30) /* IA-64 processor */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW	(1*32+31) /* 3DNow! */

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY	(2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN	(2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI	(2*32+ 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX	(3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR	(3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */

#define MAX_X86_VENDOR_ID 16
struct cpuinfo_x86 {
	uint8_t	 x86;		/* CPU family */
	uint8_t	 x86_model;
	uint8_t	 x86_mask;

	int	 cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned x86_capability[NCAPINTS];
	char	 x86_vendor_id[MAX_X86_VENDOR_ID];
};
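
/*
 * Illustrative sketch (not part of the original header): with a populated
 * struct cpuinfo_x86, the X86_FEATURE_* indices above are tested through
 * CPU_FEATURE_P.  The helper name below is hypothetical.
 */
static inline int cpu_has_long_mode(const struct cpuinfo_x86 *c)
{
	/* Word 1, bit 29: AMD-defined Long Mode (x86-64) flag. */
	return CPU_FEATURE_P(c->x86_capability, LM);
}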


#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_UNKNOWN 0xff

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
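
/*
 * Illustrative sketch (assumption, not part of the original header): the ID
 * bit is the classic probe for CPUID support on i386 -- if it can be toggled
 * in EFLAGS, the CPUID instruction is available.  The helper name is
 * hypothetical.
 */
static inline int eflags_id_is_changeable(void)
{
	unsigned int f1, f2;

	__asm__ __volatile__(
		"pushfl\n\t"		/* save original EFLAGS */
		"pushfl\n\t"
		"popl %0\n\t"		/* f1 = current EFLAGS */
		"movl %0, %1\n\t"
		"xorl %2, %1\n\t"	/* flip the ID bit */
		"pushl %1\n\t"
		"popfl\n\t"		/* try to write it back */
		"pushfl\n\t"
		"popl %1\n\t"		/* f2 = EFLAGS after the write */
		"popfl"			/* restore original EFLAGS */
		: "=&r" (f1), "=&r" (f2)
		: "ir" (X86_EFLAGS_ID));

	return ((f1 ^ f2) & X86_EFLAGS_ID) != 0;
}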

/*
 * Generic CPUID function
 */
static inline void cpuid(int op,
	unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}

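/*
 * Illustrative sketch (not part of the original header): CPUID leaf 1 is the
 * usual source of the family/model/stepping fields and of capability word 0
 * in struct cpuinfo_x86.  The helper name is hypothetical.
 */
static inline void example_identify_cpu(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	c->x86       = (eax >> 8) & 0xf;	/* family */
	c->x86_model = (eax >> 4) & 0xf;	/* model */
	c->x86_mask  = eax & 0xf;		/* stepping */
	c->x86_capability[0] = edx;		/* word 0: Intel-defined flags */
}
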
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx" );
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx" );
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
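
/*
 * Illustrative sketch (not part of the original header): the single-datum
 * helpers are convenient for the extended leaves, e.g. capability word 1
 * (the AMD-defined flags) comes from CPUID 0x80000001 EDX once leaf
 * 0x80000000 reports that it exists.  The helper name is hypothetical.
 */
static inline unsigned int example_amd_feature_flags(void)
{
	if (cpuid_eax(0x80000000) < 0x80000001)
		return 0;
	return cpuid_edx(0x80000001);
}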

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */


#define MSR_K6_EFER			0xC0000080
/* EFER bits: */
#define _EFER_SCE 0  /* SYSCALL/SYSRET */
#define _EFER_LME 8  /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11  /* No execute enable */

#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)

#define rdmsr(msr,val1,val2) \
     __asm__ __volatile__("rdmsr" \
			  : "=a" (val1), "=d" (val2) \
			  : "c" (msr))

#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
			  : /* no outputs */ \
			  : "c" (msr), "a" (val1), "d" (val2))
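
/*
 * Illustrative sketch (not part of the original header): rdmsr/wrmsr pair up
 * with the EFER definitions above, e.g. to turn on SYSCALL/SYSRET support.
 * The helper name is hypothetical.
 */
static inline void example_enable_syscall(void)
{
	unsigned int lo, hi;

	rdmsr(MSR_K6_EFER, lo, hi);	/* read the current EFER value */
	lo |= EFER_SCE;			/* set the System Call Extensions bit */
	wrmsr(MSR_K6_EFER, lo, hi);	/* write it back */
}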


#define read_cr0()	({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr0, %0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__("movl %0,%%cr0": :"r" (x));

#define read_cr3()	({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr3, %0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__("movl %0,%%cr3": :"r" (x));


#define read_cr4()	({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4, %0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__("movl %0,%%cr4": :"r" (x));
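
/*
 * Illustrative sketch (not part of the original header): the CR4 accessors
 * above combine with the X86_CR4_* bits, e.g. to enable FXSAVE/FXRSTOR and
 * unmasked SSE exceptions.  The helper name is hypothetical.
 */
static inline void example_enable_osfxsr(void)
{
	write_cr4(read_cr4() | X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT);
}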


extern struct cpuinfo_x86 cpu_info;
#ifdef CONFIG_X86_64
extern void cpu_setup(void);
#else
#define cpu_setup() do {} while(0)
#endif

#endif /* I386_BITS_CPU_H */