/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int	hw_clflush_disable = -1;

int	cpu;			/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature;		/* Feature flags */
u_int	cpu_feature2;		/* Feature flags */
u_int	amd_feature;		/* AMD feature flags */
u_int	amd_feature2;		/* AMD feature flags */
u_int	amd_pminfo;		/* AMD advanced power management info */
u_int	via_feature_rng;	/* VIA RNG features */
u_int	via_feature_xcrypt;	/* VIA ACE features */
u_int	cpu_high;		/* Highest arg to CPUID */
u_int	cpu_exthigh;		/* Highest arg to extended CPUID */
u_int	cpu_id;			/* Stepping ID */
u_int	cpu_procinfo;		/* HyperThreading Info / Brand Index / CLFLUSH */
u_int	cpu_procinfo2;		/* Multicore info */
char	cpu_vendor[20];		/* CPU vendor string */
u_int	cpu_vendor_id;		/* CPU vendor ID */
u_int	cpu_fxsr;		/* SSE enabled */
u_int	cpu_mxcsr_mask;		/* Valid bits in mxcsr */
u_int	cpu_clflush_line_size = 32;
u_int	cpu_stdext_feature;
u_int	cpu_max_ext_state_size;

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
	&via_feature_rng, 0, "VIA RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
	&via_feature_xcrypt, 0, "VIA xcrypt feature available in CPU");

static void
init_amd(void)
{

	/*
	 * Work around Erratum 721 for Family 10h and 12h processors.
	 * These processors may incorrectly update the stack pointer
	 * after a long series of push and/or near-call instructions,
	 * or a long series of pop and/or near-return instructions.
	 *
	 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
	 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
	 *
	 * Hypervisors do not provide access to the errata MSR, so
	 * attempting to apply the workaround raises a #GP exception.
	 * The MSR write has to be done on the host and persists
	 * globally anyway, so do not try it when running under
	 * virtualization.
	 */
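	/*
	 * The MSR written below (0xc0011029) is understood to be AMD's
	 * Decode Configuration (DE_CFG) register; per the revision guides
	 * referenced above, setting bit 0 of it applies the Erratum 721
	 * workaround.
	 */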
	switch (CPUID_TO_FAMILY(cpu_id)) {
	case 0x10:
	case 0x12:
		if ((cpu_feature2 & CPUID2_HV) == 0)
			wrmsr(0xc0011029, rdmsr(0xc0011029) | 1);
		break;
	}
}

/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
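	/*
	 * Leaf 0xc0000000 reports the highest supported Centaur leaf in
	 * %eax; when leaf 0xc0000001 is available, its %edx output
	 * (regs[3]) carries the PadLock feature flags tested below.
	 * (Summary of the programming guide linked above.)
	 */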
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		return;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
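	/*
	 * MSR 0x1107 is the VIA feature-control register (FCR); per the
	 * PadLock programming guide linked above, setting bit 28 switches
	 * the PadLock units on once any of them has been detected.
	 */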
	if (via_feature_xcrypt != 0)
		wrmsr(0x1107, rdmsr(0x1107) | (1 << 28));
}

/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	cr4 = rcr4();
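	/*
	 * CR4_FXSR and CR4_XMM are the OSFXSR and OSXMMEXCPT bits: they
	 * announce OS support for FXSAVE/FXRSTOR state handling and for
	 * SSE exceptions, which SSE code depends on.
	 */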
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;

	/*
	 * Postpone enabling SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables.  The boot loader enables the U bit in
	 * its tables.
	 */
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
		cr4 |= CR4_SMEP;
	load_cr4(cr4);
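	/*
	 * EFER.NXE turns on no-execute page protection; pg_nx is then set
	 * so pmap marks non-executable mappings with the NX bit.
	 */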
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}
}

void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1: bits 15-8 of %ebx report the CLFLUSH
	 * line size (value * 8 = cache line size in bytes).
	 */
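	/*
	 * For example, the common value 8 in those bits corresponds to a
	 * 64-byte cache line.
	 */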
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing the APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	/*
	 * Allow the CLFLUSH feature to be disabled manually via the
	 * hw.clflush_disable tunable.
	 */
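	/* For instance, setting hw.clflush_disable=1 in loader.conf forces it off. */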
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;
}
218