/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
static int lower_sharedpage_init;
int hw_lower_amd64_sharedpage;
SYSCTL_INT(_hw, OID_AUTO, lower_amd64_sharedpage, CTLFLAG_RDTUN,
    &hw_lower_amd64_sharedpage, 0,
    "Lower sharedpage to work around Ryzen issue with executing code near the top of user memory");

/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int hw_clflush_disable = -1;

static void
init_amd(void)
{
        uint64_t msr;

        /*
         * C1E renders the local APIC timer dead, so we disable it by
         * reading the Interrupt Pending Message register and clearing
         * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
         *
         * Reference:
         *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
         *   Processors", #32559, revision 3.00+
         *
         * Detect the presence of the C1E capability, mostly on the
         * latest dual-core (or newer) members of the K8 family.  The
         * affected model range is taken from the Linux sources.
         */
        if ((CPUID_TO_FAMILY(cpu_id) == 0xf ||
            CPUID_TO_FAMILY(cpu_id) == 0x10) &&
            (cpu_feature2 & CPUID2_HV) == 0)
                cpu_amdc1e_bug = 1;
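        /*
         * Illustrative sketch only: the consumer of cpu_amdc1e_bug is
         * expected to clear the two bits named in the comment above
         * roughly as below.  The MSR number and macro names here are
         * assumptions for illustration, not part of this file.
         */
#ifdef notdef
#define EXAMPLE_MSR_AMDK8_IPM   0xc0010055      /* Interrupt Pending Message */
#define EXAMPLE_SMIONCMPHALT    (1ULL << 27)
#define EXAMPLE_C1EONCMPHALT    (1ULL << 28)

        msr = rdmsr(EXAMPLE_MSR_AMDK8_IPM);
        if ((msr & (EXAMPLE_SMIONCMPHALT | EXAMPLE_C1EONCMPHALT)) != 0)
                wrmsr(EXAMPLE_MSR_AMDK8_IPM,
                    msr & ~(EXAMPLE_SMIONCMPHALT | EXAMPLE_C1EONCMPHALT));
#endif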
        /*
         * Work around Erratum 721 for Family 10h and 12h processors.
         * These processors may incorrectly update the stack pointer
         * after a long series of push and/or near-call instructions,
         * or a long series of pop and/or near-return instructions.
         *
         * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
         * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
         *
         * Hypervisors do not provide access to the errata MSR, so an
         * attempt to apply the workaround causes a #GP exception.  The
         * MSR write must be done on the host and persists globally
         * anyway, so do not try to do it when running under
         * virtualization.
         */
        switch (CPUID_TO_FAMILY(cpu_id)) {
        case 0x10:
        case 0x12:
                if ((cpu_feature2 & CPUID2_HV) == 0)
                        wrmsr(MSR_DE_CFG, rdmsr(MSR_DE_CFG) | 1);
                break;
        }

        /*
         * The BIOS may fail to set InitApicIdCpuIdLo to 1 as it should
         * per the BKDG, so do it here.  Otherwise some tools could be
         * confused by the Initial Local APIC ID reported with CPUID
         * function 1 in %ebx.
         */
        if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
                if ((cpu_feature2 & CPUID2_HV) == 0) {
                        msr = rdmsr(MSR_NB_CFG1);
                        msr |= (uint64_t)1 << 54;
                        wrmsr(MSR_NB_CFG1, msr);
                }
        }

        /*
         * The BIOS may configure Family 10h processors to convert the
         * WC+ cache type to CD.  That can hurt the performance of guest
         * VMs using nested paging.  The relevant MSR bit is not
         * documented in the BKDG; the fix is borrowed from Linux.
         */
        if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
                if ((cpu_feature2 & CPUID2_HV) == 0) {
                        msr = rdmsr(0xc001102a);
                        msr &= ~((uint64_t)1 << 24);
                        wrmsr(0xc001102a, msr);
                }
        }

        /*
         * Work around Erratum 793: "Specific Combination of Writes to
         * Write Combined Memory Types and Locked Instructions May Cause
         * Core Hang".  See the Revision Guide for AMD Family 16h Models
         * 00h-0Fh Processors, revision 3.04 or later, publication 51810.
         */
        if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) {
                if ((cpu_feature2 & CPUID2_HV) == 0) {
                        msr = rdmsr(MSR_LS_CFG);
                        msr |= (uint64_t)1 << 15;
                        wrmsr(MSR_LS_CFG, msr);
                }
        }

        /* Ryzen errata. */
        if (CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1 &&
            (cpu_feature2 & CPUID2_HV) == 0) {
                /* 1021 */
                msr = rdmsr(MSR_DE_CFG);
                msr |= 0x2000;
                wrmsr(MSR_DE_CFG, msr);

                /* 1033 */
                msr = rdmsr(MSR_LS_CFG);
                msr |= 0x10;
                wrmsr(MSR_LS_CFG, msr);

                /* 1049 */
                msr = rdmsr(0xc0011028);
                msr |= 0x10;
                wrmsr(0xc0011028, msr);

                /* 1095 */
                msr = rdmsr(MSR_LS_CFG);
                msr |= 0x200000000000000;
                wrmsr(MSR_LS_CFG, msr);
        }

        /*
         * Work around a problem on Ryzen that is triggered by executing
         * code near the top of user memory, in our case the signal
         * trampoline code in the shared page on amd64.
         *
         * This function is executed once for the BSP before tunables
         * take effect, so the value determined here can be overridden
         * by the tunable.  This function is then executed again for
         * each AP and also on resume.  Set a flag the first time so
         * that the value set by the tunable is not overwritten.
         *
         * The stepping and/or microcode versions should be checked
         * after this issue is fixed by AMD so that we don't use this
         * mode if not needed.
         */
        if (lower_sharedpage_init == 0) {
                lower_sharedpage_init = 1;
                if (CPUID_TO_FAMILY(cpu_id) == 0x17 ||
                    CPUID_TO_FAMILY(cpu_id) == 0x18) {
                        hw_lower_amd64_sharedpage = 1;
                }
        }
}
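/*
 * Illustrative sketch only: hw_lower_amd64_sharedpage is consumed when
 * the process ABI is set up, by sliding the shared page (and with it
 * the top of user memory) down one page so no user code executes at
 * the very top of the address space.  The struct and field names below
 * are a hypothetical simplification, not the actual sysentvec handling.
 */
#ifdef notdef
static void
example_lower_shared_page(struct example_sysentvec *sv)
{
        if (hw_lower_amd64_sharedpage != 0) {
                sv->sv_maxuser -= PAGE_SIZE;
                sv->sv_shared_page_base -= PAGE_SIZE;
                sv->sv_usrstack -= PAGE_SIZE;
        }
}
#endif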
/*
 * Initialize special VIA features.
 */
static void
init_via(void)
{
        u_int regs[4], val;

        /*
         * Check the extended CPUID leaves for PadLock features.
         *
         * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
         */
        do_cpuid(0xc0000000, regs);
        if (regs[0] >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                return;

        /* Enable the RNG if present. */
        if ((val & VIA_CPUID_HAS_RNG) != 0) {
                via_feature_rng = VIA_HAS_RNG;
                wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
        }

        /* Enable PadLock if present. */
        if ((val & VIA_CPUID_HAS_ACE) != 0)
                via_feature_xcrypt |= VIA_HAS_AES;
        if ((val & VIA_CPUID_HAS_ACE2) != 0)
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        if ((val & VIA_CPUID_HAS_PHE) != 0)
                via_feature_xcrypt |= VIA_HAS_SHA;
        if ((val & VIA_CPUID_HAS_PMM) != 0)
                via_feature_xcrypt |= VIA_HAS_MM;
        if (via_feature_xcrypt != 0)
                wrmsr(0x1107, rdmsr(0x1107) | (1 << 28));
}

/*
 * Return the value to load into the TSC_AUX MSR, i.e. what
 * rdtscp/rdpid report on the invoking CPU.
 *
 * The caller should prevent CPU migration.
 */
u_int
cpu_auxmsr(void)
{
        KASSERT((read_rflags() & PSL_I) == 0, ("context switch possible"));
        return (PCPU_GET(cpuid));
}
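/*
 * Illustrative sketch only: once MSR_TSC_AUX has been loaded with
 * cpu_auxmsr() (see initializecpu() below), a single rdtscp yields
 * both the TSC and the id of the CPU it was read on.  This helper is
 * hypothetical and uses inline assembly directly.
 */
#ifdef notdef
static uint64_t
example_rdtscp_cpuid(u_int *cpuidp)
{
        uint32_t lo, hi, aux;

        /* EDX:EAX = TSC, ECX = TSC_AUX, read atomically. */
        __asm __volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux));
        *cpuidp = aux;
        return ((uint64_t)hi << 32 | lo);
}
#endif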
/*
 * Initialize CPU control registers.
 */
void
initializecpu(void)
{
        uint64_t msr;
        uint32_t cr4;

        cr4 = rcr4();
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
                cr4 |= CR4_FXSR | CR4_XMM;
                cpu_fxsr = hw_instruction_sse = 1;
        }
        if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
                cr4 |= CR4_FSGSBASE;

        if (cpu_stdext_feature2 & CPUID_STDEXT2_PKU)
                cr4 |= CR4_PKE;

        /*
         * If SMEP is present, we only need to flush the RSB (by
         * default) on context switches, to prevent cross-process
         * ret2spec attacks.  Do it automatically if ibrs_disable is
         * set, to complete the mitigation.
         *
         * Postpone enabling SMEP on the boot CPU until the page tables
         * are switched from the boot loader identity mapping to the
         * kernel tables.  The boot loader enables the U bit in its
         * tables.
         */
        if (IS_BSP()) {
                if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
                    !TUNABLE_INT_FETCH(
                    "machdep.mitigations.cpu_flush_rsb_ctxsw",
                    &cpu_flush_rsb_ctxsw) &&
                    hw_ibrs_disable)
                        cpu_flush_rsb_ctxsw = 1;
        } else {
                if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
                        cr4 |= CR4_SMEP;
                if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
                        cr4 |= CR4_SMAP;
        }
        load_cr4(cr4);
        /* Reload the extended CPU features to reflect the cr4 changes. */
        if (IS_BSP())
                identify_cpu_ext_features();
        if (IS_BSP() && (amd_feature & AMDID_NX) != 0) {
                msr = rdmsr(MSR_EFER) | EFER_NXE;
                wrmsr(MSR_EFER, msr);
                pg_nx = PG_NX;
        }
        hw_ibrs_recalculate(false);
        hw_ssb_recalculate(false);
        amd64_syscall_ret_flush_l1d_recalc();
        x86_rngds_mitg_recalculate(false);
        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
        case CPU_VENDOR_HYGON:
                init_amd();
                break;
        case CPU_VENDOR_CENTAUR:
                init_via();
                break;
        }

        if ((amd_feature & AMDID_RDTSCP) != 0 ||
            (cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
                wrmsr(MSR_TSC_AUX, cpu_auxmsr());
}

void
initializecpucache(void)
{

        /*
         * For CPUID leaf 1, bits 15-8 of %ebx report the CLFLUSH line
         * size in units of 8 bytes (value * 8 = cache line size in
         * bytes).
         */
        if ((cpu_feature & CPUID_CLFSH) != 0)
                cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

        /*
         * XXXKIB: (temporary) hack to work around traps generated
         * when CLFLUSHing the APIC register window under virtualization
         * environments.  These environments tend to disable the
         * CPUID_SS feature even though the native CPU supports it.
         */
        TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
        if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
                cpu_feature &= ~CPUID_CLFSH;
                cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
        }

        /*
         * The kernel's use of CLFLUSH{,OPT} can be disabled manually
         * by setting the hw.clflush_disable tunable.
         */
        if (hw_clflush_disable == 1) {
                cpu_feature &= ~CPUID_CLFSH;
                cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
        }
}
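/*
 * Illustrative sketch only: a worked example of the CLFLUSH line-size
 * computation in initializecpucache() above.  A CPU reporting 0x08 in
 * bits 15-8 of %ebx for CPUID leaf 1 has 8 * 8 = 64-byte cache lines.
 * This hypothetical helper recomputes the value directly from CPUID.
 */
#ifdef notdef
static u_int
example_clflush_line_size(void)
{
        u_int regs[4];

        if ((cpu_feature & CPUID_CLFSH) == 0)
                return (0);
        do_cpuid(1, regs);              /* regs[1] is %ebx */
        return (((regs[1] >> 8) & 0xff) * 8);
}
#endif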