/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
static int	lower_sharedpage_init;
int		hw_lower_amd64_sharedpage;
SYSCTL_INT(_hw, OID_AUTO, lower_amd64_sharedpage, CTLFLAG_RDTUN,
    &hw_lower_amd64_sharedpage, 0,
    "Lower sharedpage to work around Ryzen issue with executing code near the top of user memory");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int	hw_clflush_disable = -1;

static void
init_amd(void)
{
	uint64_t msr;

	/*
	 * Work around Erratum 721 for Family 10h and 12h processors.
	 * These processors may incorrectly update the stack pointer
	 * after a long series of push and/or near-call instructions,
	 * or a long series of pop and/or near-return instructions.
	 *
	 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
	 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
	 *
	 * Hypervisors do not provide access to the errata MSR,
	 * causing a #GP exception on any attempt to apply the
	 * workaround.  The MSR write would have to be done on the
	 * host and would persist globally anyway, so do not try to
	 * do it when running under virtualization.
	 */
	switch (CPUID_TO_FAMILY(cpu_id)) {
	case 0x10:
	case 0x12:
		if ((cpu_feature2 & CPUID2_HV) == 0)
			wrmsr(0xc0011029, rdmsr(0xc0011029) | 1); /* DE_CFG */
		break;
	}

	/*
	 * BIOS may fail to set InitApicIdCpuIdLo to 1 as it should per BKDG.
	 * Do it here instead; otherwise some tools could be confused by the
	 * Initial Local APIC ID reported with CPUID function 1 in EBX.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(MSR_NB_CFG1);
			msr |= (uint64_t)1 << 54;
			wrmsr(MSR_NB_CFG1, msr);
		}
	}

	/*
	 * BIOS may configure Family 10h processors to convert WC+ cache type
	 * to CD.  That can hurt performance of guest VMs using nested paging.
	 * The relevant MSR bit is not documented in the BKDG;
	 * the fix is borrowed from Linux.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(0xc001102a);	/* BU_CFG2 */
			msr &= ~((uint64_t)1 << 24);
			wrmsr(0xc001102a, msr);
		}
	}

	/*
	 * Work around Erratum 793: Specific Combination of Writes to Write
	 * Combined Memory Types and Locked Instructions May Cause Core Hang.
	 * See Revision Guide for AMD Family 16h Models 00h-0Fh Processors,
	 * revision 3.04 or later, publication 51810.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(MSR_LS_CFG);
			msr |= (uint64_t)1 << 15;
			wrmsr(MSR_LS_CFG, msr);
		}
	}

	/*
	 * Ryzen errata; see the Revision Guide for AMD Family 17h
	 * Models 00h-0Fh Processors.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1 &&
	    (cpu_feature2 & CPUID2_HV) == 0) {
		/* 1021 */
		msr = rdmsr(0xc0011029);	/* DE_CFG */
		msr |= 0x2000;
		wrmsr(0xc0011029, msr);

		/* 1033 */
		msr = rdmsr(MSR_LS_CFG);
		msr |= 0x10;
		wrmsr(MSR_LS_CFG, msr);

		/* 1049 */
		msr = rdmsr(0xc0011028);
		msr |= 0x10;
		wrmsr(0xc0011028, msr);

		/* 1095 */
		msr = rdmsr(MSR_LS_CFG);
		msr |= 0x200000000000000;
		wrmsr(MSR_LS_CFG, msr);
	}

	/*
	 * Work around a problem on Ryzen that is triggered by executing
	 * code near the top of user memory, in our case the signal
	 * trampoline code in the shared page on amd64.
	 *
	 * This function is executed once for the BSP before tunables take
	 * effect, so the value determined here can be overridden by the
	 * tunable.  This function is then executed again for each AP and
	 * also on resume.  Set a flag the first time so that a value set
	 * by the tunable is not overwritten.
	 *
	 * The stepping and/or microcode versions should be checked once
	 * AMD fixes this issue, so that the workaround is not applied
	 * when it is not needed.
	 */
	if (lower_sharedpage_init == 0) {
		lower_sharedpage_init = 1;
		if (CPUID_TO_FAMILY(cpu_id) == 0x17 ||
		    CPUID_TO_FAMILY(cpu_id) == 0x18) {
			hw_lower_amd64_sharedpage = 1;
		}
	}
}
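/*
 * For reference, a sketch of what the CPUID_TO_FAMILY() and
 * CPUID_TO_MODEL() macros used above compute.  The authoritative
 * definitions live in <machine/specialreg.h>; the code below only
 * illustrates the architectural encoding of CPUID leaf 1 %eax
 * (cpu_id) and is not a replacement for them:
 *
 *	u_int base_family = (cpu_id >> 8) & 0xf;
 *	u_int base_model = (cpu_id >> 4) & 0xf;
 *	u_int ext_family = (cpu_id >> 20) & 0xff;
 *	u_int ext_model = (cpu_id >> 16) & 0xf;
 *	u_int family, model;
 *
 *	family = base_family;
 *	model = base_model;
 *	if (base_family == 0xf)
 *		family += ext_family;
 *	if (base_family >= 0x6)
 *		model |= ext_model << 4;
 *
 * Ryzen, for example, reports base family 0xf and extended family
 * 0x8, yielding the family 0x17 tested for in init_amd() above.
 */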
/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		return;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
	if (via_feature_xcrypt != 0)
		wrmsr(0x1107, rdmsr(0x1107) | (1 << 28)); /* FCR */
}
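/*
 * A note on the flags tested above, as described in VIA's PadLock
 * programming guide (linked in init_via()): leaf 0xc0000001 %edx
 * pairs each feature with two bits, a "present" bit (the
 * VIA_CPUID_HAS_* values) and an "enabled" bit (the VIA_CPUID_DO_*
 * values) one position higher.  For example:
 *
 *	(val & VIA_CPUID_HAS_RNG) != 0	-> the RNG exists on this chip
 *	(val & VIA_CPUID_DO_RNG) != 0	-> the RNG is currently enabled
 *
 * init_via() reuses the VIA_CPUID_DO_RNG bit position as the enable
 * bit it ORs into MSR 0x110B when turning the RNG on.
 */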
/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	cr4 = rcr4();
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;

	if (cpu_stdext_feature2 & CPUID_STDEXT2_PKU)
		cr4 |= CR4_PKE;

	/*
	 * Postpone enabling SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables.  The boot loader enables the U bit in
	 * its tables.
	 */
	if (!IS_BSP()) {
		if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
			cr4 |= CR4_SMEP;
		if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
			cr4 |= CR4_SMAP;
	}
	load_cr4(cr4);
	if (IS_BSP() && (amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	hw_ibrs_recalculate();
	hw_ssb_recalculate(false);
	amd64_syscall_ret_flush_l1d_recalc();
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}

	/*
	 * Load the per-CPU ID into MSR_TSC_AUX so that it can later be
	 * read back with RDTSCP or RDPID.
	 */
	if ((amd_feature & AMDID_RDTSCP) != 0 ||
	    (cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
		wrmsr(MSR_TSC_AUX, PCPU_GET(cpuid));
}

void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns:
	 *   bits 15-8: CLFLUSH line size
	 *	(value * 8 = cache line size in bytes; the common
	 *	value 8 means 64-byte lines)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}

	/*
	 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
	 * by setting the hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
}
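/*
 * Usage note (a sketch, not exercised by this file): the CLFLUSH and
 * shared-page knobs above are boot-time tunables, so they are normally
 * set from /boot/loader.conf, e.g.:
 *
 *	hw.clflush_disable=1		# force-disable kernel CLFLUSH{,OPT} use
 *	hw.lower_amd64_sharedpage=0	# opt out of the Ryzen shared-page workaround
 *
 * hw.lower_amd64_sharedpage is also exported read-only through
 * sysctl(8) (CTLFLAG_RDTUN above), while hw.clflush_disable is only
 * fetched with TUNABLE_INT_FETCH() and has no sysctl node in this
 * file.
 */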