/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2024 Oxide Computer Company
 */

#include <sys/stdbool.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/ontrap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/ucode.h>
#include <sys/ucode_intel.h>
#include <ucode/ucode_errno.h>
#include <ucode/ucode_utils_intel.h>
#include <sys/x86_archext.h>

extern void *ucode_zalloc(size_t);
extern void ucode_free(void *, size_t);
extern const char *ucode_path(void);
extern int ucode_force_update;

static ucode_file_intel_t intel_ucodef;

/*
 * Check whether this module can be used for microcode updates on this
 * platform.
 */
static bool
ucode_select_intel(cpu_t *cp)
{
	if ((get_hwenv() & HW_VIRTUAL) != 0)
		return (false);

	return (cpuid_getvendor(cp) == X86_VENDOR_Intel);
}

/*
 * Check whether or not a processor is capable of microcode operations
 *
 * At this point we only support microcode update for:
 * - Intel processors family 6 and above.
 */
static bool
ucode_capable_intel(cpu_t *cp)
{
	return (cpuid_getfamily(cp) >= 6);
}

static void
ucode_file_reset_intel(void)
{
	ucode_file_intel_t *ucodefp = &intel_ucodef;
	int total_size, body_size;

	if (ucodefp->uf_header == NULL)
		return;

	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);

	if (ucodefp->uf_body != NULL) {
		ucode_free(ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table != NULL) {
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	ucode_free(ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}
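
/*
 * The routines below operate on Intel microcode update images with the
 * layout sketched here.  This is only a summary of the structures consumed
 * in this file; the authoritative definitions live in the ucode_intel
 * headers, and the UCODE_*_SIZE_INTEL() macros normalize the encoding in
 * which a size field of zero selects the architectural default size.
 *
 *	+------------------------------------------+
 *	| ucode_header_intel_t                      | UCODE_HEADER_SIZE_INTEL
 *	|  uh_rev, uh_signature, uh_proc_flags,     |
 *	|  uh_body_size, uh_total_size, ...         |
 *	+------------------------------------------+
 *	| microcode body (opaque to this code)      | UCODE_BODY_SIZE_INTEL()
 *	+------------------------------------------+
 *	| ucode_ext_table_intel_t (optional)        | remainder of
 *	|  uet_count, uet_ext_sig[]                 | UCODE_TOTAL_SIZE_INTEL()
 *	+------------------------------------------+
 */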

/*
 * Checks if the microcode is for this processor.
 */
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
	if (uhp == NULL)
		return (EM_NOMATCH);

	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		for (uint_t i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_intel_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}

/*
 * Copy the given ucode into cpu_ucode_info_t in preparation for loading onto
 * the corresponding CPU via ucode_load_intel().
 */
static ucode_errno_t
ucode_copy_intel(const ucode_file_intel_t *ucodefp, cpu_ucode_info_t *uinfop)
{
	ASSERT3P(ucodefp->uf_header, !=, NULL);
	ASSERT3P(ucodefp->uf_body, !=, NULL);
	ASSERT3P(uinfop->cui_pending_ucode, ==, NULL);

	/*
	 * Allocate memory for the pending microcode update and copy the body.
	 * We don't need the header or extended signature table which are only
	 * used for matching.
	 */
	size_t sz = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	uinfop->cui_pending_ucode = ucode_zalloc(sz);
	if (uinfop->cui_pending_ucode == NULL)
		return (EM_NOMEM);
	memcpy(uinfop->cui_pending_ucode, ucodefp->uf_body, sz);

	uinfop->cui_pending_size = sz;
	uinfop->cui_pending_rev = ucodefp->uf_header->uh_rev;

	return (EM_OK);
}
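
/*
 * Find a microcode update that matches the given CPU, either in the copy
 * cached in intel_ucodef or in a file on disk.  Files are looked up under
 * ucode_path() in a per-vendor directory and are named after the processor
 * signature and platform ID, e.g. <ucode_path()>/GenuineIntel/000506C9-03
 * (an illustrative name, not a reference to a shipped file).  A freshly
 * read file is validated (header sanity, header and body checksum and, if
 * present, extended signature table checksums) before it is matched and
 * its body copied out for loading.
 */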
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int header_size = UCODE_HEADER_SIZE_INTEL;
	int cpi_sig = cpuid_getsig(cp);
	ucode_errno_t rc = EM_OK;
	ucode_file_intel_t *ucodefp = &intel_ucodef;

	/*
	 * If the cached microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (ucode_copy_intel(ucodefp, uinfop));
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "%s/%s/%08X-%02X",
	    ucode_path(), cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode_file_reset_intel();

	ucodefp->uf_header = ucode_zalloc(header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t *uhp = ucodefp->uf_header;
		uint32_t offset = header_size;
		int total_size, body_size, ext_size;
		uint32_t sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_EXTCHECKSUM;
		} else {
			int i;

			for (i = 0; i < ucodefp->uf_ext_table->uet_count; i++) {
				ucode_ext_sig_intel_t *sig;

				sig = &ucodefp->uf_ext_table->uet_ext_sig[i];

				if (ucode_checksum_intel_extsig(uhp,
				    sig) != 0) {
					rc = EM_SIGCHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);
	if (rc == EM_OK) {
		return (ucode_copy_intel(ucodefp, uinfop));
	}

	return (rc);
}

static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getmodel(CPU) >= 5 || cpuid_getfamily(CPU) > 6)) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}
}

static void
ucode_load_intel(cpu_ucode_info_t *uinfop)
{
	VERIFY3P(uinfop->cui_pending_ucode, !=, NULL);

	kpreempt_disable();
	/*
	 * On some platforms a cache invalidation is required for the
	 * ucode update to be successful due to the parts of the
	 * processor that the microcode is updating.
	 */
	invalidate_cache();
	wrmsr(MSR_INTC_UCODE_WRITE, (uintptr_t)uinfop->cui_pending_ucode);
	kpreempt_enable();
}
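
/*
 * Scan a buffer that may hold several concatenated microcode updates and
 * record in the ucode_update_t the highest-revision body that matches this
 * processor.  The comment in the loop below describes how non-matching and
 * lower-revision entries are reported back to the caller.
 */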
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t header_size = UCODE_HEADER_SIZE_INTEL;
	int remaining;
	int found = 0;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int total_size, body_size, ext_size;
		uint8_t *curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point. However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them. Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}

static const ucode_source_t ucode_intel = {
	.us_name = "Intel microcode updater",
	.us_write_msr = MSR_INTC_UCODE_WRITE,
	.us_invalidate = true,
	.us_select = ucode_select_intel,
	.us_capable = ucode_capable_intel,
	.us_file_reset = ucode_file_reset_intel,
	.us_read_rev = ucode_read_rev_intel,
	.us_load = ucode_load_intel,
	.us_validate = ucode_validate_intel,
	.us_extract = ucode_extract_intel,
	.us_locate = ucode_locate_intel
};
UCODE_SOURCE(ucode_intel);