xref: /titanic_50/usr/src/uts/i86pc/os/microcode.c (revision 61491074b8a9b27fe22ce320f874f909877c6d91)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/asm_linkage.h>
28 #include <sys/bootconf.h>
29 #include <sys/cpuvar.h>
30 #include <sys/cmn_err.h>
31 #include <sys/controlregs.h>
32 #include <sys/debug.h>
33 #include <sys/kobj.h>
34 #include <sys/kobj_impl.h>
35 #include <sys/machsystm.h>
36 #include <sys/param.h>
37 #include <sys/machparam.h>
38 #include <sys/promif.h>
39 #include <sys/sysmacros.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/thread.h>
43 #include <sys/ucode.h>
44 #include <sys/x86_archext.h>
45 #include <sys/x_call.h>
46 #ifdef	__xpv
47 #include <sys/hypervisor.h>
48 #endif
49 
/*
 * AMD-specific equivalence table
 */
static ucode_eqtbl_amd_t *ucode_eqtbl_amd;

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

/* Cached microcode file; reused across CPUs with the same signature. */
static ucode_file_t ucodefile;
61 
/*
 * Forward declarations for the vendor-specific implementations of the
 * ucode_ops interface (see the ucode_amd/ucode_intel tables below).
 */
static void* ucode_zalloc(processorid_t, size_t);
static void ucode_free(processorid_t, void *, size_t);

static int ucode_capable_amd(cpu_t *);
static int ucode_capable_intel(cpu_t *);

static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
    int);

static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
static void ucode_file_reset_intel(ucode_file_t *, processorid_t);

static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);

#ifdef	__xpv
static void ucode_load_xpv(ucode_update_t *);
static void ucode_chipset_amd(uint8_t *, int);
#endif

static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);

static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);
static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);

#ifndef __xpv
static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
    ucode_file_amd_t *, int);
#endif
static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
    ucode_header_intel_t *, ucode_ext_table_intel_t *);

static void ucode_read_rev_amd(cpu_ucode_info_t *);
static void ucode_read_rev_intel(cpu_ucode_info_t *);
99 
/* AMD implementation of the microcode update interface. */
static const struct ucode_ops ucode_amd = {
	MSR_AMD_PATCHLOADER,
	ucode_capable_amd,
	ucode_file_reset_amd,
	ucode_read_rev_amd,
	ucode_load_amd,
	ucode_validate_amd,
	ucode_extract_amd,
	ucode_locate_amd
};
110 
/* Intel implementation of the microcode update interface. */
static const struct ucode_ops ucode_intel = {
	MSR_INTC_UCODE_WRITE,
	ucode_capable_intel,
	ucode_file_reset_intel,
	ucode_read_rev_intel,
	ucode_load_intel,
	ucode_validate_intel,
	ucode_extract_intel,
	ucode_locate_intel
};
121 
/* Vendor-specific ops, selected in ucode_check() from the CPU vendor. */
const struct ucode_ops *ucode;

/* Console messages reporting the outcome of an update attempt. */
static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;
135 
/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 *
 * The boot CPU uses the statically allocated cpu_ucode_info0 (assigned
 * in ucode_check()), so it must never come through here.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}
147 
/*
 * Release the mcpu_ucode_info allocated by ucode_alloc_space().
 * Never called for the boot CPU, whose info is statically allocated.
 */
void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
}
155 
156 /*
157  * Called when we are done with microcode update on all processors to free up
158  * space allocated for the microcode file.
159  */
160 void
161 ucode_cleanup()
162 {
163 	if (ucode == NULL)
164 		return;
165 
166 	ucode->file_reset(&ucodefile, -1);
167 }
168 
169 /*
170  * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
171  * allocated with BOP_ALLOC() and does not require a free.
172  */
173 static void*
174 ucode_zalloc(processorid_t id, size_t size)
175 {
176 	if (id)
177 		return (kmem_zalloc(size, KM_NOSLEEP));
178 
179 	/* BOP_ALLOC() failure results in panic */
180 	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
181 }
182 
183 static void
184 ucode_free(processorid_t id, void* buf, size_t size)
185 {
186 	if (id)
187 		kmem_free(buf, size);
188 }
189 
190 /*
191  * Check whether or not a processor is capable of microcode operations
192  * Returns 1 if it is capable, 0 if not.
193  *
194  * At this point we only support microcode update for:
195  * - Intel processors family 6 and above, and
196  * - AMD processors family 0x10 and above.
197  *
198  * We also assume that we don't support a mix of Intel and
199  * AMD processors in the same box.
200  *
201  * An i86xpv guest domain can't update the microcode.
202  */
203 /*ARGSUSED*/
204 static int
205 ucode_capable_amd(cpu_t *cp)
206 {
207 	int hwenv = get_hwenv();
208 
209 	if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
210 		return (0);
211 	}
212 	return (cpuid_getfamily(cp) >= 0x10);
213 }
214 
215 static int
216 ucode_capable_intel(cpu_t *cp)
217 {
218 	int hwenv = get_hwenv();
219 
220 	if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
221 		return (0);
222 	}
223 	return (cpuid_getfamily(cp) >= 6);
224 }
225 
226 /*
227  * Called when it is no longer necessary to keep the microcode around,
228  * or when the cached microcode doesn't match the CPU being processed.
229  */
230 static void
231 ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
232 {
233 	ucode_file_amd_t *ucodefp = ufp->amd;
234 
235 	if (ucodefp == NULL)
236 		return;
237 
238 	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
239 	ufp->amd = NULL;
240 }
241 
/*
 * Intel counterpart of ucode_file_reset_amd(): free the cached header,
 * body and extended signature table.  The buffer sizes are derived from
 * the header fields, so the header must be freed last.
 */
static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
	int total_size, body_size;

	/* nothing cached unless the header was read in */
	if (ucodefp == NULL || ucodefp->uf_header == NULL)
		return;

	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	if (ucodefp->uf_body) {
		ucode_free(id, ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		/* ext table is whatever follows header + body */
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(id, ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}
268 
/*
 * Find the equivalent CPU id in the equivalence table.
 *
 * On the boot CPU the table is streamed from disk one entry at a time
 * (no kmem available yet); on other CPUs the whole table is read into
 * ucode_eqtbl_amd once and searched in memory thereafter.
 *
 * Returns EM_OK with *eq_sig set on success, or an EM_* error code.
 */
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		/* scan entries until a match or the zero terminator */
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				/* short read: no matching entry in file */
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}
349 
/*
 * xVM cannot check for the presence of PCI devices. Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value.
 */
#ifdef __xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

	/*
	 * Container layout (32-bit words): magic, section type, section
	 * length, then the section payload.  The caller is expected to
	 * have validated the container already (see ucode_locate_amd).
	 */
	/* skip to first microcode patch */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	/* walk each patch section: type word, length word, payload */
	while (size >= sizeof (ucode_header_amd_t) + 8) {
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			/* invalid CPU rev so the hypervisor skips it */
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif
387 
/*
 * Populate the ucode file structure from microcode file corresponding to
 * this CPU, if exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

#ifndef __xpv
	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch. If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	/* non-boot CPU allocation is KM_NOSLEEP and may fail */
	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU. The patch files are named XXXX-YY, where
	 * XXXX is the equivalent CPU id and YY is the running patch number.
	 * Patches specific to certain chipsets are guaranteed to have lower
	 * numbers than less specific patches, so we can just load the first
	 * patch that matches.
	 */

	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		/* a missing file ends the sequence of patch numbers */
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
#else
	int size = 0;
	char c;

	/*
	 * The xVM case is special. To support mixed-revision systems, the
	 * hypervisor will choose which patch to load for which CPU, so the
	 * whole microcode patch container file will have to be loaded.
	 *
	 * Since this code is only run on the boot cpu, we don't have to care
	 * about failing ucode_zalloc() or freeing allocated memory.
	 */
	if (cp->cpu_id != 0)
		return (EM_INVALIDARG);

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	if ((fd = kobj_open(name)) == -1)
		return (EM_OPENFILE);

	/* get the file size by counting bytes */
	do {
		count = kobj_read(fd, &c, 1, size);
		size += count;
	} while (count);

	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	ASSERT(ucodefp);
	ufp->amd = ucodefp;

	ucodefp->usize = size;
	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
	ASSERT(ucodefp->ucodep);

	/* load the microcode patch container file */
	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
	(void) kobj_close(fd);

	if (count != size)
		return (EM_FILESIZE);

	/* make sure the container file is valid */
	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);

	if (rc != EM_OK)
		return (rc);

	/* disable chipset-specific patches */
	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);

	return (EM_OK);
#endif
}
501 
/*
 * Intel counterpart of ucode_locate_amd(): find, read and checksum the
 * microcode file for this CPU (named by signature and platform id),
 * filling in ufp->intel.  Returns EM_OK if a usable match was loaded,
 * or the corresponding EM_* error code.
 */
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char		name[MAXPATHLEN];
	intptr_t	fd;
	int		count;
	int		header_size = UCODE_HEADER_SIZE_INTEL;
	int		cpi_sig = cpuid_getsig(cp);
	ucode_errno_t	rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t	*uhp = ucodefp->uf_header;
		uint32_t	offset = header_size;
		int		total_size, body_size, ext_size;
		uint32_t	sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		/* header + body must checksum to zero */
		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			/* each extended signature also checksums to zero */
			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		/* short read of the header */
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}
635 
#ifndef __xpv
/*
 * Check whether the given AMD patch can be applied to a CPU whose
 * equivalent CPU id is eq_sig.  Returns EM_OK on a usable match,
 * EM_HIGHERREV if the CPU already runs this patch level or newer,
 * and EM_NOMATCH otherwise.
 */
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	/*
	 * Don't even think about loading patches that would require code
	 * execution.
	 */
	if (size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	/* chipset-specific patches are rejected; see ucode_chipset_amd() */
	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev)
		return (EM_HIGHERREV);

	return (EM_OK);
}
#endif
677 
678 /*
679  * Returns 1 if the microcode is for this processor; 0 otherwise.
680  */
681 static ucode_errno_t
682 ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
683     ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
684 {
685 	if (uhp == NULL)
686 		return (EM_NOMATCH);
687 
688 	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
689 	    uinfop->cui_platid, uhp->uh_proc_flags)) {
690 
691 		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
692 			return (EM_HIGHERREV);
693 
694 		return (EM_OK);
695 	}
696 
697 	if (uetp != NULL) {
698 		int i;
699 
700 		for (i = 0; i < uetp->uet_count; i++) {
701 			ucode_ext_sig_intel_t *uesp;
702 
703 			uesp = &uetp->uet_ext_sig[i];
704 
705 			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
706 			    uinfop->cui_platid, uesp->ues_proc_flags)) {
707 
708 				if (uinfop->cui_rev >= uhp->uh_rev &&
709 				    !ucode_force_update)
710 					return (EM_HIGHERREV);
711 
712 				return (EM_OK);
713 			}
714 		}
715 	}
716 
717 	return (EM_NOMATCH);
718 }
719 
/*
 * Cross-call handler: apply the microcode in uusp->ucodep on the current
 * CPU (bare metal only; under xVM the hypervisor has already done the
 * load) and record the resulting revision in both the per-CPU
 * mcpu_ucode_info and uusp->new_rev.
 */
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef	__xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	/* trigger the update via the vendor's microcode-write MSR */
	wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
#endif
	/* re-read so caller can verify the load actually took effect */
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}
750 
/*
 * Load the located AMD microcode on the current CPU (or, under xVM, hand
 * the whole container to the hypervisor) and return the new revision.
 */
/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef	__xpv
	ucode_update_t uus;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef	__xpv
	/* no preemption between the MSR write and the revision read-back */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
#else
	uus.ucodep = ucodefp->ucodep;
	uus.usize = ucodefp->usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;

	return (uus.new_rev);
#endif
}
780 
/*
 * Load the located Intel microcode on the current CPU (or, under xVM,
 * reassemble header + body + ext table into one buffer and hand it to
 * the hypervisor) and return the revision from the file header.
 */
/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef __xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef __xpv
	/*
	 * the hypervisor wants the header, data, and extended
	 * signature tables. We can only get here from the boot
	 * CPU (cpu #0), we don't need to free as ucode_zalloc() will
	 * use BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	ASSERT(ext_size >= 0);

	/* flatten the three cached pieces back into file layout */
	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	/* no preemption between the MSR write and the revision read-back */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}
834 
835 
#ifdef	__xpv
/*
 * Ask the Xen hypervisor to perform the microcode update for all CPUs
 * with the buffer in uusp.  Only meaningful in the control domain.
 */
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		/* callers detect failure via the unchanged revision */
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif /* __xpv */
858 
/* Read the current AMD microcode patch level into uinfop->cui_rev. */
static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}
864 
/* Read the current Intel microcode revision into uinfop->cui_rev. */
static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}
879 
/*
 * Pick the AMD microcode patch from the user-supplied container buffer
 * that applies to the CPU described in uusp, filling in uusp->ucodep,
 * usize and expected_rev.  Under xVM the whole container is passed
 * through and the hypervisor selects the patch.
 */
static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef __xpv
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	/* equivalence table: length word, then zero-terminated entries */
	count = *ptr++; size -= 4;
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
	do {
		/* advance past the previous section's payload */
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		/* section type word, then length word, then the patch */
		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;
#else
	/*
	 * The hypervisor will choose the patch to load, so there is no way to
	 * know the "expected revision" in advance. This is especially true on
	 * mixed-revision systems where more than one patch will be loaded.
	 */
	uusp->expected_rev = 0;
	uusp->ucodep = ucodep;
	uusp->usize = size;

	ucode_chipset_amd(ucodep, size);
#endif

	return (EM_OK);
}
941 
/*
 * Scan the user-supplied Intel microcode buffer for the best (highest
 * revision) match for the CPU described in uusp, filling in
 * uusp->ucodep, usize and expected_rev.  Returns EM_OK when a match
 * was found, otherwise the most informative error seen while searching.
 *
 * NOTE(review): the walk trusts each header's total_size to advance;
 * presumably the caller has run ucode_validate_intel() first — verify.
 */
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t	header_size = UCODE_HEADER_SIZE_INTEL;
	int		remaining;
	int		found = 0;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int	total_size, body_size, ext_size;
		uint8_t	*curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point.  However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them.  Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		/* keep only the highest revision that matches */
		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
#ifndef __xpv
			/* bare metal: MSR write wants the body only */
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			/* xVM: hypervisor wants header + body + ext table */
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}
/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Holds cpu_lock and, for each ready CPU, extracts the matching patch
 * from the user-supplied buffer and applies it via an xcall to that CPU.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int		found = 0;
	processorid_t	id;
	ucode_update_t	cached = { 0 };
	ucode_update_t	*cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef	__xpv
		/*
		 * for i86xpv, the hypervisor will update all the CPUs.
		 * the hypervisor wants the header, data, and extended
		 * signature tables. ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

		/* run ucode_write() on the target CPU, one at a time */
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		/* classify the result from the revision read back */
		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found)
		rc = search_rc;

	return (rc);
}
1113 
/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/* boot CPU uses the static structure; kmem is not up yet */
	if (cp->cpu_id == 0)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			ucode = NULL;
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef	__xpv
	/*
	 * for i86xpv, the hypervisor will update all the CPUs. We only need
	 * do do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		/* load() returns the file's rev; mismatch means failure */
		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}
1198 
/*
 * Returns microcode revision from the machcpu structure.
 *
 * revp must point to an array with at least max_ncpus entries; slots
 * for CPUs that do not exist are left untouched.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(ucode);
	ASSERT(revp);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	/* cpu_lock keeps the CPU list stable while we walk it */
	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}
1226