xref: /titanic_52/usr/src/uts/i86pc/os/microcode.c (revision 1a1a84a324206b6b1f5f704ab166c4ebf78aed76)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/asm_linkage.h>
28 #include <sys/bootconf.h>
29 #include <sys/cpuvar.h>
30 #include <sys/cmn_err.h>
31 #include <sys/controlregs.h>
32 #include <sys/debug.h>
33 #include <sys/kobj.h>
34 #include <sys/kobj_impl.h>
35 #include <sys/machsystm.h>
36 #include <sys/param.h>
37 #include <sys/machparam.h>
38 #include <sys/promif.h>
39 #include <sys/sysmacros.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/thread.h>
43 #include <sys/ucode.h>
44 #include <sys/x86_archext.h>
45 #include <sys/x_call.h>
46 #ifdef	__xpv
47 #include <sys/hypervisor.h>
48 #endif
49 
50 /*
51  * AMD-specific equivalence table
52  */
53 static ucode_eqtbl_amd_t *ucode_eqtbl_amd;
54 
55 /*
56  * mcpu_ucode_info for the boot CPU.  Statically allocated.
57  */
58 static struct cpu_ucode_info cpu_ucode_info0;
59 
60 static ucode_file_t ucodefile;
61 
62 static void* ucode_zalloc(processorid_t, size_t);
63 static void ucode_free(processorid_t, void *, size_t);
64 
65 static int ucode_capable_amd(cpu_t *);
66 static int ucode_capable_intel(cpu_t *);
67 
68 static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
69 static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
70     int);
71 
72 static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
73 static void ucode_file_reset_intel(ucode_file_t *, processorid_t);
74 
75 static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
76 static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
77 
78 #ifdef	__xpv
79 static void ucode_load_xpv(ucode_update_t *);
80 static void ucode_chipset_amd(uint8_t *, int);
81 #endif
82 
83 static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);
84 
85 static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
86     ucode_file_t *);
87 static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
88     ucode_file_t *);
89 
90 #ifndef __xpv
91 static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
92     ucode_file_amd_t *, int);
93 #endif
94 static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
95     ucode_header_intel_t *, ucode_ext_table_intel_t *);
96 
97 static void ucode_read_rev_amd(cpu_ucode_info_t *);
98 static void ucode_read_rev_intel(cpu_ucode_info_t *);
99 
100 static const struct ucode_ops ucode_amd = {
101 	MSR_AMD_PATCHLOADER,
102 	ucode_capable_amd,
103 	ucode_file_reset_amd,
104 	ucode_read_rev_amd,
105 	ucode_load_amd,
106 	ucode_validate_amd,
107 	ucode_extract_amd,
108 	ucode_locate_amd
109 };
110 
111 static const struct ucode_ops ucode_intel = {
112 	MSR_INTC_UCODE_WRITE,
113 	ucode_capable_intel,
114 	ucode_file_reset_intel,
115 	ucode_read_rev_intel,
116 	ucode_load_intel,
117 	ucode_validate_intel,
118 	ucode_extract_intel,
119 	ucode_locate_intel
120 };
121 
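/*
 * Vendor-specific operations vector for the processors in this system.
 * It is set once, in ucode_check(), based on the vendor of the boot CPU.
 */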
122 const struct ucode_ops *ucode;
123 
124 static const char ucode_failure_fmt[] =
125 	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
126 static const char ucode_success_fmt[] =
127 	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
128 
129 /*
130  * Force flag.  If set, the first microcode binary that matches
131  * signature and platform id will be used for microcode update,
132  * regardless of version.  Should only be used for debugging.
133  */
134 int ucode_force_update = 0;
135 
136 /*
137  * Allocate space for mcpu_ucode_info in the machcpu structure
138  * for all non-boot CPUs.
139  */
140 void
141 ucode_alloc_space(cpu_t *cp)
142 {
143 	ASSERT(cp->cpu_id != 0);
144 	cp->cpu_m.mcpu_ucode_info =
145 	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
146 }
147 
148 void
149 ucode_free_space(cpu_t *cp)
150 {
151 	ASSERT(cp->cpu_id != 0);
152 	kmem_free(cp->cpu_m.mcpu_ucode_info,
153 	    sizeof (*cp->cpu_m.mcpu_ucode_info));
154 }
155 
156 /*
157  * Called when we are done with microcode update on all processors to free up
158  * space allocated for the microcode file.
159  */
160 void
161 ucode_cleanup()
162 {
163 	ASSERT(ucode);
164 
165 	ucode->file_reset(&ucodefile, -1);
166 }
167 
168 /*
169  * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
170  * allocated with BOP_ALLOC() and does not require a free.
171  */
172 static void*
173 ucode_zalloc(processorid_t id, size_t size)
174 {
175 	if (id)
176 		return (kmem_zalloc(size, KM_NOSLEEP));
177 
178 	/* BOP_ALLOC() failure results in panic */
179 	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
180 }
181 
182 static void
183 ucode_free(processorid_t id, void* buf, size_t size)
184 {
185 	if (id)
186 		kmem_free(buf, size);
187 }
188 
189 /*
190  * Check whether or not a processor is capable of microcode operations.
191  * Returns 1 if it is capable, 0 if not.
192  *
193  * At this point we only support microcode update for:
194  * - Intel processors family 6 and above, and
195  * - AMD processors family 0x10 and above.
196  *
197  * We also assume that we don't support a mix of Intel and
198  * AMD processors in the same box.
199  *
200  * An i86xpv guest domain can't update the microcode.
201  */
202 /*ARGSUSED*/
203 static int
204 ucode_capable_amd(cpu_t *cp)
205 {
206 #ifndef	__xpv
207 	extern int xpv_is_hvm;
208 	if (xpv_is_hvm) {
209 		return (0);
210 	}
211 #else
212 	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
213 		return (0);
214 	}
215 #endif
216 	return (cpuid_getfamily(cp) >= 0x10);
217 }
218 
219 static int
220 ucode_capable_intel(cpu_t *cp)
221 {
222 #ifndef	__xpv
223 	extern int xpv_is_hvm;
224 	if (xpv_is_hvm) {
225 		return (0);
226 	}
227 #else
228 	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
229 		return (0);
230 	}
231 #endif
232 	return (cpuid_getfamily(cp) >= 6);
233 }
234 
235 /*
236  * Called when it is no longer necessary to keep the microcode around,
237  * or when the cached microcode doesn't match the CPU being processed.
238  */
239 static void
240 ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
241 {
242 	ucode_file_amd_t *ucodefp = ufp->amd;
243 
244 	if (ucodefp == NULL)
245 		return;
246 
247 	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
248 	ufp->amd = NULL;
249 }
250 
251 static void
252 ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
253 {
254 	ucode_file_intel_t *ucodefp = &ufp->intel;
255 	int total_size, body_size;
256 
257 	if (ucodefp == NULL || ucodefp->uf_header == NULL)
258 		return;
259 
260 	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
261 	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
262 	if (ucodefp->uf_body) {
263 		ucode_free(id, ucodefp->uf_body, body_size);
264 		ucodefp->uf_body = NULL;
265 	}
266 
267 	if (ucodefp->uf_ext_table) {
268 		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;
269 
270 		ucode_free(id, ucodefp->uf_ext_table, size);
271 		ucodefp->uf_ext_table = NULL;
272 	}
273 
274 	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
275 	ucodefp->uf_header = NULL;
276 }
277 
278 /*
279  * Find the equivalent CPU id in the equivalence table.
280  */
281 static int
282 ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
283 {
284 	char name[MAXPATHLEN];
285 	intptr_t fd;
286 	int count;
287 	int offset = 0, cpi_sig = cpuid_getsig(cp);
288 	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;
289 
290 	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
291 	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));
292 
293 	/*
294 	 * No kmem_zalloc() etc. available on boot cpu.
295 	 */
296 	if (cp->cpu_id == 0) {
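		/*
		 * Walk the equivalence table directly from the file, one
		 * entry at a time, until the entry for this CPU's signature
		 * or the terminating entry (ue_inst_cpu == 0) is found.
		 */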
297 		if ((fd = kobj_open(name)) == -1)
298 			return (EM_OPENFILE);
299 		/* ucode_zalloc() cannot fail on boot cpu */
300 		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
301 		ASSERT(eqtbl);
302 		do {
303 			count = kobj_read(fd, (int8_t *)eqtbl,
304 			    sizeof (*eqtbl), offset);
305 			if (count != sizeof (*eqtbl)) {
306 				(void) kobj_close(fd);
307 				return (EM_HIGHERREV);
308 			}
309 			offset += count;
310 		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
311 		(void) kobj_close(fd);
312 	}
313 
314 	/*
315 	 * If it has not been loaded yet, load the equivalence table into
316 	 * memory.  This is not done on the boot CPU.
317 	 */
318 	if (eqtbl == NULL) {
319 		struct _buf *eq;
320 		uint64_t size;
321 
322 		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
323 			return (EM_OPENFILE);
324 
325 		if (kobj_get_filesize(eq, &size) < 0) {
326 			kobj_close_file(eq);
327 			return (EM_OPENFILE);
328 		}
329 
330 		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
331 		if (ucode_eqtbl_amd == NULL) {
332 			kobj_close_file(eq);
333 			return (EM_NOMEM);
334 		}
335 
336 		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
337 		kobj_close_file(eq);
338 
339 		if (count != size)
340 			return (EM_FILESIZE);
341 	}
342 
343 	/* Get the equivalent CPU id. */
344 	if (cp->cpu_id)
345 		for (eqtbl = ucode_eqtbl_amd;
346 		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
347 		    eqtbl++)
348 			;
349 
350 	*eq_sig = eqtbl->ue_equiv_cpu;
351 
352 	/* No equivalent CPU id found, assume outdated microcode file. */
353 	if (*eq_sig == 0)
354 		return (EM_HIGHERREV);
355 
356 	return (EM_OK);
357 }
358 
359 /*
360  * xVM cannot check for the presence of PCI devices. Look for chipset-
361  * specific microcode patches in the container file and disable them
362  * by setting their CPU revision to an invalid value.
363  */
364 #ifdef __xpv
365 static void
366 ucode_chipset_amd(uint8_t *buf, int size)
367 {
368 	ucode_header_amd_t *uh;
369 	uint32_t *ptr = (uint32_t *)buf;
370 	int len = 0;
371 
372 	/* skip to first microcode patch */
373 	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;
374 
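	/*
	 * Walk the patch sections that follow: skip the leading dword of
	 * each section, read the section length, then look at the patch
	 * header to decide whether the patch is chipset-specific.
	 */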
375 	while (size >= sizeof (ucode_header_amd_t) + 8) {
376 		ptr++; len = *ptr++;
377 		uh = (ucode_header_amd_t *)ptr;
378 		ptr += len >> 2; size -= len;
379 
380 		if (uh->uh_nb_id) {
381 			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
382 			    "chipset id %x, revision %x",
383 			    uh->uh_nb_id, uh->uh_nb_rev);
384 			uh->uh_cpu_rev = 0xffff;
385 		}
386 
387 		if (uh->uh_sb_id) {
388 			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
389 			    "chipset id %x, revision %x",
390 			    uh->uh_sb_id, uh->uh_sb_rev);
391 			uh->uh_cpu_rev = 0xffff;
392 		}
393 	}
394 }
395 #endif
396 
397 /*
398  * Populate the ucode file structure from the microcode file corresponding
399  * to this CPU, if one exists.
400  *
401  * Return EM_OK on success, corresponding error code on failure.
402  */
403 /*ARGSUSED*/
404 static ucode_errno_t
405 ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
406 {
407 	char name[MAXPATHLEN];
408 	intptr_t fd;
409 	int count, rc;
410 	ucode_file_amd_t *ucodefp = ufp->amd;
411 
412 #ifndef __xpv
413 	uint16_t eq_sig = 0;
414 	int i;
415 
416 	/* get equivalent CPU id */
417 	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
418 		return (rc);
419 
420 	/*
421 	 * Allocate a buffer for the microcode patch. If the buffer has been
422 	 * allocated before, check for a matching microcode to avoid loading
423 	 * the file again.
424 	 */
425 	if (ucodefp == NULL)
426 		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
427 	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
428 	    == EM_OK)
429 		return (EM_OK);
430 
431 	if (ucodefp == NULL)
432 		return (EM_NOMEM);
433 
434 	ufp->amd = ucodefp;
435 
436 	/*
437 	 * Find the patch for this CPU. The patch files are named XXXX-YY, where
438 	 * XXXX is the equivalent CPU id and YY is the running patch number.
439 	 * Patches specific to certain chipsets are guaranteed to have lower
440 	 * numbers than less specific patches, so we can just load the first
441 	 * patch that matches.
442 	 */
443 
444 	for (i = 0; i < 0xff; i++) {
445 		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
446 		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
447 		if ((fd = kobj_open(name)) == -1)
448 			return (EM_NOMATCH);
449 		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
450 		(void) kobj_close(fd);
451 
452 		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
453 			return (EM_OK);
454 	}
455 	return (EM_NOMATCH);
456 #else
457 	int size = 0;
458 	char c;
459 
460 	/*
461 	 * The xVM case is special. To support mixed-revision systems, the
462 	 * hypervisor will choose which patch to load for which CPU, so the
463 	 * whole microcode patch container file will have to be loaded.
464 	 *
465 	 * Since this code only runs on the boot CPU, we don't have to worry
466 	 * about ucode_zalloc() failing or about freeing the allocated memory.
467 	 */
468 	if (cp->cpu_id != 0)
469 		return (EM_INVALIDARG);
470 
471 	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
472 	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));
473 
474 	if ((fd = kobj_open(name)) == -1)
475 		return (EM_OPENFILE);
476 
477 	/* get the file size by counting bytes */
478 	do {
479 		count = kobj_read(fd, &c, 1, size);
480 		size += count;
481 	} while (count);
482 
483 	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
484 	ASSERT(ucodefp);
485 	ufp->amd = ucodefp;
486 
487 	ucodefp->usize = size;
488 	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
489 	ASSERT(ucodefp->ucodep);
490 
491 	/* load the microcode patch container file */
492 	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
493 	(void) kobj_close(fd);
494 
495 	if (count != size)
496 		return (EM_FILESIZE);
497 
498 	/* make sure the container file is valid */
499 	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);
500 
501 	if (rc != EM_OK)
502 		return (rc);
503 
504 	/* disable chipset-specific patches */
505 	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);
506 
507 	return (EM_OK);
508 #endif
509 }
510 
511 static ucode_errno_t
512 ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
513 {
514 	char		name[MAXPATHLEN];
515 	intptr_t	fd;
516 	int		count;
517 	int		header_size = UCODE_HEADER_SIZE_INTEL;
518 	int		cpi_sig = cpuid_getsig(cp);
519 	ucode_errno_t	rc = EM_OK;
520 	ucode_file_intel_t *ucodefp = &ufp->intel;
521 
522 	ASSERT(ucode);
523 
524 	/*
525 	 * If the microcode matches the CPU we are processing, use it.
526 	 */
527 	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
528 	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
529 		return (EM_OK);
530 	}
531 
532 	/*
533 	 * Look for a microcode file with the right name.
534 	 */
535 	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
536 	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
537 	    uinfop->cui_platid);
538 	if ((fd = kobj_open(name)) == -1) {
539 		return (EM_OPENFILE);
540 	}
541 
542 	/*
543 	 * We found a microcode file for the CPU we are processing;
544 	 * reset the microcode data structure and read in the new
545 	 * file.
546 	 */
547 	ucode->file_reset(ufp, cp->cpu_id);
548 
549 	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
550 	if (ucodefp->uf_header == NULL)
551 		return (EM_NOMEM);
552 
553 	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);
554 
555 	switch (count) {
556 	case UCODE_HEADER_SIZE_INTEL: {
557 
558 		ucode_header_intel_t	*uhp = ucodefp->uf_header;
559 		uint32_t	offset = header_size;
560 		int		total_size, body_size, ext_size;
561 		uint32_t	sum = 0;
562 
563 		/*
564 		 * Make sure that the header contains valid fields.
565 		 */
566 		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
567 			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
568 			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
569 			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
570 			if (ucodefp->uf_body == NULL) {
571 				rc = EM_NOMEM;
572 				break;
573 			}
574 
575 			if (kobj_read(fd, (char *)ucodefp->uf_body,
576 			    body_size, offset) != body_size)
577 				rc = EM_FILESIZE;
578 		}
579 
580 		if (rc)
581 			break;
582 
583 		sum = ucode_checksum_intel(0, header_size,
584 		    (uint8_t *)ucodefp->uf_header);
585 		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
586 			rc = EM_CHECKSUM;
587 			break;
588 		}
589 
590 		/*
591 		 * Check to see if there is an extended signature table.
592 		 */
593 		offset = body_size + header_size;
594 		ext_size = total_size - offset;
595 
596 		if (ext_size <= 0)
597 			break;
598 
599 		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
600 		if (ucodefp->uf_ext_table == NULL) {
601 			rc = EM_NOMEM;
602 			break;
603 		}
604 
605 		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
606 		    ext_size, offset) != ext_size) {
607 			rc = EM_FILESIZE;
608 		} else if (ucode_checksum_intel(0, ext_size,
609 		    (uint8_t *)(ucodefp->uf_ext_table))) {
610 			rc = EM_CHECKSUM;
611 		} else {
612 			int i;
613 
614 			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
615 			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
616 			    i++) {
617 				if (ucode_checksum_intel(0,
618 				    UCODE_EXT_SIG_SIZE_INTEL,
619 				    (uint8_t *)(&(ucodefp->uf_ext_table->
620 				    uet_ext_sig[i])))) {
621 					rc = EM_CHECKSUM;
622 					break;
623 				}
624 			}
625 		}
626 		break;
627 	}
628 
629 	default:
630 		rc = EM_FILESIZE;
631 		break;
632 	}
633 
634 	kobj_close(fd);
635 
636 	if (rc != EM_OK)
637 		return (rc);
638 
639 	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
640 	    ucodefp->uf_ext_table);
641 
642 	return (rc);
643 }
644 
645 #ifndef __xpv
646 static ucode_errno_t
647 ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
648     ucode_file_amd_t *ucodefp, int size)
649 {
650 	ucode_header_amd_t *uh;
651 
652 	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
653 		return (EM_NOMATCH);
654 
655 	/*
656 	 * Don't even think about loading patches that would require code
657 	 * execution.
658 	 */
659 	if (size > offsetof(ucode_file_amd_t, uf_code_present) &&
660 	    ucodefp->uf_code_present)
661 		return (EM_NOMATCH);
662 
663 	uh = &ucodefp->uf_header;
664 
665 	if (eq_sig != uh->uh_cpu_rev)
666 		return (EM_NOMATCH);
667 
668 	if (uh->uh_nb_id) {
669 		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
670 		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
671 		return (EM_NOMATCH);
672 	}
673 
674 	if (uh->uh_sb_id) {
675 		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
676 		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
677 		return (EM_NOMATCH);
678 	}
679 
680 	if (uh->uh_patch_id <= uinfop->cui_rev)
681 		return (EM_HIGHERREV);
682 
683 	return (EM_OK);
684 }
685 #endif
686 
687 /*
688  * Returns EM_OK on a match; EM_NOMATCH or EM_HIGHERREV otherwise.
689  */
690 static ucode_errno_t
691 ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
692     ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
693 {
694 	if (uhp == NULL)
695 		return (EM_NOMATCH);
696 
697 	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
698 	    uinfop->cui_platid, uhp->uh_proc_flags)) {
699 
700 		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
701 			return (EM_HIGHERREV);
702 
703 		return (EM_OK);
704 	}
705 
706 	if (uetp != NULL) {
707 		int i;
708 
709 		for (i = 0; i < uetp->uet_count; i++) {
710 			ucode_ext_sig_intel_t *uesp;
711 
712 			uesp = &uetp->uet_ext_sig[i];
713 
714 			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
715 			    uinfop->cui_platid, uesp->ues_proc_flags)) {
716 
717 				if (uinfop->cui_rev >= uhp->uh_rev &&
718 				    !ucode_force_update)
719 					return (EM_HIGHERREV);
720 
721 				return (EM_OK);
722 			}
723 		}
724 	}
725 
726 	return (EM_NOMATCH);
727 }
728 
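/*
 * Cross-call handler invoked on each target CPU by ucode_update().  On bare
 * metal it writes the vendor-specific MSR to load the microcode (unless the
 * running revision is already current); in either case it refreshes the
 * revision recorded in the machcpu structure.
 */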
729 /*ARGSUSED*/
730 static int
731 ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
732 {
733 	ucode_update_t *uusp = (ucode_update_t *)arg1;
734 	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
735 
736 	ASSERT(ucode);
737 	ASSERT(uusp->ucodep);
738 
739 #ifndef	__xpv
740 	/*
741 	 * Check one more time to see if it is really necessary to update
742 	 * microcode just in case this is a hyperthreaded processor where
743 	 * the threads share the same microcode.
744 	 */
745 	if (!ucode_force_update) {
746 		ucode->read_rev(uinfop);
747 		uusp->new_rev = uinfop->cui_rev;
748 		if (uinfop->cui_rev >= uusp->expected_rev)
749 			return (0);
750 	}
751 
752 	wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
753 #endif
754 	ucode->read_rev(uinfop);
755 	uusp->new_rev = uinfop->cui_rev;
756 
757 	return (0);
758 }
759 
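/*
 * The ucode_load_*() routines load the cached microcode image on the
 * current CPU (called at boot from ucode_check() via ucode->load()) and
 * return the revision the CPU is expected to be running afterwards.
 */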
760 /*ARGSUSED*/
761 static uint32_t
762 ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
763 {
764 	ucode_file_amd_t *ucodefp = ufp->amd;
765 #ifdef	__xpv
766 	ucode_update_t uus;
767 #endif
768 
769 	ASSERT(ucode);
770 	ASSERT(ucodefp);
771 
772 #ifndef	__xpv
773 	kpreempt_disable();
774 	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
775 	ucode->read_rev(uinfop);
776 	kpreempt_enable();
777 
778 	return (ucodefp->uf_header.uh_patch_id);
779 #else
780 	uus.ucodep = ucodefp->ucodep;
781 	uus.usize = ucodefp->usize;
782 	ucode_load_xpv(&uus);
783 	ucode->read_rev(uinfop);
784 	uus.new_rev = uinfop->cui_rev;
785 
786 	return (uus.new_rev);
787 #endif
788 }
789 
790 /*ARGSUSED2*/
791 static uint32_t
792 ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
793 {
794 	ucode_file_intel_t *ucodefp = &ufp->intel;
795 #ifdef __xpv
796 	uint32_t ext_offset;
797 	uint32_t body_size;
798 	uint32_t ext_size;
799 	uint8_t *ustart;
800 	uint32_t usize;
801 	ucode_update_t uus;
802 #endif
803 
804 	ASSERT(ucode);
805 
806 #ifdef __xpv
807 	/*
808 	 * The hypervisor wants the header, data, and extended
809 	 * signature tables.  We can only get here from the boot
810 	 * CPU (cpu #0), so we don't need to free anything because
811 	 * ucode_zalloc() will use BOP_ALLOC().
812 	 */
813 	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
814 	ustart = ucode_zalloc(cp->cpu_id, usize);
815 	ASSERT(ustart);
816 
817 	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
818 	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
819 	ext_size = usize - ext_offset;
820 	ASSERT(ext_size >= 0);
821 
822 	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
823 	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
824 	    body_size);
825 	if (ext_size > 0) {
826 		(void) memcpy(&ustart[ext_offset],
827 		    ucodefp->uf_ext_table, ext_size);
828 	}
829 	uus.ucodep = ustart;
830 	uus.usize = usize;
831 	ucode_load_xpv(&uus);
832 	ucode->read_rev(uinfop);
833 	uus.new_rev = uinfop->cui_rev;
834 #else
835 	kpreempt_disable();
836 	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
837 	ucode->read_rev(uinfop);
838 	kpreempt_enable();
839 #endif
840 
841 	return (ucodefp->uf_header->uh_rev);
842 }
843 
844 
845 #ifdef	__xpv
846 static void
847 ucode_load_xpv(ucode_update_t *uusp)
848 {
849 	xen_platform_op_t op;
850 	int e;
851 
852 	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
853 
854 	kpreempt_disable();
855 	op.cmd = XENPF_microcode_update;
856 	op.interface_version = XENPF_INTERFACE_VERSION;
857 	/*LINTED: constant in conditional context*/
858 	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
859 	op.u.microcode.length = uusp->usize;
860 	e = HYPERVISOR_platform_op(&op);
861 	if (e != 0) {
862 		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
863 	}
864 	kpreempt_enable();
865 }
866 #endif /* __xpv */
867 
868 static void
869 ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
870 {
871 	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
872 }
873 
874 static void
875 ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
876 {
877 	struct cpuid_regs crs;
878 
879 	/*
880 	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
881 	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first and that
882 	 * cpuid then be executed to guarantee a correct reading of this register.
883 	 */
884 	wrmsr(MSR_INTC_UCODE_REV, 0);
885 	(void) __cpuid_insn(&crs);
886 	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
887 }
888 
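/*
 * Locate a matching microcode patch in the raw container file handed in by
 * the ucode_drv driver and record its address, size and expected revision in
 * the ucode_update_t.  Under xVM the whole container is recorded instead, so
 * that it can be handed to the hypervisor.
 */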
889 static ucode_errno_t
890 ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
891 {
892 #ifndef __xpv
893 	uint32_t *ptr = (uint32_t *)ucodep;
894 	ucode_eqtbl_amd_t *eqtbl;
895 	ucode_file_amd_t *ufp;
896 	int count;
897 	int higher = 0;
898 	ucode_errno_t rc = EM_NOMATCH;
899 	uint16_t eq_sig;
900 
901 	/* skip over magic number & equivalence table header */
902 	ptr += 2; size -= 8;
903 
904 	count = *ptr++; size -= 4;
905 	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
906 	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
907 	    eqtbl++)
908 		;
909 
910 	eq_sig = eqtbl->ue_equiv_cpu;
911 
912 	/* No equivalent CPU id found, assume outdated microcode file. */
913 	if (eq_sig == 0)
914 		return (EM_HIGHERREV);
915 
916 	/* Use the first microcode patch that matches. */
917 	do {
918 		ptr += count >> 2; size -= count;
919 
920 		if (!size)
921 			return (higher ? EM_HIGHERREV : EM_NOMATCH);
922 
923 		ptr++; size -= 4;
924 		count = *ptr++; size -= 4;
925 		ufp = (ucode_file_amd_t *)ptr;
926 
927 		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
928 		if (rc == EM_HIGHERREV)
929 			higher = 1;
930 	} while (rc != EM_OK);
931 
932 	uusp->ucodep = (uint8_t *)ufp;
933 	uusp->usize = count;
934 	uusp->expected_rev = ufp->uf_header.uh_patch_id;
935 #else
936 	/*
937 	 * The hypervisor will choose the patch to load, so there is no way to
938 	 * know the "expected revision" in advance. This is especially true on
939 	 * mixed-revision systems where more than one patch will be loaded.
940 	 */
941 	uusp->expected_rev = 0;
942 	uusp->ucodep = ucodep;
943 	uusp->usize = size;
944 
945 	ucode_chipset_amd(ucodep, size);
946 #endif
947 
948 	return (EM_OK);
949 }
950 
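/*
 * Scan the microcode buffer handed in by the ucode_drv driver for the newest
 * update that matches this CPU and record its address, size and revision in
 * the ucode_update_t.
 */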
951 static ucode_errno_t
952 ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
953 {
954 	uint32_t	header_size = UCODE_HEADER_SIZE_INTEL;
955 	int		remaining;
956 	int		found = 0;
957 	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
958 
959 	/*
960 	 * Go through the whole buffer in case there are
961 	 * multiple versions of matching microcode for this
962 	 * processor.
963 	 */
964 	for (remaining = size; remaining > 0; ) {
965 		int	total_size, body_size, ext_size;
966 		uint8_t	*curbuf = &ucodep[size - remaining];
967 		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
968 		ucode_ext_table_intel_t *uetp = NULL;
969 		ucode_errno_t tmprc;
970 
971 		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
972 		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
973 		ext_size = total_size - (header_size + body_size);
974 
975 		if (ext_size > 0)
976 			uetp = (ucode_ext_table_intel_t *)
977 			    &curbuf[header_size + body_size];
978 
979 		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);
980 
981 		/*
982 		 * Since we are searching through a big file
983 		 * containing microcode for pretty much every
984 		 * processor, we are bound to get EM_NOMATCH
985 		 * at some point.  However, returning EM_NOMATCH
986 		 * to users would be needlessly confusing.
987 		 * Therefore, if we ever find a match with a
988 		 * lower revision, we set the return code to
989 		 * EM_HIGHERREV.
990 		 */
991 		if (tmprc == EM_HIGHERREV)
992 			search_rc = EM_HIGHERREV;
993 
994 		if (tmprc == EM_OK &&
995 		    uusp->expected_rev < uhp->uh_rev) {
996 #ifndef __xpv
997 			uusp->ucodep = (uint8_t *)&curbuf[header_size];
998 #else
999 			uusp->ucodep = (uint8_t *)curbuf;
1000 #endif
1001 			uusp->usize =
1002 			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
1003 			uusp->expected_rev = uhp->uh_rev;
1004 			found = 1;
1005 		}
1006 
1007 		remaining -= total_size;
1008 	}
1009 
1010 	if (!found)
1011 		return (search_rc);
1012 
1013 	return (EM_OK);
1014 }
1015 /*
1016  * Entry point to microcode update from the ucode_drv driver.
1017  *
1018  * Returns EM_OK on success, corresponding error code on failure.
1019  */
1020 ucode_errno_t
1021 ucode_update(uint8_t *ucodep, int size)
1022 {
1023 	int		found = 0;
1024 	processorid_t	id;
1025 	ucode_update_t	cached = { 0 };
1026 	ucode_update_t	*cachedp = NULL;
1027 	ucode_errno_t	rc = EM_OK;
1028 	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
1029 	cpuset_t cpuset;
1030 
1031 	ASSERT(ucode);
1032 	ASSERT(ucodep);
1033 	CPUSET_ZERO(cpuset);
1034 
1035 	if (!ucode->capable(CPU))
1036 		return (EM_NOTSUP);
1037 
1038 	mutex_enter(&cpu_lock);
1039 
1040 	for (id = 0; id < max_ncpus; id++) {
1041 		cpu_t *cpu;
1042 		ucode_update_t uus = { 0 };
1043 		ucode_update_t *uusp = &uus;
1044 
1045 		/*
1046 		 * If there is no such CPU or it is not xcall ready, skip it.
1047 		 */
1048 		if ((cpu = cpu_get(id)) == NULL ||
1049 		    !(cpu->cpu_flags & CPU_READY))
1050 			continue;
1051 
1052 		uusp->sig = cpuid_getsig(cpu);
1053 		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
1054 		    sizeof (uusp->info));
1055 
1056 		/*
1057 		 * If the current CPU has the same signature and platform
1058 		 * id as the previous one we processed, reuse the information.
1059 		 */
1060 		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
1061 		    cachedp->info.cui_platid == uusp->info.cui_platid) {
1062 			uusp->ucodep = cachedp->ucodep;
1063 			uusp->expected_rev = cachedp->expected_rev;
1064 			/*
1065 			 * Intuitively we should check here to see whether the
1066 			 * running microcode rev is >= the expected rev, and
1067 			 * quit if it is.  But we choose to proceed with the
1068 			 * xcall regardless of the running version so that
1069 			 * the other threads in an HT processor can update
1070 			 * the cpu_ucode_info structure in machcpu.
1071 			 */
1072 		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
1073 		    == EM_OK) {
1074 			bcopy(uusp, &cached, sizeof (cached));
1075 			cachedp = &cached;
1076 			found = 1;
1077 		}
1078 
1079 		/* Nothing to do */
1080 		if (uusp->ucodep == NULL)
1081 			continue;
1082 
1083 #ifdef	__xpv
1084 		/*
1085 		 * For i86xpv, the hypervisor will update all the CPUs.
1086 		 * The hypervisor wants the header, data, and extended
1087 		 * signature tables.  ucode_write() will just read back the
1088 		 * updated revision on all the CPUs after the update has
1089 		 * completed.
1090 		 */
1091 		if (id == 0) {
1092 			ucode_load_xpv(uusp);
1093 		}
1094 #endif
1095 
1096 		CPUSET_ADD(cpuset, id);
1097 		kpreempt_disable();
1098 		xc_sync((xc_arg_t)uusp, 0, 0, X_CALL_HIPRI, cpuset,
1099 		    ucode_write);
1100 		kpreempt_enable();
1101 		CPUSET_DEL(cpuset, id);
1102 
1103 		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev) {
1104 			rc = EM_HIGHERREV;
1105 		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
1106 		    uusp->expected_rev != uusp->new_rev)) {
1107 			cmn_err(CE_WARN, ucode_failure_fmt,
1108 			    id, uusp->info.cui_rev, uusp->expected_rev);
1109 			rc = EM_UPDATE;
1110 		} else {
1111 			cmn_err(CE_CONT, ucode_success_fmt,
1112 			    id, uusp->info.cui_rev, uusp->new_rev);
1113 		}
1114 	}
1115 
1116 	mutex_exit(&cpu_lock);
1117 
1118 	if (!found)
1119 		rc = search_rc;
1120 
1121 	return (rc);
1122 }
1123 
1124 /*
1125  * Initialize mcpu_ucode_info, and perform microcode update if necessary.
1126  * This is the entry point from the boot path, where a pointer to the CPU
1127  * structure is available.
1128  *
1129  * cpuid_info must be initialized before ucode_check can be called.
1130  */
1131 void
1132 ucode_check(cpu_t *cp)
1133 {
1134 	cpu_ucode_info_t *uinfop;
1135 	ucode_errno_t rc = EM_OK;
1136 	uint32_t new_rev = 0;
1137 
1138 	ASSERT(cp);
1139 	if (cp->cpu_id == 0)
1140 		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;
1141 
1142 	uinfop = cp->cpu_m.mcpu_ucode_info;
1143 	ASSERT(uinfop);
1144 
1145 	/* set up function pointers if not already done */
1146 	if (!ucode)
1147 		switch (cpuid_getvendor(cp)) {
1148 		case X86_VENDOR_AMD:
1149 			ucode = &ucode_amd;
1150 			break;
1151 		case X86_VENDOR_Intel:
1152 			ucode = &ucode_intel;
1153 			break;
1154 		default:
1155 			return;
1156 		}
1157 
1158 	if (!ucode->capable(cp))
1159 		return;
1160 
1161 	/*
1162 	 * MSR_INTC_PLATFORM_ID is supported on Celeron and Xeon processors
1163 	 * (family 6, model 5 and above) and on all later processors.
1164 	 */
1165 	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
1166 	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
1167 		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
1168 		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
1169 	}
1170 
1171 	ucode->read_rev(uinfop);
1172 
1173 #ifdef	__xpv
1174 	/*
1175 	 * For i86xpv, the hypervisor will update all the CPUs.  We only need
1176 	 * to do this on one of the CPUs (and there is always a CPU 0).
1177 	 */
1178 	if (cp->cpu_id != 0) {
1179 		return;
1180 	}
1181 #endif
1182 
1183 	/*
1184 	 * Check to see if we need a microcode update
1185 	 */
1186 	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
1187 		new_rev = ucode->load(&ucodefile, uinfop, cp);
1188 
1189 		if (uinfop->cui_rev != new_rev)
1190 			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
1191 			    uinfop->cui_rev, new_rev);
1192 	}
1193 
1194 	/*
1195 	 * If we fail to find a match for any reason, free the file structure
1196 	 * just in case we have read in a partial file.
1197 	 *
1198 	 * Since the scratch memory for holding the microcode for the boot CPU
1199 	 * came from BOP_ALLOC, we will reset the data structure as if we
1200 	 * never did the allocation so we don't have to keep track of this
1201 	 * special chunk of memory.  We free the memory used for the rest
1202 	 * of the CPUs in start_other_cpus().
1203 	 */
1204 	if (rc != EM_OK || cp->cpu_id == 0)
1205 		ucode->file_reset(&ucodefile, cp->cpu_id);
1206 }
1207 
1208 /*
1209  * Returns the microcode revision of each CPU from its machcpu structure.
1210  */
1211 ucode_errno_t
1212 ucode_get_rev(uint32_t *revp)
1213 {
1214 	int i;
1215 
1216 	ASSERT(ucode);
1217 	ASSERT(revp);
1218 
1219 	if (!ucode->capable(CPU))
1220 		return (EM_NOTSUP);
1221 
1222 	mutex_enter(&cpu_lock);
1223 	for (i = 0; i < max_ncpus; i++) {
1224 		cpu_t *cpu;
1225 
1226 		if ((cpu = cpu_get(i)) == NULL)
1227 			continue;
1228 
1229 		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
1230 	}
1231 	mutex_exit(&cpu_lock);
1232 
1233 	return (EM_OK);
1234 }
1235