xref: /titanic_41/usr/src/uts/i86pc/os/microcode.c (revision 4f8b8adc54496e548e2d73094de038a131d9cd45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/asm_linkage.h>
28 #include <sys/bootconf.h>
29 #include <sys/cpuvar.h>
30 #include <sys/cmn_err.h>
31 #include <sys/controlregs.h>
32 #include <sys/debug.h>
33 #include <sys/kobj.h>
34 #include <sys/kobj_impl.h>
35 #include <sys/machsystm.h>
36 #include <sys/param.h>
37 #include <sys/machparam.h>
38 #include <sys/promif.h>
39 #include <sys/sysmacros.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/thread.h>
43 #include <sys/ucode.h>
44 #include <sys/x86_archext.h>
45 #include <sys/x_call.h>
46 #ifdef	__xpv
47 #include <sys/hypervisor.h>
48 #endif
49 
50 /*
51  * AMD-specific equivalence table
52  */
53 static ucode_eqtbl_amd_t *ucode_eqtbl_amd;
54 
55 /*
56  * mcpu_ucode_info for the boot CPU.  Statically allocated.
57  */
58 static struct cpu_ucode_info cpu_ucode_info0;
59 
60 static ucode_file_t ucodefile;
61 
62 static void* ucode_zalloc(processorid_t, size_t);
63 static void ucode_free(processorid_t, void *, size_t);
64 
65 static int ucode_capable_amd(cpu_t *);
66 static int ucode_capable_intel(cpu_t *);
67 
68 static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
69 static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
70     int);
71 
72 static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
73 static void ucode_file_reset_intel(ucode_file_t *, processorid_t);
74 
75 static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
76 static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
77 
78 #ifdef	__xpv
79 static void ucode_load_xpv(ucode_update_t *);
80 static void ucode_chipset_amd(uint8_t *, int);
81 #endif
82 
83 static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);
84 
85 static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
86     ucode_file_t *);
87 static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
88     ucode_file_t *);
89 
90 #ifndef __xpv
91 static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
92     ucode_file_amd_t *, int);
93 #endif
94 static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
95     ucode_header_intel_t *, ucode_ext_table_intel_t *);
96 
97 static void ucode_read_rev_amd(cpu_ucode_info_t *);
98 static void ucode_read_rev_intel(cpu_ucode_info_t *);
99 
100 static const struct ucode_ops ucode_amd = {
101 	MSR_AMD_PATCHLOADER,
102 	ucode_capable_amd,
103 	ucode_file_reset_amd,
104 	ucode_read_rev_amd,
105 	ucode_load_amd,
106 	ucode_validate_amd,
107 	ucode_extract_amd,
108 	ucode_locate_amd
109 };
110 
111 static const struct ucode_ops ucode_intel = {
112 	MSR_INTC_UCODE_WRITE,
113 	ucode_capable_intel,
114 	ucode_file_reset_intel,
115 	ucode_read_rev_intel,
116 	ucode_load_intel,
117 	ucode_validate_intel,
118 	ucode_extract_intel,
119 	ucode_locate_intel
120 };
121 
122 const struct ucode_ops *ucode;
123 
124 static const char ucode_failure_fmt[] =
125 	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
126 static const char ucode_success_fmt[] =
127 	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
128 
129 /*
130  * Force flag.  If set, the first microcode binary that matches
131  * signature and platform id will be used for microcode update,
132  * regardless of version.  Should only be used for debugging.
133  */
134 int ucode_force_update = 0;
135 
136 /*
137  * Allocate space for mcpu_ucode_info in the machcpu structure
138  * for all non-boot CPUs.
139  */
140 void
141 ucode_alloc_space(cpu_t *cp)
142 {
143 	ASSERT(cp->cpu_id != 0);
144 	cp->cpu_m.mcpu_ucode_info =
145 	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
146 }
147 
148 void
149 ucode_free_space(cpu_t *cp)
150 {
151 	ASSERT(cp->cpu_id != 0);
152 	kmem_free(cp->cpu_m.mcpu_ucode_info,
153 	    sizeof (*cp->cpu_m.mcpu_ucode_info));
154 }
155 
156 /*
157  * Called when we are done with microcode update on all processors to free up
158  * space allocated for the microcode file.
159  */
160 void
161 ucode_cleanup()
162 {
163 	ASSERT(ucode);
164 
165 	ucode->file_reset(&ucodefile, -1);
166 }
167 
168 /*
169  * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
170  * allocated with BOP_ALLOC() and does not require a free.
171  */
172 static void*
173 ucode_zalloc(processorid_t id, size_t size)
174 {
175 	if (id)
176 		return (kmem_zalloc(size, KM_NOSLEEP));
177 
178 	/* BOP_ALLOC() failure results in panic */
179 	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
180 }
181 
182 static void
183 ucode_free(processorid_t id, void* buf, size_t size)
184 {
185 	if (id)
186 		kmem_free(buf, size);
187 }
188 
189 /*
190  * Check whether or not a processor is capable of microcode operations
191  * Returns 1 if it is capable, 0 if not.
192  *
193  * At this point we only support microcode update for:
194  * - Intel processors family 6 and above, and
195  * - AMD processors family 0x10 and above.
196  *
197  * We also assume that we don't support a mix of Intel and
198  * AMD processors in the same box.
199  *
200  * An i86xpv guest domain can't update the microcode.
201  */
202 /*ARGSUSED*/
203 static int
204 ucode_capable_amd(cpu_t *cp)
205 {
206 	int hwenv = get_hwenv();
207 
208 	if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
209 		return (0);
210 	}
211 	return (cpuid_getfamily(cp) >= 0x10);
212 }
213 
214 static int
215 ucode_capable_intel(cpu_t *cp)
216 {
217 	int hwenv = get_hwenv();
218 
219 	if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
220 		return (0);
221 	}
222 	return (cpuid_getfamily(cp) >= 6);
223 }
224 
225 /*
226  * Called when it is no longer necessary to keep the microcode around,
227  * or when the cached microcode doesn't match the CPU being processed.
228  */
229 static void
230 ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
231 {
232 	ucode_file_amd_t *ucodefp = ufp->amd;
233 
234 	if (ucodefp == NULL)
235 		return;
236 
237 	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
238 	ufp->amd = NULL;
239 }
240 
/*
 * Release the cached Intel microcode (header, body and the optional
 * extended signature table) and reset the pointers so that a new file
 * can be read in.
 */
static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
	int total_size, body_size;

	/*
	 * NOTE(review): ucodefp is the address of an embedded member, so it
	 * can only be NULL if ufp itself is (and uf_intel sits at offset 0);
	 * the uf_header test is what actually detects the empty cache.
	 */
	if (ucodefp == NULL || ucodefp->uf_header == NULL)
		return;

	/* Recover the sizes from the header before the header is freed. */
	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	if (ucodefp->uf_body) {
		ucode_free(id, ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		/* The extended table is whatever follows header + body. */
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(id, ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}
267 
268 /*
269  * Find the equivalent CPU id in the equivalence table.
270  */
/*
 * Map this CPU's cpuid signature to the "equivalent CPU" id used to name
 * AMD patch files, via the on-disk equivalence-table.  The result is
 * returned through eq_sig.  Returns EM_OK on success, EM_OPENFILE /
 * EM_NOMEM / EM_FILESIZE on I/O or allocation failure, and EM_HIGHERREV
 * when the table has no entry for this CPU.
 */
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		/*
		 * Read one table entry at a time until we hit either the
		 * entry for this CPU or the zero terminator.
		 */
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				/* Short read: table exhausted, no match. */
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		/* Cache the entire table for use by subsequent CPUs. */
		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}
348 
349 /*
350  * xVM cannot check for the presence of PCI devices. Look for chipset-
351  * specific microcode patches in the container file and disable them
352  * by setting their CPU revision to an invalid value.
353  */
#ifdef __xpv
/*
 * Walk the raw container file and neutralize chipset-specific patches
 * (see the block comment above).  Each section is walked as
 * 32-bit words: two leading words, then a byte length, then the section
 * body -- presumably magic/section-id/length framing; TODO confirm
 * against the AMD container format specification.
 */
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

	/* skip to first microcode patch */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	while (size >= sizeof (ucode_header_amd_t) + 8) {
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		/*
		 * Clobbering uh_cpu_rev makes the equivalence check on this
		 * patch fail, so it will never be selected for loading.
		 */
		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif
386 
387 /*
388  * Populate the ucode file structure from microcode file corresponding to
389  * this CPU, if exists.
390  *
391  * Return EM_OK on success, corresponding error code on failure.
392  */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

#ifndef __xpv
	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch. If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU. The patch files are named XXXX-YY, where
	 * XXXX is the equivalent CPU id and YY is the running patch number.
	 * Patches specific to certain chipsets are guaranteed to have lower
	 * numbers than less specific patches, so we can just load the first
	 * patch that matches.
	 */

	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		/* A missing file terminates the patch-number sequence. */
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
#else
	int size = 0;
	char c;

	/*
	 * The xVM case is special. To support mixed-revision systems, the
	 * hypervisor will choose which patch to load for which CPU, so the
	 * whole microcode patch container file will have to be loaded.
	 *
	 * Since this code is only run on the boot cpu, we don't have to care
	 * about failing ucode_zalloc() or freeing allocated memory.
	 */
	if (cp->cpu_id != 0)
		return (EM_INVALIDARG);

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	if ((fd = kobj_open(name)) == -1)
		return (EM_OPENFILE);

	/* get the file size by counting bytes */
	do {
		count = kobj_read(fd, &c, 1, size);
		size += count;
	} while (count);

	/* ucode_zalloc() cannot fail on the boot CPU; BOP_ALLOC() panics. */
	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	ASSERT(ucodefp);
	ufp->amd = ucodefp;

	ucodefp->usize = size;
	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
	ASSERT(ucodefp->ucodep);

	/* load the microcode patch container file */
	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
	(void) kobj_close(fd);

	if (count != size)
		return (EM_FILESIZE);

	/* make sure the container file is valid */
	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);

	if (rc != EM_OK)
		return (rc);

	/* disable chipset-specific patches */
	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);

	return (EM_OK);
#endif
}
500 
/*
 * Locate and read in the Intel microcode file for this CPU, verifying the
 * header, body and extended-signature-table checksums as we go.  On
 * success the pieces are cached in ufp->intel.  Returns EM_OK if a
 * matching, verified update was loaded; otherwise an error code and the
 * cache may hold a partially read file (the caller resets it).
 */
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char		name[MAXPATHLEN];
	intptr_t	fd;
	int		count;
	int		header_size = UCODE_HEADER_SIZE_INTEL;
	int		cpi_sig = cpuid_getsig(cp);
	ucode_errno_t	rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t	*uhp = ucodefp->uf_header;
		uint32_t	offset = header_size;
		int		total_size, body_size, ext_size;
		uint32_t	sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		/*
		 * The 32-bit sum of header + body must be zero for a
		 * well-formed update.
		 */
		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		/*
		 * Verify the whole extended table, then each individual
		 * extended signature entry.
		 */
		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		/* Short header read: file is truncated or empty. */
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}
634 
#ifndef __xpv
/*
 * Check one AMD patch (ucodefp, with 'size' valid bytes) against the
 * running CPU.  Returns EM_OK if it should be loaded, EM_HIGHERREV if
 * the CPU already runs this patch revision or newer, and EM_NOMATCH
 * otherwise (wrong CPU, chipset-specific, or requires code execution).
 */
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	/*
	 * Don't even think about loading patches that would require code
	 * execution.
	 */
	if (size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	/* The patch must be for this CPU's equivalence id. */
	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	/* Chipset-specific patches are not supported on bare metal. */
	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev)
		return (EM_HIGHERREV);

	return (EM_OK);
}
#endif
676 
677 /*
678  * Returns 1 if the microcode is for this processor; 0 otherwise.
679  */
680 static ucode_errno_t
681 ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
682     ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
683 {
684 	if (uhp == NULL)
685 		return (EM_NOMATCH);
686 
687 	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
688 	    uinfop->cui_platid, uhp->uh_proc_flags)) {
689 
690 		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
691 			return (EM_HIGHERREV);
692 
693 		return (EM_OK);
694 	}
695 
696 	if (uetp != NULL) {
697 		int i;
698 
699 		for (i = 0; i < uetp->uet_count; i++) {
700 			ucode_ext_sig_intel_t *uesp;
701 
702 			uesp = &uetp->uet_ext_sig[i];
703 
704 			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
705 			    uinfop->cui_platid, uesp->ues_proc_flags)) {
706 
707 				if (uinfop->cui_rev >= uhp->uh_rev &&
708 				    !ucode_force_update)
709 					return (EM_HIGHERREV);
710 
711 				return (EM_OK);
712 			}
713 		}
714 	}
715 
716 	return (EM_NOMATCH);
717 }
718 
/*
 * Cross-call handler: apply the microcode in uusp->ucodep on the CPU this
 * runs on, and record the resulting revision in uusp->new_rev.  Always
 * returns 0 (the xcall protocol ignores the value).
 */
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef	__xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	/* The vendor-specific update MSR takes the patch address. */
	wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
#endif
	/* Re-read so the machcpu structure reflects the applied revision. */
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}
749 
/*
 * Apply the cached AMD patch to the current CPU (bare metal) or hand the
 * container to the hypervisor (xVM).  Returns the new revision as read
 * back from the hardware/hypervisor path.
 */
/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef	__xpv
	ucode_update_t uus;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef	__xpv
	/* The MSR write must not migrate between CPUs mid-update. */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
#else
	uus.ucodep = ucodefp->ucodep;
	uus.usize = ucodefp->usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;

	return (uus.new_rev);
#endif
}
779 
/*
 * Apply the cached Intel update to the current CPU (bare metal), or
 * reassemble header + body + extended table into one contiguous buffer
 * and hand it to the hypervisor (xVM).  Returns the revision recorded in
 * the update header.
 */
/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef __xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef __xpv
	/*
	 * the hypervisor wants the header, data, and extended
	 * signature tables. We can only get here from the boot
	 * CPU (cpu #0), we don't need to free as ucode_zalloc() will
	 * use BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	/* NOTE(review): ext_size is unsigned, so this ASSERT cannot fire. */
	ASSERT(ext_size >= 0);

	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	/* The MSR write must not migrate between CPUs mid-update. */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}
833 
834 
#ifdef	__xpv
/*
 * Ask the hypervisor to perform the microcode update with the buffer in
 * uusp->ucodep / uusp->usize.  May only be called from the control
 * domain; failure is logged but not propagated to the caller.
 */
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif /* __xpv */
857 
858 static void
859 ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
860 {
861 	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
862 }
863 
/*
 * Read the running microcode revision into uinfop->cui_rev using the
 * zero-then-cpuid sequence the architecture requires.
 */
static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}
878 
/*
 * Pick the patch for uusp->sig out of the raw AMD container buffer
 * (bare metal), or pass the whole container through for the hypervisor
 * to choose from (xVM).  On success uusp->ucodep/usize/expected_rev
 * describe the selected payload.
 */
static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef __xpv
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	/* Scan the embedded equivalence table for this CPU's signature. */
	count = *ptr++; size -= 4;
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
	do {
		/* Advance past the section just examined. */
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		/* Section id, then byte length, then the patch itself. */
		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;
#else
	/*
	 * The hypervisor will choose the patch to load, so there is no way to
	 * know the "expected revision" in advance. This is especially true on
	 * mixed-revision systems where more than one patch will be loaded.
	 */
	uusp->expected_rev = 0;
	uusp->ucodep = ucodep;
	uusp->usize = size;

	ucode_chipset_amd(ucodep, size);
#endif

	return (EM_OK);
}
940 
/*
 * Scan the raw Intel update buffer for the newest update matching
 * uusp->sig / uusp->info.  On success uusp->ucodep points at the body
 * (bare metal) or the whole update (xVM), and uusp->expected_rev holds
 * its revision.  Returns EM_OK if anything usable was found, otherwise
 * the most informative of the per-update match results.
 */
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t	header_size = UCODE_HEADER_SIZE_INTEL;
	int		remaining;
	int		found = 0;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int	total_size, body_size, ext_size;
		uint8_t	*curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		/* Anything beyond header + body is the extended table. */
		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point.  However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them.  Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		/* Keep only the newest matching update seen so far. */
		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
#ifndef __xpv
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}
1005 /*
1006  * Entry point to microcode update from the ucode_drv driver.
1007  *
1008  * Returns EM_OK on success, corresponding error code on failure.
1009  */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int		found = 0;
	processorid_t	id;
	ucode_update_t	cached = { 0 };
	ucode_update_t	*cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	/* Hold cpu_lock so CPUs cannot come or go during the update. */
	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef	__xpv
		/*
		 * for i86xpv, the hypervisor will update all the CPUs.
		 * the hypervisor wants the header, data, and extended
		 * signature tables. ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

		/* Run ucode_write() on the target CPU via cross-call. */
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		/* Classify the outcome reported back by ucode_write(). */
		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	/* If no CPU matched at all, report the most informative result. */
	if (!found)
		rc = search_rc;

	return (rc);
}
1112 
1113 /*
1114  * Initialize mcpu_ucode_info, and perform microcode update if necessary.
1115  * This is the entry point from boot path where pointer to CPU structure
1116  * is available.
1117  *
1118  * cpuid_info must be initialized before ucode_check can be called.
1119  */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/* The boot CPU uses the statically allocated info structure. */
	if (cp->cpu_id == 0)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			/* Unsupported vendor: no microcode handling at all. */
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef	__xpv
	/*
	 * for i86xpv, the hypervisor will update all the CPUs. We only need
	 * to do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		/* ucode->load() returns the revision it expected to reach. */
		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}
1196 
1197 /*
1198  * Returns microcode revision from the machcpu structure.
1199  */
1200 ucode_errno_t
1201 ucode_get_rev(uint32_t *revp)
1202 {
1203 	int i;
1204 
1205 	ASSERT(ucode);
1206 	ASSERT(revp);
1207 
1208 	if (!ucode->capable(CPU))
1209 		return (EM_NOTSUP);
1210 
1211 	mutex_enter(&cpu_lock);
1212 	for (i = 0; i < max_ncpus; i++) {
1213 		cpu_t *cpu;
1214 
1215 		if ((cpu = cpu_get(i)) == NULL)
1216 			continue;
1217 
1218 		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
1219 	}
1220 	mutex_exit(&cpu_lock);
1221 
1222 	return (EM_OK);
1223 }
1224