xref: /titanic_41/usr/src/uts/i86pc/os/microcode.c (revision 5e992ba3a9b8749890ab15d3ca96a0b1a79641ac)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/asm_linkage.h>
28 #include <sys/bootconf.h>
29 #include <sys/cpuvar.h>
30 #include <sys/cmn_err.h>
31 #include <sys/controlregs.h>
32 #include <sys/debug.h>
33 #include <sys/kobj.h>
34 #include <sys/kobj_impl.h>
35 #include <sys/machsystm.h>
36 #include <sys/param.h>
37 #include <sys/machparam.h>
38 #include <sys/promif.h>
39 #include <sys/sysmacros.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/thread.h>
43 #include <sys/ucode.h>
44 #include <sys/x86_archext.h>
45 #include <sys/x_call.h>
46 #ifdef	__xpv
47 #include <sys/hypervisor.h>
48 #endif
49 
50 /*
51  * AMD-specific equivalence table
52  */
53 static ucode_eqtbl_amd_t *ucode_eqtbl_amd;
54 
55 /*
56  * mcpu_ucode_info for the boot CPU.  Statically allocated.
57  */
58 static struct cpu_ucode_info cpu_ucode_info0;
59 
60 static ucode_file_t ucodefile;
61 
62 static void* ucode_zalloc(processorid_t, size_t);
63 static void ucode_free(processorid_t, void *, size_t);
64 
65 static int ucode_capable_amd(cpu_t *);
66 static int ucode_capable_intel(cpu_t *);
67 
68 static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
69 static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
70     int);
71 
72 static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
73 static void ucode_file_reset_intel(ucode_file_t *, processorid_t);
74 
75 static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
76 static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
77 
78 #ifdef	__xpv
79 static void ucode_load_xpv(ucode_update_t *);
80 #endif
81 
82 static int ucode_equiv_cpu_amd(cpu_t *, int *);
83 
84 static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
85     ucode_file_t *);
86 static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
87     ucode_file_t *);
88 
89 static ucode_errno_t ucode_match_amd(int, cpu_ucode_info_t *,
90     ucode_file_amd_t *, int);
91 static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
92     ucode_header_intel_t *, ucode_ext_table_intel_t *);
93 
94 static void ucode_read_rev_amd(cpu_ucode_info_t *);
95 static void ucode_read_rev_intel(cpu_ucode_info_t *);
96 
/* AMD implementation of the microcode operations vector. */
static const struct ucode_ops ucode_amd = {
	MSR_AMD_PATCHLOADER,	/* write_msr: MSR used to trigger the load */
	ucode_capable_amd,	/* capable */
	ucode_file_reset_amd,	/* file_reset */
	ucode_read_rev_amd,	/* read_rev */
	ucode_load_amd,		/* load */
	ucode_validate_amd,	/* validate (defined outside this file) */
	ucode_extract_amd,	/* extract */
	ucode_locate_amd	/* locate */
};
107 
/* Intel implementation of the microcode operations vector. */
static const struct ucode_ops ucode_intel = {
	MSR_INTC_UCODE_WRITE,	/* write_msr: MSR used to trigger the load */
	ucode_capable_intel,	/* capable */
	ucode_file_reset_intel,	/* file_reset */
	ucode_read_rev_intel,	/* read_rev */
	ucode_load_intel,	/* load */
	ucode_validate_intel,	/* validate (defined outside this file) */
	ucode_extract_intel,	/* extract */
	ucode_locate_intel	/* locate */
};
118 
119 const struct ucode_ops *ucode;
120 
121 static const char ucode_failure_fmt[] =
122 	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
123 static const char ucode_success_fmt[] =
124 	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
125 
126 /*
127  * Force flag.  If set, the first microcode binary that matches
128  * signature and platform id will be used for microcode update,
129  * regardless of version.  Should only be used for debugging.
130  */
131 int ucode_force_update = 0;
132 
133 /*
134  * Allocate space for mcpu_ucode_info in the machcpu structure
135  * for all non-boot CPUs.
136  */
137 void
138 ucode_alloc_space(cpu_t *cp)
139 {
140 	ASSERT(cp->cpu_id != 0);
141 	cp->cpu_m.mcpu_ucode_info =
142 	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
143 }
144 
145 void
146 ucode_free_space(cpu_t *cp)
147 {
148 	ASSERT(cp->cpu_id != 0);
149 	kmem_free(cp->cpu_m.mcpu_ucode_info,
150 	    sizeof (*cp->cpu_m.mcpu_ucode_info));
151 }
152 
153 /*
154  * Called when we are done with microcode update on all processors to free up
155  * space allocated for the microcode file.
156  */
void
ucode_cleanup()
{
	/* The ops vector is set up in ucode_check() before any update. */
	ASSERT(ucode);

	/*
	 * Pass a non-zero (-1) id so file_reset()/ucode_free() take the
	 * kmem_free() path; the boot CPU's BOP_ALLOC scratch buffer was
	 * already dropped at the end of ucode_check().
	 */
	ucode->file_reset(&ucodefile, -1);
}
164 
165 /*
166  * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
167  * allocated with BOP_ALLOC() and does not require a free.
168  */
169 static void*
170 ucode_zalloc(processorid_t id, size_t size)
171 {
172 	if (id)
173 		return (kmem_zalloc(size, KM_NOSLEEP));
174 
175 	/* BOP_ALLOC() failure results in panic */
176 	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
177 }
178 
179 static void
180 ucode_free(processorid_t id, void* buf, size_t size)
181 {
182 	if (id)
183 		kmem_free(buf, size);
184 }
185 
186 /*
187  * Check whether or not a processor is capable of microcode operations
188  * Returns 1 if it is capable, 0 if not.
189  *
190  * At this point we only support microcode update for:
191  * - Intel processors family 6 and above, and
192  * - AMD processors family 0x10 and above.
193  *
194  * We also assume that we don't support a mix of Intel and
195  * AMD processors in the same box.
196  *
197  * An i86xpv guest domain can't update the microcode.
198  */
/*ARGSUSED*/
static int
ucode_capable_amd(cpu_t *cp)
{
#ifndef	__xpv
	extern int xpv_is_hvm;
	/* No microcode updates from within an HVM guest. */
	if (xpv_is_hvm) {
		return (0);
	}

	/* Bare metal: AMD updates are supported on family 0x10 and later. */
	return (cpuid_getfamily(cp) >= 0x10);
#else
	/* Under Xen, only the control domain (dom0) could ever update. */
	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
		return (0);
	}

	/*
	 * XXPV - change when microcode loading works in dom0. Don't support
	 * microcode loading in dom0 right now for AMD.
	 */
	return (0);
#endif
}
static int
ucode_capable_intel(cpu_t *cp)
{
#ifndef	__xpv
	extern int xpv_is_hvm;
	/* No microcode updates from within an HVM guest. */
	if (xpv_is_hvm) {
		return (0);
	}
#else
	/* Under Xen, only the control domain (dom0) may update microcode. */
	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
		return (0);
	}
#endif
	/* Intel microcode update is supported on family 6 and later. */
	return (cpuid_getfamily(cp) >= 6);
}
237 
238 /*
239  * Called when it is no longer necessary to keep the microcode around,
240  * or when the cached microcode doesn't match the CPU being processed.
241  */
242 static void
243 ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
244 {
245 	ucode_file_amd_t *ucodefp = ufp->amd;
246 
247 	if (ucodefp == NULL)
248 		return;
249 
250 	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
251 	ufp->amd = NULL;
252 }
253 
static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
	int total_size, body_size;

	/*
	 * NOTE(review): ucodefp is the address of an embedded member and can
	 * never be NULL; only the uf_header check below is meaningful.
	 */
	if (ucodefp == NULL || ucodefp->uf_header == NULL)
		return;

	/* The sizes of the pieces can only be recovered from the header. */
	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	if (ucodefp->uf_body) {
		ucode_free(id, ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		/* The extended table is whatever follows header + body. */
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(id, ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	/* Free the header last: the sizes above depend on it. */
	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}
280 
281 /*
282  * Find the equivalent CPU id in the equivalence table.
283  */
static int
ucode_equiv_cpu_amd(cpu_t *cp, int *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		/*
		 * Scan the file one entry at a time until this CPU's
		 * signature or the zero terminator entry is found.  A short
		 * read means we ran off the end without finding either.
		 */
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		/* Cache the whole table for subsequent CPUs. */
		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;
	/*
	 * Fold the id into the compact form compared against uh_cpu_rev in
	 * ucode_match_amd(): keep the low byte, move bits 16..23 to 8..15.
	 */
	*eq_sig = ((*eq_sig >> 8) & 0xff00) | (*eq_sig & 0xff);

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}
362 
363 /*
364  * Populate the ucode file structure from microcode file corresponding to
365  * this CPU, if exists.
366  *
367  * Return EM_OK on success, corresponding error code on failure.
368  */
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, i, rc;
	int eq_sig = 0;
	ucode_file_amd_t *ucodefp = ufp->amd;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch. If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	/* kmem_zalloc(KM_NOSLEEP) may fail for non-boot CPUs */
	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU. The patch files are named XXXX-YY, where
	 * XXXX is the equivalent CPU id and YY is the running patch number.
	 * Patches specific to certain chipsets are guaranteed to have lower
	 * numbers than less specific patches, so we can just load the first
	 * patch that matches.
	 */

	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		/* a missing file ends the numbered sequence: no match */
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
}
419 
/*
 * Locate, read and verify the Intel microcode file for this CPU, caching
 * the result in ufp->intel.  Returns EM_OK on success, or a corresponding
 * error code on failure.
 */
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char		name[MAXPATHLEN];
	intptr_t	fd;
	int		count;
	int		header_size = UCODE_HEADER_SIZE_INTEL;
	int		cpi_sig = cpuid_getsig(cp);
	ucode_errno_t	rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t	*uhp = ucodefp->uf_header;
		uint32_t	offset = header_size;
		int		total_size, body_size, ext_size;
		uint32_t	sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			/* The body immediately follows the header on disk. */
			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		/* Header plus body must checksum to zero. */
		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		/* Verify the table as a whole, then each signature entry. */
		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		/* Short read or I/O error on the header. */
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	/* Confirm the freshly-read file actually matches this CPU. */
	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}
553 
554 static ucode_errno_t
555 ucode_match_amd(int eq_sig, cpu_ucode_info_t *uinfop, ucode_file_amd_t *ucodefp,
556     int size)
557 {
558 	ucode_header_amd_t *uh;
559 
560 	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
561 		return (EM_NOMATCH);
562 
563 	/*
564 	 * Don't even think about loading patches that would require code
565 	 * execution.
566 	 */
567 	if (size > offsetof(ucode_file_amd_t, uf_code_present) &&
568 	    ucodefp->uf_code_present)
569 		return (EM_NOMATCH);
570 
571 	uh = &ucodefp->uf_header;
572 
573 	if (eq_sig != uh->uh_cpu_rev)
574 		return (EM_NOMATCH);
575 
576 	if (uh->uh_nb_id) {
577 		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
578 		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
579 		return (EM_NOMATCH);
580 	}
581 
582 	if (uh->uh_sb_id) {
583 		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
584 		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
585 		return (EM_NOMATCH);
586 	}
587 
588 	if (uh->uh_patch_id <= uinfop->cui_rev)
589 		return (EM_HIGHERREV);
590 
591 	return (EM_OK);
592 }
593 
594 /*
595  * Returns 1 if the microcode is for this processor; 0 otherwise.
596  */
597 static ucode_errno_t
598 ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
599     ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
600 {
601 	if (uhp == NULL)
602 		return (EM_NOMATCH);
603 
604 	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
605 	    uinfop->cui_platid, uhp->uh_proc_flags)) {
606 
607 		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
608 			return (EM_HIGHERREV);
609 
610 		return (EM_OK);
611 	}
612 
613 	if (uetp != NULL) {
614 		int i;
615 
616 		for (i = 0; i < uetp->uet_count; i++) {
617 			ucode_ext_sig_intel_t *uesp;
618 
619 			uesp = &uetp->uet_ext_sig[i];
620 
621 			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
622 			    uinfop->cui_platid, uesp->ues_proc_flags)) {
623 
624 				if (uinfop->cui_rev >= uhp->uh_rev &&
625 				    !ucode_force_update)
626 					return (EM_HIGHERREV);
627 
628 				return (EM_OK);
629 			}
630 		}
631 	}
632 
633 	return (EM_NOMATCH);
634 }
635 
/*
 * Cross-call handler: apply the pending microcode update (described by the
 * ucode_update_t passed in arg1) on the CPU this runs on, and record the
 * resulting revision in uusp->new_rev.
 */
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef	__xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	/* Trigger the update by writing the patch address to the MSR. */
	wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
#endif
	/* Re-read so the caller can verify the new revision took effect. */
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}
666 
/*
 * Push the cached AMD patch into the CPU and return the patch level the
 * file claims to provide.
 */
/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef	__xpv
	ucode_update_t uus;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef	__xpv
	/* No migration between the MSR write and the revision read-back. */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#else
	/* Under Xen, the hypervisor performs the update on our behalf. */
	uus.ucodep = (uint8_t *)ucodefp;
	uus.usize = sizeof (*ucodefp);
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#endif

	return (ucodefp->uf_header.uh_patch_id);
}
694 
/*
 * Push the cached Intel microcode into the CPU and return the revision
 * recorded in the file header.
 */
/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef __xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef __xpv
	/*
	 * the hypervisor wants the header, data, and extended
	 * signature tables. We can only get here from the boot
	 * CPU (cpu #0), we don't need to free as ucode_zalloc() will
	 * use BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	/* NOTE(review): ext_size is unsigned, so this assert never fires */
	ASSERT(ext_size >= 0);

	/* Reassemble header, body and (optional) extended table. */
	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	/* No migration between the MSR write and the revision read-back. */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}
748 
749 
#ifdef	__xpv
/*
 * Hand a complete microcode image (header, body and extended signature
 * tables) to the Xen hypervisor, which applies the update to the CPUs.
 * Must be called from the control domain (dom0).
 */
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif /* __xpv */
772 
/*
 * Record the CPU's current microcode patch level in uinfop->cui_rev.
 */
static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}
778 
/*
 * Record the CPU's current microcode revision in uinfop->cui_rev.
 */
static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}
793 
794 static ucode_errno_t
795 ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
796 {
797 	uint32_t *ptr = (uint32_t *)ucodep;
798 	ucode_eqtbl_amd_t *eqtbl;
799 	ucode_file_amd_t *ufp;
800 	int count, eq_sig;
801 
802 	/* skip over magic number & equivalence table header */
803 	ptr += 2; size -= 8;
804 
805 	count = *ptr++; size -= 4;
806 	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
807 	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
808 	    eqtbl++)
809 		;
810 
811 	eq_sig = eqtbl->ue_equiv_cpu;
812 	eq_sig = ((eq_sig >> 8) & 0xff00) | (eq_sig & 0xff);
813 
814 	/* No equivalent CPU id found, assume outdated microcode file. */
815 	if (eq_sig == 0)
816 		return (EM_HIGHERREV);
817 
818 	/* Use the first microcode patch that matches. */
819 	do {
820 		ptr += count >> 2; size -= count;
821 
822 		if (!size)
823 			return (EM_NOMATCH);
824 
825 		ptr++; size -= 4;
826 		count = *ptr++; size -= 4;
827 		ufp = (ucode_file_amd_t *)ptr;
828 	} while (ucode_match_amd(eq_sig, &uusp->info, ufp, count) != EM_OK);
829 
830 	uusp->ucodep = (uint8_t *)ufp;
831 	uusp->usize = count;
832 	uusp->expected_rev = ufp->uf_header.uh_patch_id;
833 
834 	return (EM_OK);
835 }
836 
/*
 * Scan a concatenated Intel microcode file for the highest-revision update
 * matching the CPU described in uusp, recording its location, size and
 * revision in *uusp.
 */
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t	header_size = UCODE_HEADER_SIZE_INTEL;
	int		remaining;
	int		found = 0;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int	total_size, body_size, ext_size;
		uint8_t	*curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		/*
		 * NOTE(review): loop progress relies on uh_total_size being
		 * non-zero; presumably guaranteed by prior validation
		 * (ucode->validate) in the driver path -- confirm.
		 */
		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point.  However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them.  Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
#ifndef __xpv
			/* bare metal loads only the body (data) portion */
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			/* the hypervisor wants the entire update */
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}
901 /*
902  * Entry point to microcode update from the ucode_drv driver.
903  *
904  * Returns EM_OK on success, corresponding error code on failure.
905  */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int		found = 0;
	processorid_t	id;
	ucode_update_t	cached = { 0 };
	ucode_update_t	*cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			/* cache the extraction result for the next CPUs */
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef	__xpv
		/*
		 * for i86xpv, the hypervisor will update all the CPUs.
		 * the hypervisor wants the header, data, and extended
		 * signature tables. ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

		/* Run ucode_write() on the target CPU via a sync xcall. */
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, X_CALL_HIPRI, cpuset,
		    ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		/* Report per-CPU success or failure of the update. */
		if (uusp->expected_rev == uusp->new_rev) {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
		} else {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		}
	}

	mutex_exit(&cpu_lock);

	/* No CPU got a matching update: report the search outcome instead. */
	if (!found)
		rc = search_rc;

	return (rc);
}
1006 
1007 /*
1008  * Initialize mcpu_ucode_info, and perform microcode update if necessary.
1009  * This is the entry point from boot path where pointer to CPU structure
1010  * is available.
1011  *
1012  * cpuid_info must be initialized before ucode_check can be called.
1013  */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/* The boot CPU uses the statically allocated cpu_ucode_info0. */
	if (cp->cpu_id == 0)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			/* unsupported vendor: leave ucode unset, bail out */
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef	__xpv
	/*
	 * for i86xpv, the hypervisor will update all the CPUs. We only need
	 * do do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		/* load() re-reads the running rev; it must match the file */
		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}
1090 
1091 /*
1092  * Returns microcode revision from the machcpu structure.
1093  */
1094 ucode_errno_t
1095 ucode_get_rev(uint32_t *revp)
1096 {
1097 	int i;
1098 
1099 	ASSERT(ucode);
1100 	ASSERT(revp);
1101 
1102 	if (!ucode->capable(CPU))
1103 		return (EM_NOTSUP);
1104 
1105 	mutex_enter(&cpu_lock);
1106 	for (i = 0; i < max_ncpus; i++) {
1107 		cpu_t *cpu;
1108 
1109 		if ((cpu = cpu_get(i)) == NULL)
1110 			continue;
1111 
1112 		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
1113 	}
1114 	mutex_exit(&cpu_lock);
1115 
1116 	return (EM_OK);
1117 }
1118