xref: /titanic_51/usr/src/uts/i86pc/os/microcode.c (revision c5cd6260c3d6c06a9359df595ad9dddbfd00a80e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/asm_linkage.h>
28 #include <sys/bootconf.h>
29 #include <sys/cpuvar.h>
30 #include <sys/cmn_err.h>
31 #include <sys/controlregs.h>
32 #include <sys/debug.h>
33 #include <sys/kobj.h>
34 #include <sys/kobj_impl.h>
35 #include <sys/machsystm.h>
36 #include <sys/param.h>
37 #include <sys/machparam.h>
38 #include <sys/promif.h>
39 #include <sys/sysmacros.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/thread.h>
43 #include <sys/ucode.h>
44 #include <sys/x86_archext.h>
45 #include <sys/x_call.h>
46 #ifdef	__xpv
47 #include <sys/hypervisor.h>
48 #endif
49 
50 /*
51  * Microcode specific information per core
52  */
struct cpu_ucode_info {
	uint32_t	cui_platid;	/* platform id (from MSR_INTC_PLATFORM_ID) */
	uint32_t	cui_rev;	/* currently loaded microcode revision */
};
57 
58 /*
59  * Data structure used for xcall
60  */
struct ucode_update_struct {
	uint32_t		sig;	/* signature */
	struct cpu_ucode_info	info;	/* ucode info */
	uint32_t		expected_rev;	/* rev of the matched ucode image */
	uint32_t		new_rev;	/* rev read back after the update */
	uint8_t			*ucodep; /* pointer to ucode body */
};
68 
/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

/*
 * Cached copy of the most recently loaded microcode file.  Reused by
 * ucode_locate() when it still matches the CPU being processed, and
 * released via ucode_free().
 */
static ucode_file_t ucodefile = { 0 };

static int ucode_capable(cpu_t *);
static void ucode_file_reset(ucode_file_t *, processorid_t);
static ucode_errno_t ucode_match(int, struct cpu_ucode_info *,
    ucode_header_t *, ucode_ext_table_t *);
static ucode_errno_t ucode_locate(cpu_t *, struct cpu_ucode_info *,
    ucode_file_t *);
static void ucode_update_intel(uint8_t *, struct cpu_ucode_info *);
static void ucode_read_rev(struct cpu_ucode_info *);
#ifdef	__xpv
static void ucode_update_xpv(struct ucode_update_struct *, uint8_t *, uint32_t);
#endif

/* Console messages reporting the outcome of an update attempt. */
static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;
99 
100 /*
101  * Allocate space for mcpu_ucode_info in the machcpu structure
102  * for all non-boot CPUs.
103  */
104 void
105 ucode_alloc_space(cpu_t *cp)
106 {
107 	ASSERT(cp->cpu_id != 0);
108 	cp->cpu_m.mcpu_ucode_info =
109 	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
110 }
111 
112 void
113 ucode_free_space(cpu_t *cp)
114 {
115 	ASSERT(cp->cpu_id != 0);
116 	kmem_free(cp->cpu_m.mcpu_ucode_info,
117 	    sizeof (*cp->cpu_m.mcpu_ucode_info));
118 }
119 
/*
 * Called when we are done with microcode update on all processors to free up
 * space allocated for the microcode file.
 */
void
ucode_free()
{
	/*
	 * Pass -1 (not a real CPU id) so that ucode_file_reset() takes the
	 * kmem_free() path; only id 0 (boot CPU, BOP_ALLOC'ed memory) is
	 * exempt from freeing.
	 */
	ucode_file_reset(&ucodefile, -1);
}
129 
130 /*
131  * Check whether or not a processor is capable of microcode operations
132  * Returns 1 if it is capable, 0 if not.
133  */
134 static int
135 ucode_capable(cpu_t *cp)
136 {
137 	/* i86xpv guest domain can't update microcode */
138 #ifndef	__xpv
139 	extern int xpv_is_hvm;
140 	if (xpv_is_hvm) {
141 		return (0);
142 	}
143 #else
144 	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
145 		return (0);
146 	}
147 #endif
148 
149 	/*
150 	 * At this point we only support microcode update for Intel
151 	 * processors family 6 and above.
152 	 *
153 	 * We also assume that we don't support a mix of Intel and
154 	 * AMD processors in the same box.
155 	 */
156 	if (cpuid_getvendor(cp) != X86_VENDOR_Intel ||
157 	    cpuid_getfamily(cp) < 6)
158 		return (0);
159 	else
160 		return (1);
161 }
162 
/*
 * Called when it is no longer necessary to keep the microcode around,
 * or when the cached microcode doesn't match the CPU being processed.
 *
 * id is the CPU the cached buffers were allocated for: 0 means boot CPU
 * (BOP_ALLOC'ed, not freeable), anything else (including the -1 passed by
 * ucode_free()) means kmem-allocated.
 */
static void
ucode_file_reset(ucode_file_t *ucodefp, processorid_t id)
{
	int total_size, body_size;

	if (ucodefp == NULL)
		return;

	/* Capture the sizes before the header is bzero'ed below. */
	total_size = UCODE_TOTAL_SIZE(ucodefp->uf_header.uh_total_size);
	body_size = UCODE_BODY_SIZE(ucodefp->uf_header.uh_body_size);
	if (ucodefp->uf_body) {
		/*
		 * Space for the boot CPU is allocated with BOP_ALLOC()
		 * and does not require a free.
		 */
		if (id != 0)
			kmem_free(ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		/* Extended signature table is whatever follows the body. */
		int size = total_size - body_size - UCODE_HEADER_SIZE;
		/*
		 * Space for the boot CPU is allocated with BOP_ALLOC()
		 * and does not require a free.
		 */
		if (id != 0)
			kmem_free(ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	bzero(&ucodefp->uf_header, UCODE_HEADER_SIZE);
}
200 
/*
 * Populate the ucode file structure from microcode file corresponding to
 * this CPU, if exists.
 *
 * The microcode file is looked up by CPU signature and platform id under
 * UCODE_INSTALL_PATH.  On success ucodefp holds a validated (header,
 * body, optional extended signature table) image that matches uinfop.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
static ucode_errno_t
ucode_locate(cpu_t *cp, struct cpu_ucode_info *uinfop, ucode_file_t *ucodefp)
{
	char		name[MAXPATHLEN];
	intptr_t	fd;
	int		count;
	int		header_size = UCODE_HEADER_SIZE;
	int		cpi_sig = cpuid_getsig(cp);
	ucode_errno_t	rc = EM_OK;

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match(cpi_sig, uinfop, &ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode_file_reset(ucodefp, cp->cpu_id);

	count = kobj_read(fd, (char *)&ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE: {

		ucode_header_t	*uhp = &ucodefp->uf_header;
		uint32_t	offset = header_size;
		int		total_size, body_size, ext_size;
		uint32_t	sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE(uhp->uh_body_size);
			if (cp->cpu_id != 0) {
				/* Non-boot CPUs use kmem; may fail. */
				if ((ucodefp->uf_body = kmem_zalloc(body_size,
				    KM_NOSLEEP)) == NULL) {
					rc = EM_NOMEM;
					break;
				}
			} else {
				/*
				 * BOP_ALLOC() failure results in panic so we
				 * don't have to check for NULL return.
				 */
				ucodefp->uf_body =
				    (uint8_t *)BOP_ALLOC(bootops,
				    NULL, body_size, MMU_PAGESIZE);
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		/*
		 * Header plus body must checksum to zero (see
		 * ucode_checksum()); a nonzero result means corruption.
		 */
		sum = ucode_checksum(0, header_size,
		    (uint8_t *)&ucodefp->uf_header);
		if (ucode_checksum(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		if (cp->cpu_id != 0) {
			if ((ucodefp->uf_ext_table = kmem_zalloc(ext_size,
			    KM_NOSLEEP)) == NULL) {
				rc = EM_NOMEM;
				break;
			}
		} else {
			/*
			 * BOP_ALLOC() failure results in panic so we
			 * don't have to check for NULL return.
			 */
			ucodefp->uf_ext_table =
			    (ucode_ext_table_t *)BOP_ALLOC(bootops, NULL,
			    ext_size, MMU_PAGESIZE);
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			/*
			 * Verify the checksum of each individual extended
			 * signature entry as well.
			 * NOTE(review): ext_size is not referenced after
			 * this adjustment; the subtraction appears vestigial.
			 */
			ext_size -= UCODE_EXT_TABLE_SIZE;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum(0, UCODE_EXT_SIG_SIZE,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		/* Short read of the header: file is truncated or empty. */
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	/* Confirm the freshly read image actually matches this CPU. */
	rc = ucode_match(cpi_sig, uinfop, &ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}
352 
353 
/*
 * Check whether the microcode image described by uhp/uetp is applicable to
 * the processor identified by cpi_sig and uinfop->cui_platid.
 *
 * Returns EM_OK if the microcode matches (by primary signature or any entry
 * in the extended signature table), EM_HIGHERREV if it matches but the
 * running revision is already >= the image's revision (unless
 * ucode_force_update is set), and EM_NOMATCH otherwise.
 */
static ucode_errno_t
ucode_match(int cpi_sig, struct cpu_ucode_info *uinfop,
    ucode_header_t *uhp, ucode_ext_table_t *uetp)
{
	ASSERT(uhp);

	if (UCODE_MATCH(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				/* The header rev applies to the whole image. */
				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}
394 
/*
 * Cross-call handler: load the microcode image pointed to by the
 * ucode_update_struct in arg1 onto the current CPU, then refresh the
 * cached revision in machcpu.  Runs on the target CPU via xc_sync().
 */
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	struct ucode_update_struct *uusp = (struct ucode_update_struct *)arg1;
	struct cpu_ucode_info *uinfop = CPU->cpu_m.mcpu_ucode_info;

	ASSERT(uusp->ucodep);

#ifndef	__xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode_read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	/* Trigger the update; the MSR takes the body's linear address. */
	wrmsr(MSR_INTC_UCODE_WRITE,
	    (uint64_t)(intptr_t)(uusp->ucodep));
#endif
	/* Report back the revision actually running after the attempt. */
	ucode_read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}
425 
426 
/*
 * Load the given microcode body on the current CPU via the Intel
 * microcode-update trigger MSR and refresh the cached revision in uinfop.
 * Preemption is disabled so the MSR write and the revision read are
 * guaranteed to happen on the same CPU.
 */
static void
ucode_update_intel(uint8_t *ucode_body, struct cpu_ucode_info *uinfop)
{
	kpreempt_disable();
	wrmsr(MSR_INTC_UCODE_WRITE, (uint64_t)(uintptr_t)ucode_body);
	ucode_read_rev(uinfop);
	kpreempt_enable();
}
435 
436 
#ifdef	__xpv
/*
 * Hand the complete microcode image (header + body + extended signature
 * tables) to the hypervisor, which updates all physical CPUs.  Afterwards
 * re-read the revision on the current vCPU and, if uusp is non-NULL,
 * record it as the new revision for the caller.
 */
static void
ucode_update_xpv(struct ucode_update_struct *uusp, uint8_t *ucode,
    uint32_t size)
{
	struct cpu_ucode_info *uinfop;
	xen_platform_op_t op;
	int e;

	/* Only the control domain may issue this platform op. */
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	uinfop = CPU->cpu_m.mcpu_ucode_info;
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, ucode);
	op.u.microcode.length = size;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	ucode_read_rev(uinfop);
	if (uusp != NULL) {
		uusp->new_rev = uinfop->cui_rev;
	}
	kpreempt_enable();
}
#endif /* __xpv */
466 
467 
/*
 * Read the microcode revision currently running on this CPU into
 * uinfop->cui_rev.  Must be called on the CPU being queried (callers
 * disable preemption or run in xcall context).
 */
static void
ucode_read_rev(struct cpu_ucode_info *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 *
	 * NOTE(review): crs is not initialized before __cpuid_insn(), so
	 * the cpuid leaf executed depends on stack garbage; the SDM
	 * sequence uses EAX=1.  Confirm __cpuid_insn() semantics.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}
482 
/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * ucodep points to a buffer of `size' bytes containing one or more
 * concatenated microcode images (header + body + optional extended
 * signature table each); presumably already validated by the driver —
 * a zero uh_total_size here would not terminate the scan loop.
 *
 * For each xcall-ready CPU, the best matching image is located (results
 * are cached and reused for sibling CPUs with the same signature and
 * platform id) and applied via a synchronous cross-call.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	uint32_t	header_size = UCODE_HEADER_SIZE;
	int		remaining;
	int		found = 0;	/* any applicable image found? */
	processorid_t	id;
	struct ucode_update_struct cached = { 0 };
	struct ucode_update_struct *cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;
#ifdef	__xpv
	uint8_t *ustart;
	uint32_t usize;
#endif

	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode_capable(CPU))
		return (EM_NOTSUP);

	/* Hold cpu_lock so the CPU list is stable across the scan. */
	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		struct ucode_update_struct uus = { 0 };
		struct ucode_update_struct *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else {
			/*
			 * Go through the whole buffer in case there are
			 * multiple versions of matching microcode for this
			 * processor.
			 */
			for (remaining = size; remaining > 0; ) {
				int	total_size, body_size, ext_size;
				uint8_t	*curbuf = &ucodep[size - remaining];
				ucode_header_t	*uhp = (ucode_header_t *)curbuf;
				ucode_ext_table_t *uetp = NULL;
				ucode_errno_t tmprc;

				total_size =
				    UCODE_TOTAL_SIZE(uhp->uh_total_size);
				body_size = UCODE_BODY_SIZE(uhp->uh_body_size);
				ext_size = total_size -
				    (header_size + body_size);

				/* Extended sig table trails the body. */
				if (ext_size > 0)
					uetp = (ucode_ext_table_t *)
					    &curbuf[header_size + body_size];

				tmprc = ucode_match(uusp->sig, &uusp->info,
				    uhp, uetp);

				/*
				 * Since we are searching through a big file
				 * containing microcode for pretty much all the
				 * processors, we are bound to get EM_NOMATCH
				 * at one point.  However, if we return
				 * EM_NOMATCH to users, it will really confuse
				 * them.  Therefore, if we ever find a match of
				 * a lower rev, we will set return code to
				 * EM_HIGHERREV.
				 */
				if (tmprc == EM_HIGHERREV)
					search_rc = EM_HIGHERREV;

				/* Keep the highest-rev applicable image. */
				if (tmprc == EM_OK &&
				    uusp->expected_rev < uhp->uh_rev) {
					uusp->ucodep = &curbuf[header_size];
#ifdef	__xpv
					ustart = (uint8_t *)curbuf;
					usize = UCODE_TOTAL_SIZE(
					    uhp->uh_total_size);
#endif
					uusp->expected_rev = uhp->uh_rev;
					bcopy(uusp, &cached, sizeof (cached));
					cachedp = &cached;
					found = 1;
				}

				remaining -= total_size;
			}
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef	__xpv
		/*
		 * for i86xpv, the hypervisor will update all the CPUs.
		 * the hypervisor wants the header, data, and extended
		 * signature tables. ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		ucode_update_xpv(uusp, ustart, usize);
#endif

		/* Synchronously run ucode_write() on just this CPU. */
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, X_CALL_HIPRI, cpuset,
		    ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->expected_rev == uusp->new_rev) {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
		} else {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		}
	}

	mutex_exit(&cpu_lock);

	/* No applicable image at all: report the best search outcome. */
	if (!found)
		rc = search_rc;

	return (rc);
}
641 
/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	struct cpu_ucode_info *uinfop;
	ucode_errno_t rc = EM_OK;
#ifdef	__xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
#endif

	ASSERT(cp);
	/* The boot CPU uses the statically allocated structure. */
	if (cp->cpu_id == 0)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	if (!ucode_capable(cp))
		return;

#ifdef	__xpv
	/*
	 * for i86xpv, the hypervisor will update all the CPUs. We only need
	 * to do this on one of the CPUs (and there always is a CPU 0). We do
	 * need to update the CPU version though. Do that before returning.
	 */
	if (cp->cpu_id != 0) {
		ucode_read_rev(uinfop);
		return;
	}
#endif

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6)) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode_read_rev(uinfop);

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode_locate(cp, uinfop, &ucodefile)) == EM_OK) {
#ifndef	__xpv
		ucode_update_intel(ucodefile.uf_body, uinfop);
#else
		/*
		 * the hypervisor wants the header, data, and extended
		 * signature tables. We can only get here from the boot
		 * CPU (cpu #0), so use BOP_ALLOC. Since we're using BOP_ALLOC,
		 * We don't need to free.
		 */
		usize = UCODE_TOTAL_SIZE(ucodefile.uf_header.uh_total_size);
		ustart = (uint8_t *)BOP_ALLOC(bootops, NULL, usize,
		    MMU_PAGESIZE);

		body_size = UCODE_BODY_SIZE(ucodefile.uf_header.uh_body_size);
		ext_offset = body_size + UCODE_HEADER_SIZE;
		ext_size = usize - ext_offset;
		/*
		 * NOTE(review): ext_size is uint32_t, so this assertion is
		 * vacuously true; an undersized usize would wrap instead of
		 * tripping it.
		 */
		ASSERT(ext_size >= 0);

		/* Reassemble a contiguous image for the hypervisor. */
		(void) memcpy(ustart, &ucodefile.uf_header, UCODE_HEADER_SIZE);
		(void) memcpy(&ustart[UCODE_HEADER_SIZE], ucodefile.uf_body,
		    body_size);
		if (ext_size > 0) {
			(void) memcpy(&ustart[ext_offset],
			    ucodefile.uf_ext_table, ext_size);
		}
		ucode_update_xpv(NULL, ustart, usize);
#endif

		if (uinfop->cui_rev != ucodefile.uf_header.uh_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, ucodefile.uf_header.uh_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode_file_reset(&ucodefile, cp->cpu_id);
}
745 
746 /*
747  * Returns microcode revision from the machcpu structure.
748  */
749 ucode_errno_t
750 ucode_get_rev(uint32_t *revp)
751 {
752 	int i;
753 
754 	ASSERT(revp);
755 
756 	if (!ucode_capable(CPU))
757 		return (EM_NOTSUP);
758 
759 	mutex_enter(&cpu_lock);
760 	for (i = 0; i < max_ncpus; i++) {
761 		cpu_t *cpu;
762 
763 		if ((cpu = cpu_get(i)) == NULL)
764 			continue;
765 
766 		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
767 	}
768 	mutex_exit(&cpu_lock);
769 
770 	return (EM_OK);
771 }
772