/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/bootconf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/machsystm.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>
#ifdef	__xpv
#include <sys/hypervisor.h>
#endif

/*
 * Microcode-specific information per core
 */
struct cpu_ucode_info {
	uint32_t	cui_platid;	/* platform id */
	uint32_t	cui_rev;	/* microcode revision */
};

/*
 * Data structure used for xcall
 */
struct ucode_update_struct {
	uint32_t		sig;	/* signature */
	struct cpu_ucode_info	info;	/* ucode info */
	uint32_t		expected_rev;
	uint32_t		new_rev;
	uint8_t			*ucodep; /* pointer to ucode body */
};

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

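/*
 * Scratch copy of the microcode file matched against the CPUs being
 * processed.  It is filled in and reused by ucode_locate() during boot.
 */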
static ucode_file_t ucodefile = { 0 };

static int ucode_capable(cpu_t *);
static void ucode_file_reset(ucode_file_t *, processorid_t);
static ucode_errno_t ucode_match(int, struct cpu_ucode_info *,
    ucode_header_t *, ucode_ext_table_t *);
static ucode_errno_t ucode_locate(cpu_t *, struct cpu_ucode_info *,
    ucode_file_t *);
static void ucode_update_intel(uint8_t *, struct cpu_ucode_info *);
static void ucode_read_rev(struct cpu_ucode_info *);

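/*
 * Message templates for the update result.  The leading '?' in the success
 * message tells cmn_err() to send it only to the system log on a normal
 * (non-verbose) boot.
 */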
static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
}

/*
 * Called when we are done with microcode update on all processors to free up
 * space allocated for the microcode file.
 */
void
ucode_free()
{
	ucode_file_reset(&ucodefile, -1);
}

/*
 * Check whether or not a processor is capable of microcode operations.
 * Returns 1 if it is capable, 0 if not.
 */
/*ARGSUSED*/
static int
ucode_capable(cpu_t *cp)
{
	/* i86xpv guest domain can't update microcode */
#ifdef	__xpv
	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
		return (0);
	}
#endif

#ifndef	__xpv
	/*
	 * At this point we only support microcode update for Intel
	 * processors family 6 and above.
	 *
	 * We also assume that we don't support a mix of Intel and
	 * AMD processors in the same box.
	 */
	if (cpuid_getvendor(cp) != X86_VENDOR_Intel ||
	    cpuid_getfamily(cp) < 6)
		return (0);
	else
		return (1);
#else
	/*
	 * XXPV - remove when microcode loading works in dom0. Don't support
	 * microcode loading in dom0 right now.
	 */
	return (0);
#endif
}

/*
 * Called when it is no longer necessary to keep the microcode around,
 * or when the cached microcode doesn't match the CPU being processed.
 */
static void
ucode_file_reset(ucode_file_t *ucodefp, processorid_t id)
{
	int total_size, body_size;

	if (ucodefp == NULL)
		return;

	total_size = UCODE_TOTAL_SIZE(ucodefp->uf_header.uh_total_size);
	body_size = UCODE_BODY_SIZE(ucodefp->uf_header.uh_body_size);
	if (ucodefp->uf_body) {
		/*
		 * Space for the boot CPU is allocated with BOP_ALLOC()
		 * and does not require a free.
		 */
		if (id != 0)
			kmem_free(ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		int size = total_size - body_size - UCODE_HEADER_SIZE;
		/*
		 * Space for the boot CPU is allocated with BOP_ALLOC()
		 * and does not require a free.
		 */
		if (id != 0)
			kmem_free(ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	bzero(&ucodefp->uf_header, UCODE_HEADER_SIZE);
}

/*
 * Populate the ucode file structure from the microcode file corresponding
 * to this CPU, if one exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
static ucode_errno_t
ucode_locate(cpu_t *cp, struct cpu_ucode_info *uinfop, ucode_file_t *ucodefp)
{
	char		name[MAXPATHLEN];
	intptr_t	fd;
	int		count;
	int		header_size = UCODE_HEADER_SIZE;
	int		cpi_sig = cpuid_getsig(cp);
	ucode_errno_t	rc = EM_OK;

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match(cpi_sig, uinfop, &ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for the microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing;
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode_file_reset(ucodefp, cp->cpu_id);

	count = kobj_read(fd, (char *)&ucodefp->uf_header, header_size, 0);

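	/*
	 * A complete header read is the only valid outcome; anything shorter
	 * indicates a truncated or otherwise unusable file.
	 */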
	switch (count) {
	case UCODE_HEADER_SIZE: {

		ucode_header_t	*uhp = &ucodefp->uf_header;
		uint32_t	offset = header_size;
		int		total_size, body_size, ext_size;
		uint32_t	sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE(uhp->uh_body_size);
			if (cp->cpu_id != 0) {
				if ((ucodefp->uf_body = kmem_zalloc(body_size,
				    KM_NOSLEEP)) == NULL) {
					rc = EM_NOMEM;
					break;
				}
			} else {
				/*
				 * BOP_ALLOC() failure results in panic so we
				 * don't have to check for NULL return.
				 */
				ucodefp->uf_body =
				    (uint8_t *)BOP_ALLOC(bootops,
				    NULL, body_size, MMU_PAGESIZE);
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		sum = ucode_checksum(0, header_size,
		    (uint8_t *)&ucodefp->uf_header);
		if (ucode_checksum(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is an extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		if (cp->cpu_id != 0) {
			if ((ucodefp->uf_ext_table = kmem_zalloc(ext_size,
			    KM_NOSLEEP)) == NULL) {
				rc = EM_NOMEM;
				break;
			}
		} else {
			/*
			 * BOP_ALLOC() failure results in panic so we
			 * don't have to check for NULL return.
			 */
			ucodefp->uf_ext_table =
			    (ucode_ext_table_t *)BOP_ALLOC(bootops, NULL,
			    ext_size, MMU_PAGESIZE);
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

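			/*
			 * The whole-table checksum above is not enough;
			 * verify each extended signature entry individually
			 * as well.
			 */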
			ext_size -= UCODE_EXT_TABLE_SIZE;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum(0, UCODE_EXT_SIG_SIZE,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match(cpi_sig, uinfop, &ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}


/*
 * Checks whether the given microcode matches this processor.  Returns EM_OK
 * if it matches and carries a newer revision, EM_HIGHERREV if the running
 * revision is already the same or newer (unless ucode_force_update is set),
 * and EM_NOMATCH if the microcode is not for this processor.
 */
static ucode_errno_t
ucode_match(int cpi_sig, struct cpu_ucode_info *uinfop,
    ucode_header_t *uhp, ucode_ext_table_t *uetp)
{
	ASSERT(uhp);

	if (UCODE_MATCH(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}

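/*
 * Cross-call handler invoked via xc_sync():  applies the cached microcode
 * image on the CPU it runs on and records the resulting revision.
 */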
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	struct ucode_update_struct *uusp = (struct ucode_update_struct *)arg1;
	struct cpu_ucode_info *uinfop = CPU->cpu_m.mcpu_ucode_info;

	ASSERT(uusp->ucodep);

	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode_read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

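	/*
	 * The update MSR takes the linear address of the microcode body
	 * (the data following the header), which is what ucodep points to.
	 */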
	wrmsr(MSR_INTC_UCODE_WRITE,
	    (uint64_t)(intptr_t)(uusp->ucodep));
	ucode_read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}


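/*
 * Apply the given microcode body on the CPU this routine runs on and
 * refresh the recorded revision.  Used on the boot path, where each CPU
 * updates itself and no cross call is needed.
 */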
static void
ucode_update_intel(uint8_t *ucode_body, struct cpu_ucode_info *uinfop)
{
	kpreempt_disable();
	wrmsr(MSR_INTC_UCODE_WRITE, (uint64_t)(uintptr_t)ucode_body);
	ucode_read_rev(uinfop);
	kpreempt_enable();
}

static void
ucode_read_rev(struct cpu_ucode_info *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architectures Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV first be loaded with 0 and that
	 * cpuid then be executed to guarantee a correct read of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}

/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	uint32_t	header_size = UCODE_HEADER_SIZE;
	int		remaining;
	int		found = 0;
	processorid_t	id;
	struct ucode_update_struct cached = { 0 };
	struct ucode_update_struct *cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucodep);

	CPUSET_ZERO(cpuset);

	if (!ucode_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		struct ucode_update_struct uus = { 0 };
		struct ucode_update_struct *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else {
			/*
			 * Go through the whole buffer in case there are
			 * multiple versions of matching microcode for this
			 * processor.
			 */
			for (remaining = size; remaining > 0; ) {
				int	total_size, body_size, ext_size;
				uint8_t	*curbuf = &ucodep[size - remaining];
				ucode_header_t	*uhp = (ucode_header_t *)curbuf;
				ucode_ext_table_t *uetp = NULL;
				ucode_errno_t tmprc;

				total_size =
				    UCODE_TOTAL_SIZE(uhp->uh_total_size);
				body_size = UCODE_BODY_SIZE(uhp->uh_body_size);
				ext_size = total_size -
				    (header_size + body_size);

				if (ext_size > 0)
					uetp = (ucode_ext_table_t *)
					    &curbuf[header_size + body_size];

				tmprc = ucode_match(uusp->sig, &uusp->info,
				    uhp, uetp);

				/*
				 * Since we are searching through a big file
				 * containing microcode for pretty much all the
				 * processors, we are bound to get EM_NOMATCH
				 * at one point.  However, if we return
				 * EM_NOMATCH to users, it will really confuse
				 * them.  Therefore, if we ever find a match of
				 * a lower rev, we will set the return code to
				 * EM_HIGHERREV.
				 */
				if (tmprc == EM_HIGHERREV)
					search_rc = EM_HIGHERREV;

				if (tmprc == EM_OK &&
				    uusp->expected_rev < uhp->uh_rev) {
					uusp->ucodep = &curbuf[header_size];
					uusp->expected_rev = uhp->uh_rev;
					bcopy(uusp, &cached, sizeof (cached));
					cachedp = &cached;
					found = 1;
				}

				remaining -= total_size;
			}
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

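		/*
		 * Cross call the target CPU so that the actual WRMSR is
		 * executed on that CPU; the results come back in uusp.
		 */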
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, X_CALL_HIPRI, cpuset,
		    ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->expected_rev == uusp->new_rev) {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
		} else {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		}
	}

	mutex_exit(&cpu_lock);

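	/*
	 * No matching microcode was found for any CPU; report the more
	 * informative of the search results (EM_HIGHERREV if a match at a
	 * lower revision was seen, otherwise EM_NOMATCH).
	 */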
	if (!found)
		rc = search_rc;

	return (rc);
}

/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from the boot path, where a pointer to the CPU
 * structure is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	struct cpu_ucode_info *uinfop;
	ucode_errno_t rc = EM_OK;

	ASSERT(cp);
	if (cp->cpu_id == 0)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	if (!ucode_capable(cp))
		return;

	/*
	 * MSR_INTC_PLATFORM_ID is supported on Celeron and Xeon processors
	 * (family 6, model 5 and above) and on all later processors.
	 */
	if ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6)) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

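	/* Record the revision that is currently running on this CPU. */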
	ucode_read_rev(uinfop);

	/*
	 * Check to see if we need a microcode update.
	 */
	if ((rc = ucode_locate(cp, uinfop, &ucodefile)) == EM_OK) {
		ucode_update_intel(ucodefile.uf_body, uinfop);

		if (uinfop->cui_rev != ucodefile.uf_header.uh_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, ucodefile.uf_header.uh_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode_file_reset(&ucodefile, cp->cpu_id);
}

/*
 * Copies the microcode revision recorded in the machcpu structure of each
 * CPU into the revp array, which must have room for max_ncpus entries.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(revp);

	if (!ucode_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}
677