xref: /illumos-gate/usr/src/uts/intel/os/microcode.c (revision 48d6e6ab8fae1f82ef9fbee512e42a4f9d4de74b)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2025 Oxide Computer Company
 */

#include <sys/bootconf.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/ontrap.h>
#include <sys/stdbool.h>
#include <sys/systeminfo.h>
#include <sys/systm.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;
static const ucode_source_t *ucode;
static char *ucodepath;
static kmutex_t ucode_lock;
static bool ucode_cleanup_done = false;

/*
 * Flag for use by microcode impls to determine if they can use kmem.  Note this
 * is meant primarily for gating use of functions like kobj_open_file() which
 * allocate internally with kmem.  ucode_zalloc() and ucode_free() should
 * otherwise be used.
 */
bool ucode_use_kmem = false;

static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
static const char ucode_fallback_fmt[] =
	"?cpu%d: using older fallback microcode; update the system firmware";

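/*
 * Directory from which microcode files are loaded; %s is replaced with the
 * platform name (typically "i86pc"), e.g. /platform/i86pc/ucode.
 */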
static const char ucode_path_fmt[] = "/platform/%s/ucode";

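/*
 * Each vendor-specific microcode implementation contributes a ucode_source_t
 * entry to this linker set; ucode_init() selects the first entry whose
 * us_select() callback accepts the CPU it is running on.
 */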
SET_DECLARE(ucode_source_set, ucode_source_t);

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;

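/*
 * Select the microcode update implementation for this system, if any.  This
 * must run before the other routines in this file since it initializes
 * ucode_lock and chooses the backing implementation.
 */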
void
ucode_init(void)
{
	ucode_source_t **src;

	mutex_init(&ucode_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up function pointers */
	SET_FOREACH(src, ucode_source_set) {
		if ((*src)->us_select(CPU)) {
			ucode = *src;
			break;
		}
	}

	if (ucode == NULL)
		return;

#ifdef DEBUG
	cmn_err(CE_CONT, "?ucode: selected %s\n", ucode->us_name);

	if (!ucode->us_capable(CPU)) {
		cmn_err(CE_CONT,
		    "?ucode: microcode update not supported on CPU\n");
		return;
	}
#endif
}

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

const char *
ucode_path(void)
{
	ASSERT(ucodepath != NULL);
	return (ucodepath);
}

/*
 * Allocate/free a buffer used to hold ucode data. Space allocated before kmem
 * is available is allocated with BOP_ALLOC() and does not require a free.
 */
void *
ucode_zalloc(size_t size)
{
	if (ucode_use_kmem)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

void
ucode_free(void *buf, size_t size)
{
	if (ucode_use_kmem && buf != NULL)
		kmem_free(buf, size);
}

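/*
 * Illustrative sketch (not taken from any particular back-end) of how the
 * helpers above are meant to be paired: memory handed out before kmem is
 * ready comes from BOP_ALLOC() and is simply abandoned, while later
 * allocations must be released with ucode_free().
 *
 *	buf = ucode_zalloc(len);
 *	if (buf == NULL)
 *		... handle failure (only possible once kmem is in use) ...
 *	... fill and consume buf ...
 *	ucode_free(buf, len);
 */
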
/*
 * Called to free up space allocated for the microcode file. This is called
 * from start_other_cpus() after an update attempt has been performed on all
 * CPUs.
 */
void
ucode_cleanup(void)
{
	mutex_enter(&ucode_lock);
	if (ucode != NULL)
		ucode->us_file_reset();
	ucode_cleanup_done = true;
	mutex_exit(&ucode_lock);

	/*
	 * We purposefully do not free 'ucodepath' here so that it persists for
	 * any future callers to ucode_locate(), such as could occur on systems
	 * that support DR.
	 */
}

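/*
 * Cross-call handler used by ucode_update() to apply an update on the CPU on
 * which it runs; invoked via xc_sync() with a ucode_update_t as the argument.
 */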
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
	on_trap_data_t otd;

	ASSERT(ucode != NULL);
	ASSERT(uusp->ucodep != NULL);

	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->us_read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		if (ucode->us_invalidate) {
			/*
			 * On some platforms a cache invalidation is required
			 * for the ucode update to be successful due to the
			 * parts of the processor that the microcode is
			 * updating.
			 */
			invalidate_cache();
		}
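		/*
		 * The update itself is triggered by writing the address of
		 * the prepared microcode image to the implementation's
		 * update-trigger MSR.
		 */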
		wrmsr(ucode->us_write_msr, (uintptr_t)uusp->ucodep);
	}

	no_trap();
	ucode->us_read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

/*
 * Entry points to microcode update from the 'ucode' driver.
 */

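/*
 * A hypothetical consumer (such as the ucode driver behind ucodeadm(8)) would
 * be expected to validate an image before asking for it to be applied,
 * roughly:
 *
 *	if (ucode_validate(buf, len) == EM_OK)
 *		rc = ucode_update(buf, len);
 *
 * This sketch is illustrative only and is not lifted from the driver itself.
 */
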
ucode_errno_t
ucode_validate(uint8_t *ucodep, size_t size)
{
	if (ucode == NULL)
		return (EM_NOTSUP);
	return (ucode->us_validate(ucodep, size));
}

ucode_errno_t
ucode_update(uint8_t *ucodep, size_t size)
{
	bool		found = false;
	ucode_update_t	cached = { 0 };
	ucode_update_t	*cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode != 0);
	ASSERT(ucodep != 0);
	CPUSET_ZERO(cpuset);

	if (!ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (processorid_t id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY)) {
			continue;
		}

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->us_extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = true;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

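		/*
		 * Perform the update via a synchronous cross-call so that
		 * ucode_write() executes on the target CPU itself.
		 */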
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

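		/*
		 * Classify the outcome: the CPU may already have been at this
		 * or a newer revision, the write may not have taken effect,
		 * or the update succeeded.
		 */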
		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found) {
		rc = search_rc;
	} else if (rc == EM_OK) {
		cpuid_post_ucodeadm();
	}

	return (rc);
}

/*
 * Called when starting up non-boot CPUs from mp_startup() to read the current
 * microcode revision before the control CPU calls ucode_locate().
 */
void
ucode_read_rev(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	ucode->us_read_rev(uinfop);
}

/*
 * Called by the control CPU when starting up non-boot CPUs to find any
 * applicable microcode updates.  If one is found, it is recorded in the
 * target CPU's mcpu_ucode_info, from where it is later applied via
 * ucode_apply().  ucode_read_rev() must be called on the target CPU before
 * this function.
 */
void
ucode_locate(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc;
	size_t sz;

	ASSERT3P(cp, !=, NULL);
	ASSERT(ucode_use_kmem);

	mutex_enter(&ucode_lock);

	if (ucode == NULL || !ucode->us_capable(cp))
		goto out;

	if (ucodepath == NULL) {
		sz = snprintf(NULL, 0, ucode_path_fmt, platform) + 1;
		ucodepath = kmem_zalloc(sz, KM_NOSLEEP);
		if (ucodepath == NULL) {
			cmn_err(CE_WARN,
			    "ucode: could not allocate memory for path");
			goto out;
		}
		(void) snprintf(ucodepath, sz, ucode_path_fmt, platform);
	}

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * Search for any applicable updates.
	 *
	 * A return value of EM_HIGHERREV indicates that no update was applied
	 * due to the CPU already being at that or a higher revision, but both
	 * EM_HIGHERREV and EM_OK indicate that some microcode that matches the
	 * CPU was successfully located. In either of these cases it's worth
	 * keeping it around in case it's useful for the next CPU -- and if it
	 * isn't it will end up being discarded. In all other cases we clear it
	 * out just in case we have read in a partial or invalid file.
	 *
	 * Architectural note:
	 *   Depending on the platform, the cpu_t being processed may represent
	 *   a thread within a CPU core. If updating one thread's microcode
	 *   implicitly updates all sibling threads in the core, it's normal to
	 *   see a mix of EM_OK and EM_HIGHERREV when iterating over those
	 *   threads.
	 *
	 * There's one additional consideration. If we are here after
	 * ucode_cleanup() has been called, such as could occur with CPU
	 * hotplug, we also clear the memory and reset the data structure as
	 * nothing else will call ucode_cleanup() and we don't need to cache
	 * the data as we do during boot when starting the APs.
	 */
	rc = ucode->us_locate(cp, uinfop);
	if ((rc != EM_OK && rc != EM_HIGHERREV) || ucode_cleanup_done)
		ucode->us_file_reset();

out:
	mutex_exit(&ucode_lock);
}

/*
 * Called when starting up non-boot CPUs to load any pending microcode updates
 * found in ucode_locate().  Note this is called very early in the startup
 * process (before CPU_READY is set and while CPU_QUIESCED is), so we must be
 * careful about what we do here, e.g., no kmem_free or anything that might
 * call hat_unload; no kmem_alloc or anything which may cause a thread context
 * switch.  We also don't take the ucode_lock here for similar reasons (if
 * contended, the idle thread will spin with CPU_QUIESCED set).  This is fine
 * though since we should not be updating any shared ucode state.
 */
void
ucode_apply(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * No pending update -- nothing to do.
	 */
	if (uinfop->cui_pending_ucode == NULL)
		return;

	/*
	 * Apply pending update.
	 */
	ucode->us_load(uinfop);
}

/*
 * Called when starting up non-boot CPUs to free any pending microcode updates
 * found in ucode_locate() and print the result of the attempt to load it in
 * ucode_apply().  This is separate from ucode_apply() as we can't yet call
 * kmem_free() at that point in the startup process.
 */
void
ucode_finish(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	uint32_t old_rev, new_rev;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * No pending update -- nothing to do.
	 */
	if (uinfop->cui_pending_ucode == NULL)
		return;

	old_rev = uinfop->cui_rev;
	new_rev = uinfop->cui_pending_rev;
	ucode->us_read_rev(uinfop);

	if (uinfop->cui_rev != new_rev) {
		ASSERT3U(uinfop->cui_rev, ==, old_rev);
		cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id, old_rev,
		    new_rev);
	} else {
		cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id, old_rev,
		    new_rev);
	}

	ucode_free(uinfop->cui_pending_ucode, uinfop->cui_pending_size);
	uinfop->cui_pending_ucode = NULL;
	uinfop->cui_pending_size = 0;
	uinfop->cui_pending_rev = 0;
}

/*
 * Entry point to microcode update from mlsetup() for boot CPU.
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * cpuid_info must be initialized before we can be called.
 */
void
ucode_check_boot(void)
{
	cpu_t *cp = CPU;
	cpu_ucode_info_t *uinfop;
	const char *prop;
	char *plat;
	int prop_len;
	size_t path_len;

	ASSERT3U(cp->cpu_id, ==, 0);
	ASSERT(!ucode_use_kmem);

	mutex_enter(&ucode_lock);

	/* Space statically allocated for BSP; ensure pointer is set */
	ASSERT3P(cp->cpu_m.mcpu_ucode_info, ==, NULL);
	uinfop = cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	if (ucode == NULL || !ucode->us_capable(cp))
		goto out;

	ASSERT3P(ucodepath, ==, NULL);

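	/*
	 * The "impl-arch-name" boot property supplies the platform directory
	 * name (typically "i86pc") used to build the microcode search path
	 * below.
	 */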
	prop = "impl-arch-name";
	prop_len = BOP_GETPROPLEN(bootops, prop);
	if (prop_len <= 0) {
		cmn_err(CE_WARN, "ucode: could not find %s property", prop);
		goto out;
	}

	/*
	 * We're running on the boot CPU before kmem is available, so we make
	 * use of BOP_ALLOC() -- which panics on failure -- to allocate any
	 * memory we need.  That also means we don't need to explicitly free
	 * it.
	 */
	plat = BOP_ALLOC(bootops, NULL, prop_len + 1, MMU_PAGESIZE);
	(void) BOP_GETPROP(bootops, prop, plat);
	if (plat[0] == '\0') {
		/*
		 * If we can't determine the architecture name,
		 * we cannot find microcode files for it.
		 * Return without setting 'ucodepath'.
		 */
		cmn_err(CE_WARN, "ucode: could not determine arch");
		goto out;
	}

	path_len = snprintf(NULL, 0, ucode_path_fmt, plat) + 1;
	ucodepath = BOP_ALLOC(bootops, NULL, path_len, MMU_PAGESIZE);
	(void) snprintf(ucodepath, path_len, ucode_path_fmt, plat);

	/*
	 * Check to see if we need ucode update
	 */
	ucode->us_read_rev(uinfop);
	if (ucode->us_locate(cp, uinfop) == EM_OK) {
		uint32_t old_rev, new_rev;
		bool fallback = false;

		old_rev = uinfop->cui_rev;

retry:
		new_rev = uinfop->cui_pending_rev;
		ucode->us_load(uinfop);
		ucode->us_read_rev(uinfop);

		if (uinfop->cui_rev != new_rev) {
			ASSERT3U(uinfop->cui_rev, ==, old_rev);

			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    old_rev, new_rev);

			/*
			 * If the updater supports attempting a fallback
			 * microcode version, try that.
			 */
			if (!fallback && ucode->us_locate_fallback != NULL) {
				ucode->us_file_reset();
				uinfop->cui_pending_ucode = NULL;
				uinfop->cui_pending_size = 0;
				uinfop->cui_pending_rev = 0;
				if (ucode->us_locate_fallback(cp, uinfop) ==
				    EM_OK) {
					cmn_err(CE_WARN, ucode_fallback_fmt,
					    cp->cpu_id);
					fallback = true;
					goto retry;
				}
			}
		} else {
			cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id,
			    old_rev, new_rev);
		}
	}

	/*
	 * Regardless of whether we found a match or not, since the scratch
	 * memory for holding the microcode for the boot CPU came from
	 * BOP_ALLOC, we will reset the data structure as if we never did the
	 * allocation so we don't have to keep track of this special chunk of
	 * memory.
	 */
	ucode->us_file_reset();

	/*
	 * Similarly clear any pending update that may have been found.
	 */
	uinfop->cui_pending_ucode = NULL;
	uinfop->cui_pending_size = 0;
	uinfop->cui_pending_rev = 0;

out:
	/*
	 * Discard the memory that came from BOP_ALLOC and was used to build the
	 * ucode path.  Subsequent CPUs will be handled via ucode_locate() at
	 * which point kmem is available and we can cache the path.
	 */
	ucodepath = NULL;
	ucode_use_kmem = true;

	mutex_exit(&ucode_lock);
}

/*
 * Returns the microcode revision of each CPU, as recorded in its machcpu
 * structure, in the caller-supplied array indexed by CPU id.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(revp != NULL);

	if (ucode == NULL || !ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}