xref: /titanic_50/usr/src/uts/intel/io/acpica/osl.c (revision 836d28888aa06622108b415aae3a904e78ce852e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright (c) 2009, Intel Corporation.
28  * All rights reserved.
29  */
30 /*
31  * ACPI CA OSL for Solaris x86
32  */
33 
34 #include <sys/types.h>
35 #include <sys/kmem.h>
36 #include <sys/psm.h>
37 #include <sys/pci_cfgspace.h>
38 #include <sys/apic.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/sunndi.h>
42 #include <sys/pci.h>
43 #include <sys/kobj.h>
44 #include <sys/taskq.h>
45 #include <sys/strlog.h>
46 #include <sys/x86_archext.h>
47 #include <sys/note.h>
48 #include <sys/promif.h>
49 
50 #include <sys/acpi/acpi.h>
51 #include <sys/acpi/accommon.h>
52 #include <sys/acpica.h>
53 
54 #define	MAX_DAT_FILE_SIZE	(64*1024)
55 
56 /* local functions */
57 static int CompressEisaID(char *np);
58 
59 static void scan_d2a_map(void);
60 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
61 
62 static int acpica_query_bbn_problem(void);
63 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
64 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
65 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
66 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
67 static void acpica_devinfo_handler(ACPI_HANDLE, UINT32, void *);
68 
69 /*
70  * Event queue vars
71  */
72 int acpica_eventq_init = 0;
73 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
74 
75 /*
76  * Priorities relative to minclsyspri that each taskq
77  * run at; OSL_NOTIFY_HANDLER needs to run at a higher
78  * priority than OSL_GPE_HANDLER.  There's an implicit
79  * assumption that no priority here results in exceeding
80  * maxclsyspri.
81  * Note: these initializations need to match the order of
82  * ACPI_EXECUTE_TYPE.
83  */
84 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
85 	0,	/* OSL_GLOBAL_LOCK_HANDLER */
86 	2,	/* OSL_NOTIFY_HANDLER */
87 	0,	/* OSL_GPE_HANDLER */
88 	0,	/* OSL_DEBUGGER_THREAD */
89 	0,	/* OSL_EC_POLL_HANDLER */
90 	0	/* OSL_EC_BURST_HANDLER */
91 };
92 
93 /*
94  * Note, if you change this path, you need to update
95  * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
96  */
97 static char *acpi_table_path = "/boot/acpi/tables/";
98 
99 /* non-zero while scan_d2a_map() is working */
100 static int scanning_d2a_map = 0;
101 static int d2a_done = 0;
102 
103 /* features supported by ACPICA and ACPI device configuration. */
104 uint64_t acpica_core_features = 0;
105 static uint64_t acpica_devcfg_features = 0;
106 
107 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
108 int acpica_use_safe_delay = 0;
109 
110 /* CPU mapping data */
111 struct cpu_map_item {
112 	processorid_t	cpu_id;
113 	UINT32		proc_id;
114 	UINT32		apic_id;
115 	ACPI_HANDLE	obj;
116 };
117 
118 static kmutex_t cpu_map_lock;
119 static struct cpu_map_item **cpu_map = NULL;
120 static int cpu_map_count_max = 0;
121 static int cpu_map_count = 0;
122 static int cpu_map_built = 0;
123 
124 /*
125  * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
126  * This flag is used to check for uppc-only systems by detecting whether
127  * acpica_map_cpu() has been called or not.
128  */
129 static int cpu_map_called = 0;
130 
131 static int acpi_has_broken_bbn = -1;
132 
133 /* buffer for AcpiOsVprintf() */
134 #define	ACPI_OSL_PR_BUFLEN	1024
135 static char *acpi_osl_pr_buffer = NULL;
136 static int acpi_osl_pr_buflen;
137 
138 #define	D2A_DEBUG
139 
140 /*
141  *
142  */
143 static void
144 discard_event_queues()
145 {
146 	int	i;
147 
148 	/*
149 	 * destroy event queues
150 	 */
151 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
152 		if (osl_eventq[i])
153 			ddi_taskq_destroy(osl_eventq[i]);
154 	}
155 }
156 
157 
158 /*
159  *
160  */
161 static ACPI_STATUS
162 init_event_queues()
163 {
164 	char	namebuf[32];
165 	int	i, error = 0;
166 
167 	/*
168 	 * Initialize event queues
169 	 */
170 
171 	/* Always allocate only 1 thread per queue to force FIFO execution */
172 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
173 		snprintf(namebuf, 32, "ACPI%d", i);
174 		osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
175 		    osl_eventq_pri_delta[i] + minclsyspri, 0);
176 		if (osl_eventq[i] == NULL)
177 			error++;
178 	}
179 
180 	if (error != 0) {
181 		discard_event_queues();
182 #ifdef	DEBUG
183 		cmn_err(CE_WARN, "!acpica: could not initialize event queues");
184 #endif
185 		return (AE_ERROR);
186 	}
187 
188 	acpica_eventq_init = 1;
189 	return (AE_OK);
190 }
191 
192 /*
193  * One-time initialization of OSL layer
194  */
/*
 * One-time initialization of the OSL: pre-allocate the AcpiOsVprintf()
 * format buffer so we never kmem_alloc()/kmem_free() at high PIL.
 * Always returns AE_OK.
 */
ACPI_STATUS
AcpiOsInitialize(void)
{
	/*
	 * Allocate buffer for AcpiOsVprintf() here to avoid
	 * kmem_alloc()/kmem_free() at high PIL
	 */
	acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
	/* KM_SLEEP should not fail; check kept as a defensive measure */
	if (acpi_osl_pr_buffer != NULL)
		acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;

	return (AE_OK);
}
208 
209 /*
210  * One-time shut-down of OSL layer
211  */
212 ACPI_STATUS
213 AcpiOsTerminate(void)
214 {
215 
216 	if (acpi_osl_pr_buffer != NULL)
217 		kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
218 
219 	discard_event_queues();
220 	return (AE_OK);
221 }
222 
223 
/*
 * Return the physical address of the ACPI RSDP.  First consult the
 * "acpi-root-tab" property (set by the boot code from the EFI system
 * table); if that is absent (property default 0/NULL), fall back to
 * ACPI CA's legacy BIOS-memory scan.  Returns NULL if neither works.
 */
ACPI_PHYSICAL_ADDRESS
AcpiOsGetRootPointer()
{
	ACPI_PHYSICAL_ADDRESS Address;

	/*
	 * For EFI firmware, the root pointer is defined in EFI systab.
	 * The boot code process the table and put the physical address
	 * in the acpi-root-tab property.
	 */
	Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "acpi-root-tab", NULL);

	/* NULL (0) here means "property not present"; try the scan */
	if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
		Address = NULL;

	return (Address);
}
242 
243 /*ARGSUSED*/
244 ACPI_STATUS
245 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
246 				ACPI_STRING *NewVal)
247 {
248 
249 	*NewVal = 0;
250 	return (AE_OK);
251 }
252 
/*
 * Bounded string copy that always NUL-terminates the destination.
 * At most 'len' characters are copied from 'src'; 'dest' must have
 * room for len + 1 bytes.
 */
static void
acpica_strncpy(char *dest, const char *src, int len)
{
	int	remaining = len;
	char	c;

	do {
		c = *src++;
		*dest++ = c;
	} while (c != '\0' && --remaining > 0);
	*dest = '\0';
}
262 
263 ACPI_STATUS
264 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
265 			ACPI_TABLE_HEADER **NewTable)
266 {
267 	char signature[5];
268 	char oemid[7];
269 	char oemtableid[9];
270 	struct _buf *file;
271 	char *buf1, *buf2;
272 	int count;
273 	char acpi_table_loc[128];
274 
275 	acpica_strncpy(signature, ExistingTable->Signature, 4);
276 	acpica_strncpy(oemid, ExistingTable->OemId, 6);
277 	acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
278 
279 #ifdef	DEBUG
280 	cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
281 	    " OEM TABLE ID [%s] OEM rev %x",
282 	    signature, ExistingTable->Revision, oemid, oemtableid,
283 	    ExistingTable->OemRevision);
284 #endif
285 
286 	/* File name format is "signature_oemid_oemtableid.dat" */
287 	(void) strcpy(acpi_table_loc, acpi_table_path);
288 	(void) strcat(acpi_table_loc, signature); /* for example, DSDT */
289 	(void) strcat(acpi_table_loc, "_");
290 	(void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
291 	(void) strcat(acpi_table_loc, "_");
292 	(void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
293 	(void) strcat(acpi_table_loc, ".dat");
294 
295 	file = kobj_open_file(acpi_table_loc);
296 	if (file == (struct _buf *)-1) {
297 		*NewTable = 0;
298 		return (AE_OK);
299 	} else {
300 		buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
301 		count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
302 		if (count >= MAX_DAT_FILE_SIZE) {
303 			cmn_err(CE_WARN, "!acpica: table %s file size too big",
304 			    acpi_table_loc);
305 			*NewTable = 0;
306 		} else {
307 			buf2 = (char *)kmem_alloc(count, KM_SLEEP);
308 			(void) memcpy(buf2, buf1, count);
309 			*NewTable = (ACPI_TABLE_HEADER *)buf2;
310 			cmn_err(CE_NOTE, "!acpica: replacing table: %s",
311 			    acpi_table_loc);
312 		}
313 	}
314 	kobj_close_file(file);
315 	kmem_free(buf1, MAX_DAT_FILE_SIZE);
316 
317 	return (AE_OK);
318 }
319 
320 
321 /*
322  * ACPI semaphore implementation
323  */
/* Counting semaphore built on a mutex/condvar pair */
typedef struct {
	kmutex_t	mutex;		/* protects 'available' */
	kcondvar_t	cv;		/* broadcast when units are returned */
	uint32_t	available;	/* units currently available */
	uint32_t	initial;	/* unit count at creation time */
	uint32_t	maximum;	/* recorded at creation; acpi_sema_v() does not enforce it */
} acpi_sema_t;
331 
332 /*
333  *
334  */
/*
 * Initialize a semaphore with 'count' units available and a recorded
 * maximum of 'max'.  Must be called before any P/V operation.
 */
void
acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
{
	mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
	/* no need to enter mutex here at creation */
	sp->available = count;
	sp->initial = count;
	sp->maximum = max;
}
345 
346 /*
347  *
348  */
/*
 * Destroy a semaphore's condvar and mutex.  The caller must ensure
 * no thread is blocked on it.
 */
void
acpi_sema_destroy(acpi_sema_t *sp)
{

	cv_destroy(&sp->cv);
	mutex_destroy(&sp->mutex);
}
356 
357 /*
358  *
359  */
/*
 * Semaphore "P" (acquire): take 'count' units, blocking up to
 * 'wait_time' milliseconds for them to become available.
 *
 * Returns AE_OK when the units were taken; AE_TIME when wait_time
 * is 0 and the units are not immediately available, or when a finite
 * timeout expires.  ACPI_WAIT_FOREVER blocks indefinitely.
 */
ACPI_STATUS
acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
{
	ACPI_STATUS rv = AE_OK;
	clock_t deadline;

	mutex_enter(&sp->mutex);

	if (sp->available >= count) {
		/*
		 * Enough units available, no blocking
		 */
		sp->available -= count;
		mutex_exit(&sp->mutex);
		return (rv);
	} else if (wait_time == 0) {
		/*
		 * Not enough units available and timeout
		 * specifies no blocking
		 */
		rv = AE_TIME;
		mutex_exit(&sp->mutex);
		return (rv);
	}

	/*
	 * Not enough units available and timeout specifies waiting
	 */
	if (wait_time != ACPI_WAIT_FOREVER)
		deadline = ddi_get_lbolt() +
		    (clock_t)drv_usectohz(wait_time * 1000);

	do {
		if (wait_time == ACPI_WAIT_FOREVER)
			cv_wait(&sp->cv, &sp->mutex);
		else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
			/* negative return means the deadline passed */
			rv = AE_TIME;
			break;
		}
	} while (sp->available < count);

	/* if we dropped out of the wait with AE_OK, we got the units */
	if (rv == AE_OK)
		sp->available -= count;

	mutex_exit(&sp->mutex);
	return (rv);
}
408 
409 /*
410  *
411  */
/*
 * Semaphore "V" (release): return 'count' units and wake all waiters
 * so they can re-evaluate availability.  Note the recorded maximum
 * is not enforced here.
 */
void
acpi_sema_v(acpi_sema_t *sp, unsigned count)
{
	mutex_enter(&sp->mutex);
	sp->available += count;
	cv_broadcast(&sp->cv);
	mutex_exit(&sp->mutex);
}
420 
421 
422 ACPI_STATUS
423 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
424 ACPI_HANDLE *OutHandle)
425 {
426 	acpi_sema_t *sp;
427 
428 	if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
429 		return (AE_BAD_PARAMETER);
430 
431 	sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
432 	acpi_sema_init(sp, MaxUnits, InitialUnits);
433 	*OutHandle = (ACPI_HANDLE)sp;
434 	return (AE_OK);
435 }
436 
437 
438 ACPI_STATUS
439 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
440 {
441 
442 	if (Handle == NULL)
443 		return (AE_BAD_PARAMETER);
444 
445 	acpi_sema_destroy((acpi_sema_t *)Handle);
446 	kmem_free((void *)Handle, sizeof (acpi_sema_t));
447 	return (AE_OK);
448 }
449 
450 ACPI_STATUS
451 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
452 {
453 
454 	if ((Handle == NULL) || (Units < 1))
455 		return (AE_BAD_PARAMETER);
456 
457 	return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
458 }
459 
460 ACPI_STATUS
461 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
462 {
463 
464 	if ((Handle == NULL) || (Units < 1))
465 		return (AE_BAD_PARAMETER);
466 
467 	acpi_sema_v((acpi_sema_t *)Handle, Units);
468 	return (AE_OK);
469 }
470 
471 ACPI_STATUS
472 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
473 {
474 	kmutex_t *mp;
475 
476 	if (OutHandle == NULL)
477 		return (AE_BAD_PARAMETER);
478 
479 	mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
480 	mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
481 	*OutHandle = (ACPI_HANDLE)mp;
482 	return (AE_OK);
483 }
484 
485 void
486 AcpiOsDeleteLock(ACPI_HANDLE Handle)
487 {
488 
489 	if (Handle == NULL)
490 		return;
491 
492 	mutex_destroy((kmutex_t *)Handle);
493 	kmem_free((void *)Handle, sizeof (kmutex_t));
494 }
495 
/*
 * Acquire an ACPI CA lock.  The idle thread must never block, so it
 * spins on mutex_tryenter() instead of calling mutex_enter().
 *
 * NOTE(review): the return type is ACPI_CPU_FLAGS (interrupt state to
 * hand back to AcpiOsReleaseLock), but AE_* status codes are returned
 * here; the value is unused by AcpiOsReleaseLock() below — confirm
 * this matches ACPI CA's expectations.
 */
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_HANDLE Handle)
{


	if (Handle == NULL)
		return (AE_BAD_PARAMETER);

	/* the idle thread may not block; busy-spin for the lock instead */
	if (curthread == CPU->cpu_idle_thread) {
		while (!mutex_tryenter((kmutex_t *)Handle))
			/* spin */;
	} else
		mutex_enter((kmutex_t *)Handle);
	return (AE_OK);
}
511 
/*
 * Release a lock acquired by AcpiOsAcquireLock().  The Flags value
 * returned by the acquire side is not used on this platform.
 */
void
AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
{
	_NOTE(ARGUNUSED(Flags))

	mutex_exit((kmutex_t *)Handle);
}
519 
520 
/*
 * Allocate zeroed memory for ACPI CA.  The total allocation size is
 * stored in a hidden ACPI_SIZE header immediately before the pointer
 * handed back, so AcpiOsFree() can recover it (kmem_free() requires
 * the original size).
 */
void *
AcpiOsAllocate(ACPI_SIZE Size)
{
	ACPI_SIZE *tmp_ptr;

	/* grow the request to make room for the size header */
	Size += sizeof (Size);
	tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
	/* record the total size, then return the memory after it */
	*tmp_ptr++ = Size;
	return (tmp_ptr);
}
531 
/*
 * Free memory allocated by AcpiOsAllocate().  Steps back over the
 * hidden size header to recover both the original pointer and the
 * total allocation size.  Must only be passed AcpiOsAllocate()
 * results.
 */
void
AcpiOsFree(void *Memory)
{
	ACPI_SIZE	size, *tmp_ptr;

	tmp_ptr = (ACPI_SIZE *)Memory;
	tmp_ptr -= 1;		/* back up to the size header */
	size = *tmp_ptr;
	kmem_free(tmp_ptr, size);
}
542 
static int napics_found;	/* number of ioapic addresses in array */
static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];	/* page-aligned ioapic physaddrs */
static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;	/* cached MADT; non-NULL once parsed */
static void *dummy_ioapicadr;	/* zeroed page substituted for ioapic mappings */
547 
548 void
549 acpica_find_ioapics(void)
550 {
551 	int			madt_seen, madt_size;
552 	ACPI_SUBTABLE_HEADER		*ap;
553 	ACPI_MADT_IO_APIC		*mia;
554 
555 	if (acpi_mapic_dtp != NULL)
556 		return;	/* already parsed table */
557 	if (AcpiGetTable(ACPI_SIG_MADT, 1,
558 	    (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
559 		return;
560 
561 	napics_found = 0;
562 
563 	/*
564 	 * Search the MADT for ioapics
565 	 */
566 	ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
567 	madt_size = acpi_mapic_dtp->Header.Length;
568 	madt_seen = sizeof (*acpi_mapic_dtp);
569 
570 	while (madt_seen < madt_size) {
571 
572 		switch (ap->Type) {
573 		case ACPI_MADT_TYPE_IO_APIC:
574 			mia = (ACPI_MADT_IO_APIC *) ap;
575 			if (napics_found < MAX_IO_APIC) {
576 				ioapic_paddr[napics_found++] =
577 				    (ACPI_PHYSICAL_ADDRESS)
578 				    (mia->Address & PAGEMASK);
579 			}
580 			break;
581 
582 		default:
583 			break;
584 		}
585 
586 		/* advance to next entry */
587 		madt_seen += ap->Length;
588 		ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
589 	}
590 	if (dummy_ioapicadr == NULL)
591 		dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
592 }
593 
594 
/*
 * Map a physical address range for ACPI CA.  Requests targeting a
 * known I/O APIC page (recorded by acpica_find_ioapics()) are
 * redirected to a shared dummy page so AML cannot touch real ioapic
 * registers; everything else is mapped read/write via psm_map_new().
 */
void *
AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
{
	int	i;

	/*
	 * If the iopaic address table is populated, check if trying
	 * to access an ioapic.  Instead, return a pointer to a dummy ioapic.
	 */
	for (i = 0; i < napics_found; i++) {
		if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
			return (dummy_ioapicadr);
	}
	/* FUTUREWORK: test PhysicalAddress for > 32 bits */
	return (psm_map_new((paddr_t)PhysicalAddress,
	    (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
}
612 
613 void
614 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
615 {
616 	/*
617 	 * Check if trying to unmap dummy ioapic address.
618 	 */
619 	if (LogicalAddress == dummy_ioapicadr)
620 		return;
621 
622 	psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
623 }
624 
/*
 * Logical-to-physical translation for ACPI CA.  Not needed on this
 * platform: ACPI CA does not invoke it, so it is left unimplemented.
 */
/*ARGSUSED*/
ACPI_STATUS
AcpiOsGetPhysicalAddress(void *LogicalAddress,
			ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
{

	/* UNIMPLEMENTED: not invoked by ACPI CA code */
	return (AE_NOT_IMPLEMENTED);
}
634 
635 
636 ACPI_OSD_HANDLER acpi_isr;
637 void *acpi_isr_context;
638 
639 uint_t
640 acpi_wrapper_isr(char *arg)
641 {
642 	_NOTE(ARGUNUSED(arg))
643 
644 	int	status;
645 
646 	status = (*acpi_isr)(acpi_isr_context);
647 
648 	if (status == ACPI_INTERRUPT_HANDLED) {
649 		return (DDI_INTR_CLAIMED);
650 	} else {
651 		return (DDI_INTR_UNCLAIMED);
652 	}
653 }
654 
655 static int acpi_intr_hooked = 0;
656 
/*
 * Attach ACPI CA's SCI handler.  The InterruptNumber argument is
 * ignored: the SCI vector is obtained from acpica_get_sci(), which
 * adjusts for PIC/APIC mode.  The handler and its context are stashed
 * in globals for acpi_wrapper_isr(), and acpi_intr_hooked records a
 * successful attach so the remove path knows to detach.
 */
ACPI_STATUS
AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
		ACPI_OSD_HANDLER ServiceRoutine,
		void *Context)
{
	_NOTE(ARGUNUSED(InterruptNumber))

	int retval;
	int sci_vect;
	iflag_t sci_flags;

	acpi_isr = ServiceRoutine;
	acpi_isr_context = Context;

	/*
	 * Get SCI (adjusted for PIC/APIC mode if necessary)
	 */
	if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
		return (AE_ERROR);
	}

#ifdef	DEBUG
	cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
#endif

	retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
	    "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
	if (retval) {
		acpi_intr_hooked = 1;
		return (AE_OK);
	} else
		return (AE_BAD_PARAMETER);
}
690 
/*
 * Detach the SCI handler installed above, but only if one was hooked.
 * The ServiceRoutine argument is ignored; acpi_wrapper_isr is always
 * the attached handler.
 *
 * NOTE(review): the detach uses LOCK_LEVEL - 1 as the IPL while the
 * attach used SCI_IPL — confirm these evaluate to the same level.
 */
ACPI_STATUS
AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
			ACPI_OSD_HANDLER ServiceRoutine)
{
	_NOTE(ARGUNUSED(ServiceRoutine))

#ifdef	DEBUG
	cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
#endif
	if (acpi_intr_hooked) {
		rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
		    InterruptNumber);
		acpi_intr_hooked = 0;
	}
	return (AE_OK);
}
707 
708 
/*
 * Return an identifier for the running thread, as required by
 * ACPI CA for mutex ownership tracking.
 */
ACPI_THREAD_ID
AcpiOsGetThreadId(void)
{
	/*
	 * ACPI CA doesn't care what actual value is returned as long
	 * as it is non-zero and unique to each existing thread.
	 * ACPI CA assumes that thread ID is castable to a pointer,
	 * so we use the current thread pointer.
	 */
	return (curthread);
}
720 
721 /*
722  *
723  */
/*
 * Queue a callback for asynchronous execution on the taskq matching
 * its ACPI_EXECUTE_TYPE.  Taskqs are created lazily on first use.
 * Returns AE_ERROR if the queues cannot be created or the dispatch
 * fails (DDI_NOSLEEP: no blocking allocation here).
 *
 * NOTE(review): the acpica_eventq_init check is not synchronized;
 * concurrent first callers could both run init_event_queues() —
 * confirm first use is effectively single-threaded.
 */
ACPI_STATUS
AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK  Function,
    void *Context)
{

	if (!acpica_eventq_init) {
		/*
		 * Create taskqs for event handling
		 */
		if (init_event_queues() != AE_OK)
			return (AE_ERROR);
	}

	if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
	    DDI_NOSLEEP) == DDI_FAILURE) {
#ifdef	DEBUG
		cmn_err(CE_WARN, "!acpica: unable to dispatch event");
#endif
		return (AE_ERROR);
	}
	return (AE_OK);

}
747 
/*
 * Sleep for the given number of milliseconds.  Busy-waits instead of
 * blocking when the clock is not delivering ticks (early boot, or
 * late shutdown/suspend as signalled by acpica_use_safe_delay).
 */
void
AcpiOsSleep(ACPI_INTEGER Milliseconds)
{
	/*
	 * During kernel startup, before the first tick interrupt
	 * has taken place, we can't call delay; very late in
	 * kernel shutdown or suspend/resume, clock interrupts
	 * are blocked, so delay doesn't work then either.
	 * So we busy wait if lbolt == 0 (kernel startup)
	 * or if acpica_use_safe_delay has been set to a
	 * non-zero value.
	 */
	if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
		drv_usecwait(Milliseconds * 1000);
	else
		delay(drv_usectohz(Milliseconds * 1000));
}
765 
/*
 * Busy-wait for the given number of microseconds; ACPI CA uses this
 * for short delays where blocking is not allowed.
 */
void
AcpiOsStall(UINT32 Microseconds)
{
	drv_usecwait(Microseconds);
}
771 
772 
773 /*
774  * Implementation of "Windows 2001" compatible I/O permission map
775  *
776  */
/* Per-range I/O permission flags */
#define	OSL_IO_NONE	(0)
#define	OSL_IO_READ	(1<<0)
#define	OSL_IO_WRITE	(1<<1)
#define	OSL_IO_RW	(OSL_IO_READ | OSL_IO_WRITE)
#define	OSL_IO_TERM	(1<<2)	/* marks the last table entry */
#define	OSL_IO_DEFAULT	OSL_IO_RW

/*
 * I/O port ranges with explicit permissions; bounds are inclusive
 * (see osl_io_find_perm()).  Addresses not listed here default to
 * full access.
 */
static struct io_perm  {
	ACPI_IO_ADDRESS	low;
	ACPI_IO_ADDRESS	high;
	uint8_t		perm;
} osl_io_perm[] = {
	{ 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}	/* PCI config ports */
};
791 
792 
793 /*
794  *
795  */
796 static struct io_perm *
797 osl_io_find_perm(ACPI_IO_ADDRESS addr)
798 {
799 	struct io_perm *p;
800 
801 	p = osl_io_perm;
802 	while (p != NULL) {
803 		if ((p->low <= addr) && (addr <= p->high))
804 			break;
805 		p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
806 	}
807 
808 	return (p);
809 }
810 
811 /*
812  *
813  */
/*
 * Read 'Width' bits (8/16/32) from an I/O port into *Value.  The
 * osl_io_perm table is consulted first; addresses not covered by any
 * entry are allowed.  On a denied read, *Value is set to all-ones and
 * AE_ERROR is returned; an unsupported width yields AE_BAD_PARAMETER.
 */
ACPI_STATUS
AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
{
	struct io_perm *p;

	/* verify permission */
	p = osl_io_find_perm(Address);
	if (p && (p->perm & OSL_IO_READ) == 0) {
		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
		    (long)Address, Width);
		*Value = 0xffffffff;
		return (AE_ERROR);
	}

	switch (Width) {
	case 8:
		*Value = inb(Address);
		break;
	case 16:
		*Value = inw(Address);
		break;
	case 32:
		*Value = inl(Address);
		break;
	default:
		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
		    (long)Address, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
845 
/*
 * Write 'Width' bits (8/16/32) of Value to an I/O port, subject to
 * the osl_io_perm table (addresses not covered are allowed).  A
 * denied write returns AE_ERROR; an unsupported width returns
 * AE_BAD_PARAMETER.
 */
ACPI_STATUS
AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
{
	struct io_perm *p;

	/* verify permission */
	p = osl_io_find_perm(Address);
	if (p && (p->perm & OSL_IO_WRITE) == 0) {
		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
		    (long)Address, Width);
		return (AE_ERROR);
	}

	switch (Width) {
	case 8:
		outb(Address, Value);
		break;
	case 16:
		outw(Address, Value);
		break;
	case 32:
		outl(Address, Value);
		break;
	default:
		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
		    (long)Address, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
876 
877 
878 /*
879  *
880  */
881 
/*
 * OSL_RW: copy one value of 'type' between *ptr and *val; 'rw'
 * selects direction (non-zero: write *val into *ptr, zero: read).
 */
#define	OSL_RW(ptr, val, type, rw) \
	{ if (rw) *((type *)(ptr)) = *((type *) val); \
	    else *((type *) val) = *((type *)(ptr)); }


/*
 * Common helper for AcpiOsReadMemory()/AcpiOsWriteMemory(): map the
 * physical address, perform a single access of Width bits (8/16/32),
 * then unmap.  Unsupported widths are logged and ignored.
 */
static void
osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value,
    UINT32 Width, int write)
{
	size_t	maplen = Width / 8;
	caddr_t	ptr;

	ptr = psm_map_new((paddr_t)Address, maplen,
	    PSM_PROT_WRITE | PSM_PROT_READ);

	switch (maplen) {
	case 1:
		OSL_RW(ptr, Value, uint8_t, write);
		break;
	case 2:
		OSL_RW(ptr, Value, uint16_t, write);
		break;
	case 4:
		OSL_RW(ptr, Value, uint32_t, write);
		break;
	default:
		cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
		    Width);
		break;
	}

	psm_unmap(ptr, maplen);
}
915 
/*
 * Read 'Width' bits from physical memory into *Value.
 * Always reports AE_OK; width validation is logged by the helper.
 */
ACPI_STATUS
AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
		UINT32 *Value, UINT32 Width)
{
	osl_rw_memory(Address, Value, Width, 0);
	return (AE_OK);
}
923 
/*
 * Write 'Width' bits of Value to physical memory.
 * Always reports AE_OK; width validation is logged by the helper.
 */
ACPI_STATUS
AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
		UINT32 Value, UINT32 Width)
{
	osl_rw_memory(Address, &Value, Width, 1);
	return (AE_OK);
}
931 
932 
/*
 * Read a PCI config-space register for ACPI CA.  The result is always
 * stored as a UINT64 regardless of Width (8/16/32), matching ACPI
 * CA's expectations.  64-bit reads are explicitly unsupported.
 */
ACPI_STATUS
AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
			void *Value, UINT32 Width)
{

	switch (Width) {
	case 8:
		*((UINT64 *)Value) = (UINT64)(*pci_getb_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Register);
		break;
	case 16:
		*((UINT64 *)Value) = (UINT64)(*pci_getw_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Register);
		break;
	case 32:
		*((UINT64 *)Value) = (UINT64)(*pci_getl_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Register);
		break;
	case 64:	/* deliberately unsupported */
	default:
		cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
		    Register, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
959 
960 /*
961  *
962  */
963 int acpica_write_pci_config_ok = 1;
964 
/*
 * Write a PCI config-space register for ACPI CA.  When the global
 * acpica_write_pci_config_ok is cleared, the write is logged and
 * silently dropped (still AE_OK so AML execution continues).
 * 64-bit writes are explicitly unsupported.
 */
ACPI_STATUS
AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
		ACPI_INTEGER Value, UINT32 Width)
{

	if (!acpica_write_pci_config_ok) {
		cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
		    " %lx %d not permitted", PciId->Bus, PciId->Device,
		    PciId->Function, Register, (long)Value, Width);
		return (AE_OK);
	}

	switch (Width) {
	case 8:
		(*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
		    Register, (uint8_t)Value);
		break;
	case 16:
		(*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
		    Register, (uint16_t)Value);
		break;
	case 32:
		(*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
		    Register, (uint32_t)Value);
		break;
	case 64:	/* deliberately unsupported */
	default:
		cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
		    Register, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
998 
999 /*
1000  * Called with ACPI_HANDLEs for both a PCI Config Space
1001  * OpRegion and (what ACPI CA thinks is) the PCI device
1002  * to which this ConfigSpace OpRegion belongs.  Since
1003  * ACPI CA depends on a valid _BBN object being present
1004  * and this is not always true (one old x86 had broken _BBN),
1005  * we go ahead and get the correct PCI bus number using the
1006  * devinfo mapping (which compensates for broken _BBN).
1007  *
1008  * Default values for bus, segment, device and function are
1009  * all 0 when ACPI CA can't figure them out.
1010  *
1011  * Some BIOSes implement _BBN() by reading PCI config space
1012  * on bus #0 - which means that we'll recurse when we attempt
1013  * to create the devinfo-to-ACPI map.  If Derive is called during
1014  * scan_d2a_map, we don't translate the bus # and return.
1015  *
1016  * We get the parent of the OpRegion, which must be a PCI
1017  * node, fetch the associated devinfo node and snag the
1018  * b/d/f from it.
1019  */
void
AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
		ACPI_PCI_ID **PciId)
{
	ACPI_HANDLE handle;
	dev_info_t *dip;
	int bus, device, func, devfn;


	/*
	 * See above - avoid recursing during scanning_d2a_map.
	 */
	if (scanning_d2a_map)
		return;

	/*
	 * Get the OpRegion's parent
	 */
	if (AcpiGetParent(chandle, &handle) != AE_OK)
		return;

	/*
	 * If we've mapped the ACPI node to the devinfo
	 * tree, use the devinfo reg property
	 */
	if (acpica_get_devinfo(handle, &dip) == AE_OK) {
		(void) acpica_get_bdf(dip, &bus, &device, &func);
		(*PciId)->Bus = bus;
		(*PciId)->Device = device;
		(*PciId)->Function = func;
	} else if (acpica_eval_int(handle, "_ADR", &devfn) == AE_OK) {
		/*
		 * no devinfo node - just confirm the d/f; the bus
		 * number supplied by ACPI CA is left untouched here
		 */
		(*PciId)->Device = (devfn >> 16) & 0xFFFF;
		(*PciId)->Function = devfn & 0xFFFF;
	}
}
1056 
1057 
1058 /*ARGSUSED*/
1059 BOOLEAN
1060 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1061 {
1062 
1063 	/* Always says yes; all mapped memory assumed readable */
1064 	return (1);
1065 }
1066 
1067 /*ARGSUSED*/
1068 BOOLEAN
1069 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1070 {
1071 
1072 	/* Always says yes; all mapped memory assumed writable */
1073 	return (1);
1074 }
1075 
/*
 * Return the current time in 100ns units, as ACPI CA requires.
 * The +50 rounds the 1ns gethrtime() value to the nearest 100ns
 * granule instead of truncating.
 */
UINT64
AcpiOsGetTimer(void)
{
	/* gethrtime() returns 1nS resolution; convert to 100nS granules */
	return ((gethrtime() + 50) / 100);
}
1082 
/*
 * _OSI strings this OSL will acknowledge.  A non-zero control_flag
 * means the string is only supported when the corresponding core
 * feature is enabled (checked in AcpiOsValidateInterface()).
 */
static struct AcpiOSIFeature_s {
	uint64_t	control_flag;	/* required core feature, 0 = none */
	const char	*feature_name;	/* _OSI string */
} AcpiOSIFeatures[] = {
	{ ACPI_FEATURE_OSI_MODULE,	"Module Device" },
	{ 0,				"Processor Device" }
};
1090 
1091 /*ARGSUSED*/
1092 ACPI_STATUS
1093 AcpiOsValidateInterface(char *feature)
1094 {
1095 	int i;
1096 
1097 	ASSERT(feature != NULL);
1098 	for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1099 	    i++) {
1100 		if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1101 			continue;
1102 		}
1103 		/* Check whether required core features are available. */
1104 		if (AcpiOSIFeatures[i].control_flag != 0 &&
1105 		    acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1106 		    AcpiOSIFeatures[i].control_flag) {
1107 			break;
1108 		}
1109 		/* Feature supported. */
1110 		return (AE_OK);
1111 	}
1112 
1113 	return (AE_SUPPORT);
1114 }
1115 
1116 /*ARGSUSED*/
1117 ACPI_STATUS
1118 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1119     ACPI_SIZE length)
1120 {
1121 	return (AE_OK);
1122 }
1123 
/*
 * Handle a signal from AML (breakpoint/fatal).  Currently just logs;
 * real debugger support is future work.  Always returns AE_OK so AML
 * execution continues.
 */
ACPI_STATUS
AcpiOsSignal(UINT32 Function, void *Info)
{
	_NOTE(ARGUNUSED(Function, Info))

	/* FUTUREWORK: debugger support */

	cmn_err(CE_NOTE, "!OsSignal unimplemented");
	return (AE_OK);
}
1134 
/*
 * printf-style output for ACPI CA; varargs wrapper around
 * AcpiOsVprintf().
 */
void ACPI_INTERNAL_VAR_XFACE
AcpiOsPrintf(const char *Format, ...)
{
	va_list ap;

	va_start(ap, Format);
	AcpiOsVprintf(Format, ap);
	va_end(ap);
}
1144 
1145 /*
1146  * When != 0, sends output to console
1147  * Patchable with kmdb or /etc/system.
1148  */
1149 int acpica_console_out = 0;
1150 
1151 #define	ACPICA_OUTBUF_LEN	160
1152 char	acpica_outbuf[ACPICA_OUTBUF_LEN];
1153 int	acpica_outbuf_offset;
1154 
1155 /*
1156  *
1157  */
1158 static void
1159 acpica_pr_buf(char *buf)
1160 {
1161 	char c, *bufp, *outp;
1162 	int	out_remaining;
1163 
1164 	/*
1165 	 * copy the supplied buffer into the output buffer
1166 	 * when we hit a '\n' or overflow the output buffer,
1167 	 * output and reset the output buffer
1168 	 */
1169 	bufp = buf;
1170 	outp = acpica_outbuf + acpica_outbuf_offset;
1171 	out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1172 	while (c = *bufp++) {
1173 		*outp++ = c;
1174 		if (c == '\n' || --out_remaining == 0) {
1175 			*outp = '\0';
1176 			switch (acpica_console_out) {
1177 			case 1:
1178 				printf(acpica_outbuf);
1179 				break;
1180 			case 2:
1181 				prom_printf(acpica_outbuf);
1182 				break;
1183 			case 0:
1184 			default:
1185 				(void) strlog(0, 0, 0,
1186 				    SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1187 				    acpica_outbuf);
1188 				break;
1189 			}
1190 			acpica_outbuf_offset = 0;
1191 			outp = acpica_outbuf;
1192 			out_remaining = ACPICA_OUTBUF_LEN - 1;
1193 		}
1194 	}
1195 
1196 	acpica_outbuf_offset = outp - acpica_outbuf;
1197 }
1198 
/*
 * vprintf-style output for ACPI CA.  Formats into the buffer
 * pre-allocated by AcpiOsInitialize() (so no allocation happens at
 * high PIL) and hands the result to acpica_pr_buf() for line-buffered
 * delivery; falls back to vprintf() if the buffer is unavailable.
 */
void
AcpiOsVprintf(const char *Format, va_list Args)
{

	/*
	 * If AcpiOsInitialize() failed to allocate a string buffer,
	 * resort to vprintf().
	 */
	if (acpi_osl_pr_buffer == NULL) {
		vprintf(Format, Args);
		return;
	}

	/*
	 * It is possible that a very long debug output statement will
	 * be truncated; this is silently ignored.
	 */
	(void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
	acpica_pr_buf(acpi_osl_pr_buffer);
}
1219 
/*
 * Redirect ACPI CA output to a new destination.  Not supported;
 * logged (in DEBUG kernels) and ignored.
 */
void
AcpiOsRedirectOutput(void *Destination)
{
	_NOTE(ARGUNUSED(Destination))

	/* FUTUREWORK: debugger support */

#ifdef	DEBUG
	cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
#endif
}
1231 
1232 
/*
 * Read a line of input for the ACPI debugger.  Not supported; always
 * reports zero characters read.
 */
UINT32
AcpiOsGetLine(char *Buffer)
{
	_NOTE(ARGUNUSED(Buffer))

	/* FUTUREWORK: debugger support */

	return (0);
}
1242 
1243 /*
1244  * Device tree binding
1245  */
/*
 * AcpiWalkNamespace() callback used by acpica_find_pcibus(): find the
 * ACPI device node for PCI bus number 'busno' (passed via ctxp).
 * On a match the node handle is stored through rvpp and the walk is
 * terminated.  Returns AE_OK to descend past non-bridge devices,
 * AE_CTRL_DEPTH to skip absent devices and matched/unmatched bridge
 * subtrees, AE_CTRL_TERMINATE to stop the walk.
 */
static ACPI_STATUS
acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
{
	_NOTE(ARGUNUSED(lvl));

	int sta, hid, bbn;
	int busno = (intptr_t)ctxp;
	ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;

	/* Check whether device exists. */
	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
		/*
		 * Skip object if device doesn't exist.
		 * According to ACPI Spec,
		 * 1) setting either bit 0 or bit 3 means that device exists.
		 * 2) Absence of _STA method means all status bits set.
		 */
		return (AE_CTRL_DEPTH);
	}

	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
		/* Non PCI/PCIe host bridge. */
		return (AE_OK);
	}

	if (acpi_has_broken_bbn) {
		ACPI_BUFFER rb;
		rb.Pointer = NULL;
		rb.Length = ACPI_ALLOCATE_BUFFER;

		/*
		 * Decree _BBN == n from PCI<n>
		 * NOTE(review): assumes the single-segment name is of
		 * the form "PCIn" with a one-digit n — confirm for
		 * systems with more than 10 host bridges.
		 */
		if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
			return (AE_CTRL_TERMINATE);
		}
		bbn = ((char *)rb.Pointer)[3] - '0';
		AcpiOsFree(rb.Pointer);
		if (bbn == busno || busno == 0) {
			*hdlp = hdl;
			return (AE_CTRL_TERMINATE);
		}
	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
		if (bbn == busno) {
			*hdlp = hdl;
			return (AE_CTRL_TERMINATE);
		}
	} else if (busno == 0) {
		/* No _BBN at all: treat the bridge as bus 0 */
		*hdlp = hdl;
		return (AE_CTRL_TERMINATE);
	}

	/* A bridge, but not the bus we want: don't descend into it */
	return (AE_CTRL_DEPTH);
}
1300 
1301 static int
1302 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1303 {
1304 	ACPI_HANDLE sbobj, busobj;
1305 
1306 	/* initialize static flag by querying ACPI namespace for bug */
1307 	if (acpi_has_broken_bbn == -1)
1308 		acpi_has_broken_bbn = acpica_query_bbn_problem();
1309 
1310 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1311 		busobj = NULL;
1312 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1313 		    acpica_find_pcibus_walker, (void *)(intptr_t)busno,
1314 		    (void **)&busobj);
1315 		if (busobj != NULL) {
1316 			*rh = busobj;
1317 			return (AE_OK);
1318 		}
1319 	}
1320 
1321 	return (AE_ERROR);
1322 }
1323 
/*
 * AcpiWalkNamespace() callback for acpica_query_bbn_problem(): count
 * (in *ctxp) the PCI/PCIe host bridges whose _BBN evaluates to 0, and
 * stop the walk as soon as a second one is seen.
 */
static ACPI_STATUS
acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
{
	_NOTE(ARGUNUSED(lvl));
	_NOTE(ARGUNUSED(rvpp));

	int sta, hid, bbn;
	int *cntp = (int *)ctxp;	/* running count of zero-_BBN bridges */

	/* Check whether device exists. */
	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
		/*
		 * Skip object if device doesn't exist.
		 * According to ACPI Spec,
		 * 1) setting either bit 0 or bit 3 means that device exists.
		 * 2) Absence of _STA method means all status bits set.
		 */
		return (AE_CTRL_DEPTH);
	}

	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
		/* Non PCI/PCIe host bridge. */
		return (AE_OK);
	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
	    bbn == 0 && ++(*cntp) > 1) {
		/*
		 * If we find more than one bus with a 0 _BBN
		 * we have the problem that BigBear's BIOS shows
		 */
		return (AE_CTRL_TERMINATE);
	} else {
		/*
		 * Skip children of PCI/PCIe host bridge.
		 */
		return (AE_CTRL_DEPTH);
	}
}
1363 
1364 /*
1365  * Look for ACPI problem where _BBN is zero for multiple PCI buses
1366  * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1367  * below if it exists.
1368  */
1369 static int
1370 acpica_query_bbn_problem(void)
1371 {
1372 	ACPI_HANDLE sbobj;
1373 	int zerobbncnt;
1374 	void *rv;
1375 
1376 	zerobbncnt = 0;
1377 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1378 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1379 		    acpica_query_bbn_walker, &zerobbncnt, &rv);
1380 	}
1381 
1382 	return (zerobbncnt > 1 ? 1 : 0);
1383 }
1384 
static const char hextab[] = "0123456789ABCDEF";

/*
 * Return the 4-bit value of a hex digit character (lowercase accepted),
 * or zero if the character is not a valid hex digit.
 */
static int
hexdig(int c)
{
	/* Fold lowercase 'a'..'z' to uppercase before the table lookup. */
	int ch = (c >= 'a' && c <= 'z') ? c - ' ' : c;
	int idx;

	/* Scan from the top; index 0 doubles as the "not found" result. */
	for (idx = sizeof (hextab) - 1; idx > 0; idx--) {
		if (hextab[idx] == ch)
			break;
	}
	return (idx);
}
1404 
/*
 * Compress a 7-character ASCII EISA device name into the 4-byte form
 * used by EISA: three 5-bit letters packed into the first two octets,
 * followed by four hex digits packed two per octet.
 */
static int
CompressEisaID(char *np)
{
	union {
		char octets[4];
		int retval;
	} eisa;

	eisa.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
	eisa.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
	eisa.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
	eisa.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);

	return (eisa.retval);
}
1425 
1426 ACPI_STATUS
1427 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1428 {
1429 	ACPI_STATUS status;
1430 	ACPI_BUFFER rb;
1431 	ACPI_OBJECT ro;
1432 
1433 	rb.Pointer = &ro;
1434 	rb.Length = sizeof (ro);
1435 	if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1436 	    ACPI_TYPE_INTEGER)) == AE_OK)
1437 		*rint = ro.Integer.Value;
1438 
1439 	return (status);
1440 }
1441 
1442 static int
1443 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1444 {
1445 	ACPI_BUFFER rb;
1446 	ACPI_OBJECT *rv;
1447 
1448 	rb.Pointer = NULL;
1449 	rb.Length = ACPI_ALLOCATE_BUFFER;
1450 	if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1451 	    rb.Length != 0) {
1452 		rv = rb.Pointer;
1453 		if (rv->Type == ACPI_TYPE_INTEGER) {
1454 			*rint = rv->Integer.Value;
1455 			AcpiOsFree(rv);
1456 			return (AE_OK);
1457 		} else if (rv->Type == ACPI_TYPE_STRING) {
1458 			char *stringData;
1459 
1460 			/* Convert the string into an EISA ID */
1461 			if (rv->String.Pointer == NULL) {
1462 				AcpiOsFree(rv);
1463 				return (AE_ERROR);
1464 			}
1465 
1466 			stringData = rv->String.Pointer;
1467 
1468 			/*
1469 			 * If the string is an EisaID, it must be 7
1470 			 * characters; if it's an ACPI ID, it will be 8
1471 			 * (and we don't care about ACPI ids here).
1472 			 */
1473 			if (strlen(stringData) != 7) {
1474 				AcpiOsFree(rv);
1475 				return (AE_ERROR);
1476 			}
1477 
1478 			*rint = CompressEisaID(stringData);
1479 			AcpiOsFree(rv);
1480 			return (AE_OK);
1481 		} else
1482 			AcpiOsFree(rv);
1483 	}
1484 	return (AE_ERROR);
1485 }
1486 
1487 /*
1488  * Create linkage between devinfo nodes and ACPI nodes
1489  */
1490 ACPI_STATUS
1491 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1492 {
1493 	ACPI_STATUS status;
1494 	ACPI_BUFFER rb;
1495 
1496 	/*
1497 	 * Tag the devinfo node with the ACPI name
1498 	 */
1499 	rb.Pointer = NULL;
1500 	rb.Length = ACPI_ALLOCATE_BUFFER;
1501 	status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1502 	if (ACPI_FAILURE(status)) {
1503 		cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1504 	} else {
1505 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1506 		    "acpi-namespace", (char *)rb.Pointer);
1507 		AcpiOsFree(rb.Pointer);
1508 
1509 		/*
1510 		 * Tag the ACPI node with the dip
1511 		 */
1512 		status = acpica_set_devinfo(acpiobj, dip);
1513 		ASSERT(ACPI_SUCCESS(status));
1514 	}
1515 
1516 	return (status);
1517 }
1518 
1519 /*
1520  * Destroy linkage between devinfo nodes and ACPI nodes
1521  */
ACPI_STATUS
acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
{
	/* Drop both directions of the linkage; failures are not fatal. */
	(void) acpica_unset_devinfo(acpiobj);
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");

	return (AE_OK);
}
1530 
1531 /*
1532  * Return the ACPI device node matching the CPU dev_info node.
1533  */
1534 ACPI_STATUS
1535 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1536 {
1537 	int i;
1538 
1539 	/*
1540 	 * if cpu_map itself is NULL, we're a uppc system and
1541 	 * acpica_build_processor_map() hasn't been called yet.
1542 	 * So call it here
1543 	 */
1544 	if (cpu_map == NULL) {
1545 		(void) acpica_build_processor_map();
1546 		if (cpu_map == NULL)
1547 			return (AE_ERROR);
1548 	}
1549 
1550 	if (cpu_id < 0) {
1551 		return (AE_ERROR);
1552 	}
1553 
1554 	/*
1555 	 * search object with cpuid in cpu_map
1556 	 */
1557 	mutex_enter(&cpu_map_lock);
1558 	for (i = 0; i < cpu_map_count; i++) {
1559 		if (cpu_map[i]->cpu_id == cpu_id) {
1560 			break;
1561 		}
1562 	}
1563 	if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1564 		*rh = cpu_map[cpu_id]->obj;
1565 		mutex_exit(&cpu_map_lock);
1566 		return (AE_OK);
1567 	}
1568 
1569 	/* Handle special case for uppc-only systems. */
1570 	if (cpu_map_called == 0) {
1571 		uint32_t apicid = cpuid_get_apicid(CPU);
1572 		if (apicid != UINT32_MAX) {
1573 			for (i = 0; i < cpu_map_count; i++) {
1574 				if (cpu_map[i]->apic_id == apicid) {
1575 					break;
1576 				}
1577 			}
1578 			if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1579 				*rh = cpu_map[cpu_id]->obj;
1580 				mutex_exit(&cpu_map_lock);
1581 				return (AE_OK);
1582 			}
1583 		}
1584 	}
1585 	mutex_exit(&cpu_map_lock);
1586 
1587 	return (AE_ERROR);
1588 }
1589 
1590 /*
1591  * Determine if this object is a processor
1592  */
1593 static ACPI_STATUS
1594 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1595 {
1596 	ACPI_STATUS status;
1597 	ACPI_OBJECT_TYPE objtype;
1598 	unsigned long acpi_id;
1599 	ACPI_BUFFER rb;
1600 
1601 	if (AcpiGetType(obj, &objtype) != AE_OK)
1602 		return (AE_OK);
1603 
1604 	if (objtype == ACPI_TYPE_PROCESSOR) {
1605 		/* process a Processor */
1606 		rb.Pointer = NULL;
1607 		rb.Length = ACPI_ALLOCATE_BUFFER;
1608 		status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1609 		    ACPI_TYPE_PROCESSOR);
1610 		if (status != AE_OK) {
1611 			cmn_err(CE_WARN, "!acpica: error probing Processor");
1612 			return (status);
1613 		}
1614 		acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1615 		AcpiOsFree(rb.Pointer);
1616 	} else if (objtype == ACPI_TYPE_DEVICE) {
1617 		/* process a processor Device */
1618 		rb.Pointer = NULL;
1619 		rb.Length = ACPI_ALLOCATE_BUFFER;
1620 		status = AcpiGetObjectInfo(obj, &rb);
1621 		if (status != AE_OK) {
1622 			cmn_err(CE_WARN,
1623 			    "!acpica: error probing Processor Device\n");
1624 			return (status);
1625 		}
1626 		ASSERT(((ACPI_OBJECT *)rb.Pointer)->Type ==
1627 		    ACPI_TYPE_DEVICE);
1628 
1629 		if (ddi_strtoul(
1630 		    ((ACPI_DEVICE_INFO *)rb.Pointer)->UniqueId.Value,
1631 		    NULL, 10, &acpi_id) != 0) {
1632 			AcpiOsFree(rb.Pointer);
1633 			cmn_err(CE_WARN,
1634 			    "!acpica: error probing Processor Device _UID\n");
1635 			return (AE_ERROR);
1636 		}
1637 		AcpiOsFree(rb.Pointer);
1638 	}
1639 	(void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1640 
1641 	return (AE_OK);
1642 }
1643 
/*
 * Build the devinfo-to-ACPI ("d2a") linkage for all child-of-root PCI
 * buses: each matched devinfo node is tagged with its ACPI pathname via
 * acpica_tag_devinfo(), then the subtree is enumerated recursively.
 * scanning_d2a_map is raised for the duration of the scan, and d2a_done
 * is set on completion so acpica_get_handle() triggers at most one scan.
 */
static void
scan_d2a_map(void)
{
	dev_info_t *dip, *cdip;
	ACPI_HANDLE acpiobj;
	char *device_type_prop;
	int bus;
	static int map_error = 0;	/* sticky: never rescan after failure */

	if (map_error)
		return;

	scanning_d2a_map = 1;

	/*
	 * Find all child-of-root PCI buses, and find their corresponding
	 * ACPI child-of-root PCI nodes.  For each one, add to the
	 * d2a table.
	 */

	for (dip = ddi_get_child(ddi_root_node());
	    dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {

		/* prune non-PCI nodes */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS,
		    "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
			continue;

		if ((strcmp("pci", device_type_prop) != 0) &&
		    (strcmp("pciex", device_type_prop) != 0)) {
			ddi_prop_free(device_type_prop);
			continue;
		}

		ddi_prop_free(device_type_prop);

		/*
		 * To get bus number of dip, get first child and get its
		 * bus number.  If NULL, just continue, because we don't
		 * care about bus nodes with no children anyway.
		 */
		if ((cdip = ddi_get_child(dip)) == NULL)
			continue;

		if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "Can't get bus number of PCI child?");
#endif
			/* Give up entirely: mark done so we never retry. */
			map_error = 1;
			scanning_d2a_map = 0;
			d2a_done = 1;
			return;
		}

		if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
#endif
			/* Remember the failure but keep scanning other buses. */
			map_error = 1;
			continue;
		}

		acpica_tag_devinfo(dip, acpiobj);

		/* call recursively to enumerate subtrees */
		scan_d2a_subtree(dip, acpiobj, bus);
	}

	scanning_d2a_map = 0;
	d2a_done = 1;
}
1717 
1718 /*
1719  * For all acpi child devices of acpiobj, find their matching
1720  * dip under "dip" argument.  (matching means "matches dev/fn").
1721  * bus is assumed to already be a match from caller, and is
1722  * used here only to record in the d2a entry.  Recurse if necessary.
1723  */
static void
scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
{
	int acpi_devfn, hid;
	ACPI_HANDLE acld;
	dev_info_t *dcld;
	int dcld_b, dcld_d, dcld_f;
	int dev, func;
	char *device_type_prop;

	/* Iterate over all ACPI child devices of acpiobj. */
	acld = NULL;
	while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
	    == AE_OK) {
		/* get the dev/func we're looking for in the devinfo tree */
		if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
			continue;
		/* _ADR for PCI encodes device in the high word, function low */
		dev = (acpi_devfn >> 16) & 0xFFFF;
		func = acpi_devfn & 0xFFFF;

		/* look through all the immediate children of dip */
		for (dcld = ddi_get_child(dip); dcld != NULL;
		    dcld = ddi_get_next_sibling(dcld)) {
			if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
				continue;

			/* dev must match; function must match or wildcard */
			if (dcld_d != dev ||
			    (func != 0xFFFF && func != dcld_f))
				continue;
			/* Adopt the child's actual bus number for recursion. */
			bus = dcld_b;

			/* found a match, record it */
			acpica_tag_devinfo(dcld, acld);

			/* if we find a bridge, recurse from here */
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
			    DDI_PROP_DONTPASS, "device_type",
			    &device_type_prop) == DDI_PROP_SUCCESS) {
				if ((strcmp("pci", device_type_prop) == 0) ||
				    (strcmp("pciex", device_type_prop) == 0))
					scan_d2a_subtree(dcld, acld, bus);
				ddi_prop_free(device_type_prop);
			}

			/* done finding a match, so break now */
			break;
		}
	}
}
1773 
1774 /*
1775  * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1776  */
1777 int
1778 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1779 {
1780 	pci_regspec_t *pci_rp;
1781 	int len;
1782 
1783 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1784 	    "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1785 		return (-1);
1786 
1787 	if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1788 		ddi_prop_free(pci_rp);
1789 		return (-1);
1790 	}
1791 	if (bus != NULL)
1792 		*bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1793 	if (device != NULL)
1794 		*device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1795 	if (func != NULL)
1796 		*func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1797 	ddi_prop_free(pci_rp);
1798 	return (0);
1799 }
1800 
1801 /*
1802  * Return the ACPI device node matching this dev_info node, if it
1803  * exists in the ACPI tree.
1804  */
1805 ACPI_STATUS
1806 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1807 {
1808 	ACPI_STATUS status;
1809 	char *acpiname;
1810 
1811 	if (!d2a_done)
1812 		scan_d2a_map();
1813 
1814 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1815 	    "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1816 		return (AE_ERROR);
1817 	}
1818 
1819 	status = AcpiGetHandle(NULL, acpiname, rh);
1820 	ddi_prop_free((void *)acpiname);
1821 	return (status);
1822 }
1823 
1824 
1825 
1826 /*
1827  * Manage OS data attachment to ACPI nodes
1828  */
1829 
1830 /*
1831  * Return the (dev_info_t *) associated with the ACPI node.
1832  */
1833 ACPI_STATUS
1834 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1835 {
1836 	ACPI_STATUS status;
1837 	void *ptr;
1838 
1839 	status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1840 	if (status == AE_OK)
1841 		*dipp = (dev_info_t *)ptr;
1842 
1843 	return (status);
1844 }
1845 
1846 /*
1847  * Set the dev_info_t associated with the ACPI node.
1848  */
1849 static ACPI_STATUS
1850 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1851 {
1852 	ACPI_STATUS status;
1853 
1854 	status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1855 	return (status);
1856 }
1857 
1858 /*
1859  * Unset the dev_info_t associated with the ACPI node.
1860  */
static ACPI_STATUS
acpica_unset_devinfo(ACPI_HANDLE obj)
{
	/* Detach the dev_info pointer previously attached to the node. */
	return (AcpiDetachData(obj, acpica_devinfo_handler));
}
1866 
1867 /*
1868  *
1869  */
1870 void
1871 acpica_devinfo_handler(ACPI_HANDLE obj, UINT32 func, void *data)
1872 {
1873 	/* noop */
1874 }
1875 
/*
 * Enumerate ACPI Processor objects and processor Device objects
 * ("ACPI0007") and record each in the cpu map via
 * acpica_probe_processor().  Idempotent: once cpu_map_built is set,
 * subsequent calls return AE_OK immediately.
 */
ACPI_STATUS
acpica_build_processor_map(void)
{
	ACPI_STATUS status;
	void *rv;

	/*
	 * shouldn't be called more than once anyway
	 */
	if (cpu_map_built)
		return (AE_OK);

	/*
	 * ACPI device configuration driver has built mapping information
	 * among processor id and object handle, no need to probe again.
	 */
	if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		cpu_map_built = 1;
		return (AE_OK);
	}

	/*
	 * Look for Processor objects
	 */
	status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
	    ACPI_ROOT_OBJECT,
	    4,
	    acpica_probe_processor,
	    NULL,
	    &rv);
	ASSERT(status == AE_OK);

	/*
	 * Look for processor Device objects
	 */
	status = AcpiGetDevices("ACPI0007",
	    acpica_probe_processor,
	    NULL,
	    &rv);
	ASSERT(status == AE_OK);
	cpu_map_built = 1;

	return (status);
}
1920 
1921 /*
1922  * Grow cpu map table on demand.
1923  */
1924 static void
1925 acpica_grow_cpu_map(void)
1926 {
1927 	if (cpu_map_count == cpu_map_count_max) {
1928 		size_t sz;
1929 		struct cpu_map_item **new_map;
1930 
1931 		ASSERT(cpu_map_count_max < INT_MAX / 2);
1932 		cpu_map_count_max += max_ncpus;
1933 		new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1934 		    KM_SLEEP);
1935 		if (cpu_map_count != 0) {
1936 			ASSERT(cpu_map != NULL);
1937 			sz = sizeof (cpu_map[0]) * cpu_map_count;
1938 			kcopy(cpu_map, new_map, sz);
1939 			kmem_free(cpu_map, sz);
1940 		}
1941 		cpu_map = new_map;
1942 	}
1943 }
1944 
1945 /*
1946  * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1947  * ACPI handle). The mapping table will be setup in two steps:
1948  * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1949  *    processor id and ACPI object handle.
1950  * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
 * On systems which have ACPI device configuration for CPUs enabled,
1952  * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
1953  * otherwise acpica_map_cpu() will be called before
1954  * acpica_add_processor_to_map().
1955  */
ACPI_STATUS
acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(obj != NULL);
	if (obj == NULL) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);

	/*
	 * Special case for uppc
	 * If we're a uppc system and ACPI device configuration for CPU has
	 * been disabled, there won't be a CPU map yet because uppc psm doesn't
	 * call acpica_map_cpu(). So create one and use the passed-in processor
	 * as CPU 0
	 * Assumption: the first CPU returned by
	 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
	 * Unfortunately there appears to be no good way to ASSERT this.
	 */
	if (cpu_map == NULL &&
	    !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		acpica_grow_cpu_map();
		ASSERT(cpu_map != NULL);
		item = kmem_zalloc(sizeof (*item), KM_SLEEP);
		item->cpu_id = 0;
		item->proc_id = acpi_id;
		item->apic_id = apic_id;
		item->obj = obj;
		cpu_map[0] = item;
		cpu_map_count = 1;
		mutex_exit(&cpu_map_lock);
		return (AE_OK);
	}

	/*
	 * Look for an existing entry: by object handle (duplicate) or
	 * by ACPI processor id (pre-created by acpica_map_cpu()).
	 */
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->obj == obj) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}

	if (rc == AE_OK) {
		if (item != NULL) {
			/*
			 * ACPI alias objects may cause more than one objects
			 * with the same ACPI processor id, only remember the
			 * first object encountered.
			 */
			if (item->obj == NULL) {
				item->obj = obj;
				item->apic_id = apic_id;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			/* Guard against pathological map growth. */
			rc = AE_NO_MEMORY;
		} else {
			/* New processor: append a fresh, unbound entry. */
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			item->cpu_id = -1;	/* not yet mapped to a cpu id */
			item->proc_id = acpi_id;
			item->apic_id = apic_id;
			item->obj = obj;
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}

	mutex_exit(&cpu_map_lock);

	return (rc);
}
2038 
/*
 * Forget the ACPI object handle recorded for the given ACPI processor
 * id.  The map entry itself is freed (swap-with-last removal) only when
 * no cpu id still references it.
 */
ACPI_STATUS
acpica_remove_processor_from_map(UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_NOT_EXIST;

	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->proc_id != acpi_id) {
			continue;
		}
		cpu_map[i]->obj = NULL;
		/* Free item if no more reference to it. */
		if (cpu_map[i]->cpu_id == -1) {
			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
			cpu_map[i] = NULL;
			cpu_map_count--;
			/* Keep the array dense: move the last entry down. */
			if (i != cpu_map_count) {
				cpu_map[i] = cpu_map[cpu_map_count];
				cpu_map[cpu_map_count] = NULL;
			}
		}
		rc = AE_OK;
		break;
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2068 
/*
 * Bind a Solaris cpu id to the map entry carrying the matching ACPI
 * processor id, creating a handle-less entry if the processor object
 * has not been probed yet.  Also records in cpu_map_called that a psm
 * performed cpu mapping, which acpica_get_handle_cpu() uses for its
 * uppc special case.
 */
ACPI_STATUS
acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(cpuid != -1);
	if (cpuid == -1) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);
	cpu_map_called = 1;
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id == cpuid) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}
	if (rc == AE_OK) {
		if (item != NULL) {
			/* Entry exists from probing; bind the cpu id to it. */
			if (item->cpu_id == -1) {
				item->cpu_id = cpuid;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			/* Guard against pathological map growth. */
			rc = AE_NO_MEMORY;
		} else {
			/* Not probed yet: create an entry with no handle. */
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			item->cpu_id = cpuid;
			item->proc_id = acpi_id;
			item->apic_id = UINT32_MAX;
			item->obj = NULL;
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2118 
/*
 * Release the cpu id binding for the given cpu.  The map entry is freed
 * (swap-with-last removal) only when no ACPI object handle still
 * references it.
 */
ACPI_STATUS
acpica_unmap_cpu(processorid_t cpuid)
{
	int i;
	ACPI_STATUS rc = AE_NOT_EXIST;

	ASSERT(cpuid != -1);
	if (cpuid == -1) {
		return (rc);
	}

	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id != cpuid) {
			continue;
		}
		cpu_map[i]->cpu_id = -1;
		/* Free item if no more reference. */
		if (cpu_map[i]->obj == NULL) {
			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
			cpu_map[i] = NULL;
			cpu_map_count--;
			/* Keep the array dense: move the last entry down. */
			if (i != cpu_map_count) {
				cpu_map[i] = cpu_map[cpu_map_count];
				cpu_map[cpu_map_count] = NULL;
			}
		}
		rc = AE_OK;
		break;
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2153 
2154 ACPI_STATUS
2155 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2156 {
2157 	int i;
2158 	ACPI_STATUS rc = AE_NOT_EXIST;
2159 
2160 	ASSERT(cpuid != -1);
2161 	if (cpuid == -1) {
2162 		return (rc);
2163 	}
2164 
2165 	mutex_enter(&cpu_map_lock);
2166 	for (i = 0; i < cpu_map_count; i++) {
2167 		if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2168 			*hdlp = cpu_map[i]->obj;
2169 			rc = AE_OK;
2170 			break;
2171 		}
2172 	}
2173 	mutex_exit(&cpu_map_lock);
2174 
2175 	return (rc);
2176 }
2177 
2178 ACPI_STATUS
2179 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2180 {
2181 	int i;
2182 	ACPI_STATUS rc = AE_NOT_EXIST;
2183 
2184 	mutex_enter(&cpu_map_lock);
2185 	for (i = 0; i < cpu_map_count; i++) {
2186 		if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2187 			*hdlp = cpu_map[i]->obj;
2188 			rc = AE_OK;
2189 			break;
2190 		}
2191 	}
2192 	mutex_exit(&cpu_map_lock);
2193 
2194 	return (rc);
2195 }
2196 
2197 ACPI_STATUS
2198 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2199 {
2200 	int i;
2201 	ACPI_STATUS rc = AE_NOT_EXIST;
2202 
2203 	ASSERT(apicid != UINT32_MAX);
2204 	if (apicid == UINT32_MAX) {
2205 		return (rc);
2206 	}
2207 
2208 	mutex_enter(&cpu_map_lock);
2209 	for (i = 0; i < cpu_map_count; i++) {
2210 		if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2211 			*hdlp = cpu_map[i]->obj;
2212 			rc = AE_OK;
2213 			break;
2214 		}
2215 	}
2216 	mutex_exit(&cpu_map_lock);
2217 
2218 	return (rc);
2219 }
2220 
void
acpica_set_core_feature(uint64_t features)
{
	/* Atomically set bits in the mask of active ACPICA core features. */
	atomic_or_64(&acpica_core_features, features);
}
2226 
void
acpica_clear_core_feature(uint64_t features)
{
	/* Atomically clear bits in the mask of active ACPICA core features. */
	atomic_and_64(&acpica_core_features, ~features);
}
2232 
uint64_t
acpica_get_core_feature(uint64_t features)
{
	/* Return the subset of the requested core feature bits that are set. */
	return (acpica_core_features & features);
}
2238 
void
acpica_set_devcfg_feature(uint64_t features)
{
	/* Atomically set bits in the device-configuration feature mask. */
	atomic_or_64(&acpica_devcfg_features, features);
}
2244 
void
acpica_clear_devcfg_feature(uint64_t features)
{
	/* Atomically clear bits in the device-configuration feature mask. */
	atomic_and_64(&acpica_devcfg_features, ~features);
}
2250 
uint64_t
acpica_get_devcfg_feature(uint64_t features)
{
	/* Return the subset of the requested devcfg feature bits that are set. */
	return (acpica_devcfg_features & features);
}
2256 
void
acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
{
	/* Hand back a pointer to ACPICA's in-core copy of the FADT. */
	*gbl_FADT = &AcpiGbl_FADT;
}
2262 
/*
 * Notify the firmware, via the FADT SMI command port, that the OS is
 * taking over P-state and/or C-state control — but only when the FADT
 * advertises a non-zero command value for that capability.
 */
void
acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
{
	if (pstates && AcpiGbl_FADT.PstateControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.PstateControl);

	if (cstates && AcpiGbl_FADT.CstControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.CstControl);
}
2274