xref: /titanic_51/usr/src/uts/intel/io/acpica/osl.c (revision 7ff836697c120cb94bd30d5c2204eb9b74718e4c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright (c) 2009, Intel Corporation.
28  * All rights reserved.
29  */
30 /*
31  * ACPI CA OSL for Solaris x86
32  */
33 
34 #include <sys/types.h>
35 #include <sys/kmem.h>
36 #include <sys/psm.h>
37 #include <sys/pci_cfgspace.h>
38 #include <sys/apic.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/sunndi.h>
42 #include <sys/pci.h>
43 #include <sys/kobj.h>
44 #include <sys/taskq.h>
45 #include <sys/strlog.h>
46 #include <sys/note.h>
47 #include <sys/promif.h>
48 
49 #include <sys/acpi/acpi.h>
50 #include <sys/acpi/accommon.h>
51 #include <sys/acpica.h>
52 
/* upper bound on the size of an override table file read at boot */
#define	MAX_DAT_FILE_SIZE	(64*1024)

/* local functions */
static int CompressEisaID(char *np);

static void scan_d2a_map(void);
static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);

static int acpica_query_bbn_problem(void);
static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
static void acpica_devinfo_handler(ACPI_HANDLE, UINT32, void *);

/*
 * Event queue vars
 */
/* non-zero once init_event_queues() has created the taskqs below */
int acpica_eventq_init = 0;
/* one single-threaded taskq per ACPI_EXECUTE_TYPE, created lazily */
ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];

/*
 * Priorities relative to minclsyspri that each taskq
 * run at; OSL_NOTIFY_HANDLER needs to run at a higher
 * priority than OSL_GPE_HANDLER.  There's an implicit
 * assumption that no priority here results in exceeding
 * maxclsyspri.
 * Note: these initializations need to match the order of
 * ACPI_EXECUTE_TYPE.
 */
int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
	0,	/* OSL_GLOBAL_LOCK_HANDLER */
	2,	/* OSL_NOTIFY_HANDLER */
	0,	/* OSL_GPE_HANDLER */
	0,	/* OSL_DEBUGGER_THREAD */
	0,	/* OSL_EC_POLL_HANDLER */
	0	/* OSL_EC_BURST_HANDLER */
};

/*
 * Note, if you change this path, you need to update
 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
 */
/* directory searched by AcpiOsTableOverride() for replacement tables */
static char *acpi_table_path = "/boot/acpi/tables/";

/* non-zero while scan_d2a_map() is working */
static int scanning_d2a_map = 0;
static int d2a_done = 0;

/* features supported by ACPICA and ACPI device configuration. */
uint64_t acpica_core_features = 0;
static uint64_t acpica_devcfg_features = 0;

/* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
int acpica_use_safe_delay = 0;

/* CPU mapping data */
struct cpu_map_item {
	processorid_t	cpu_id;		/* Solaris logical CPU id */
	UINT32		proc_id;	/* ACPI processor id */
	UINT32		apic_id;	/* local APIC id */
	ACPI_HANDLE	obj;		/* ACPI processor object handle */
};

/* protects the cpu_map state below */
static kmutex_t cpu_map_lock;
static struct cpu_map_item **cpu_map = NULL;
static int cpu_map_count_max = 0;	/* allocated slots in cpu_map */
static int cpu_map_count = 0;		/* slots in use */
static int cpu_map_built = 0;

/* -1 = not yet probed; set by acpica_query_bbn_problem() on first use */
static int acpi_has_broken_bbn = -1;

/* buffer for AcpiOsVprintf() */
#define	ACPI_OSL_PR_BUFLEN	1024
static char *acpi_osl_pr_buffer = NULL;
static int acpi_osl_pr_buflen;

#define	D2A_DEBUG
131 
132 /*
133  *
134  */
135 static void
136 discard_event_queues()
137 {
138 	int	i;
139 
140 	/*
141 	 * destroy event queues
142 	 */
143 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
144 		if (osl_eventq[i])
145 			ddi_taskq_destroy(osl_eventq[i]);
146 	}
147 }
148 
149 
/*
 * Create one taskq per ACPI_EXECUTE_TYPE for AcpiOsExecute() dispatch.
 * Returns AE_OK on success; on any creation failure all queues are
 * destroyed again and AE_ERROR is returned.
 */
static ACPI_STATUS
init_event_queues()
{
	char	namebuf[32];
	int	i, error = 0;

	/*
	 * Initialize event queues
	 */

	/* Always allocate only 1 thread per queue to force FIFO execution */
	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
		snprintf(namebuf, 32, "ACPI%d", i);
		osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
		    osl_eventq_pri_delta[i] + minclsyspri, 0);
		if (osl_eventq[i] == NULL)
			error++;
	}

	if (error != 0) {
		discard_event_queues();
#ifdef	DEBUG
		cmn_err(CE_WARN, "!acpica: could not initialize event queues");
#endif
		return (AE_ERROR);
	}

	acpica_eventq_init = 1;
	return (AE_OK);
}
183 
/*
 * One-time initialization of OSL layer
 */
ACPI_STATUS
AcpiOsInitialize(void)
{
	/*
	 * Allocate buffer for AcpiOsVprintf() here to avoid
	 * kmem_alloc()/kmem_free() at high PIL
	 */
	acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
	/* KM_SLEEP allocations don't fail; the NULL check is defensive */
	if (acpi_osl_pr_buffer != NULL)
		acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;

	return (AE_OK);
}
200 
201 /*
202  * One-time shut-down of OSL layer
203  */
204 ACPI_STATUS
205 AcpiOsTerminate(void)
206 {
207 
208 	if (acpi_osl_pr_buffer != NULL)
209 		kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
210 
211 	discard_event_queues();
212 	return (AE_OK);
213 }
214 
215 
/*
 * Return the physical address of the ACPI root table (RSDP).
 */
ACPI_PHYSICAL_ADDRESS
AcpiOsGetRootPointer()
{
	ACPI_PHYSICAL_ADDRESS Address;

	/*
	 * For EFI firmware, the root pointer is defined in EFI systab.
	 * The boot code process the table and put the physical address
	 * in the acpi-root-tab property.
	 */
	Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "acpi-root-tab", NULL);

	/* no property: fall back to ACPICA's legacy BIOS memory scan */
	if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
		Address = NULL;

	return (Address);
}
234 
/*ARGSUSED*/
ACPI_STATUS
AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
				ACPI_STRING *NewVal)
{
	/* no predefined-object overrides on this platform */
	*NewVal = 0;
	return (AE_OK);
}
244 
/*
 * Copy at most 'len' characters of 'src' into 'dest' and always
 * NUL-terminate the result.  'dest' must have room for len + 1
 * bytes.  Unlike strncpy(), termination is guaranteed and the
 * remainder of the buffer is not zero-padded.
 */
static void
acpica_strncpy(char *dest, const char *src, int len)
{
	char	*dp = dest;
	int	remaining = len;

	for (;;) {
		char ch = *src++;

		*dp++ = ch;
		if (ch == '\0' || --remaining <= 0)
			break;
	}
	*dp = '\0';
}
254 
255 ACPI_STATUS
256 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
257 			ACPI_TABLE_HEADER **NewTable)
258 {
259 	char signature[5];
260 	char oemid[7];
261 	char oemtableid[9];
262 	struct _buf *file;
263 	char *buf1, *buf2;
264 	int count;
265 	char acpi_table_loc[128];
266 
267 	acpica_strncpy(signature, ExistingTable->Signature, 4);
268 	acpica_strncpy(oemid, ExistingTable->OemId, 6);
269 	acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
270 
271 #ifdef	DEBUG
272 	cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
273 	    " OEM TABLE ID [%s] OEM rev %x",
274 	    signature, ExistingTable->Revision, oemid, oemtableid,
275 	    ExistingTable->OemRevision);
276 #endif
277 
278 	/* File name format is "signature_oemid_oemtableid.dat" */
279 	(void) strcpy(acpi_table_loc, acpi_table_path);
280 	(void) strcat(acpi_table_loc, signature); /* for example, DSDT */
281 	(void) strcat(acpi_table_loc, "_");
282 	(void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
283 	(void) strcat(acpi_table_loc, "_");
284 	(void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
285 	(void) strcat(acpi_table_loc, ".dat");
286 
287 	file = kobj_open_file(acpi_table_loc);
288 	if (file == (struct _buf *)-1) {
289 		*NewTable = 0;
290 		return (AE_OK);
291 	} else {
292 		buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
293 		count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
294 		if (count >= MAX_DAT_FILE_SIZE) {
295 			cmn_err(CE_WARN, "!acpica: table %s file size too big",
296 			    acpi_table_loc);
297 			*NewTable = 0;
298 		} else {
299 			buf2 = (char *)kmem_alloc(count, KM_SLEEP);
300 			(void) memcpy(buf2, buf1, count);
301 			*NewTable = (ACPI_TABLE_HEADER *)buf2;
302 			cmn_err(CE_NOTE, "!acpica: replacing table: %s",
303 			    acpi_table_loc);
304 		}
305 	}
306 	kobj_close_file(file);
307 	kmem_free(buf1, MAX_DAT_FILE_SIZE);
308 
309 	return (AE_OK);
310 }
311 
312 
/*
 * ACPI semaphore implementation: a counting semaphore built from a
 * kernel mutex and condition variable.
 */
typedef struct {
	kmutex_t	mutex;		/* protects the counters below */
	kcondvar_t	cv;		/* signalled when units are returned */
	uint32_t	available;	/* units currently available */
	uint32_t	initial;	/* units at creation time */
	uint32_t	maximum;	/* advisory cap (not enforced) */
} acpi_sema_t;
323 
/*
 * Initialize a semaphore with 'count' available units and an
 * advisory maximum of 'max'.
 */
void
acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
{
	mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
	/* no need to enter mutex here at creation */
	sp->available = count;
	sp->initial = count;
	sp->maximum = max;
}
337 
/*
 * Destroy a semaphore; caller must guarantee there are no waiters.
 */
void
acpi_sema_destroy(acpi_sema_t *sp)
{

	cv_destroy(&sp->cv);
	mutex_destroy(&sp->mutex);
}
348 
/*
 * Semaphore P (acquire) operation: take 'count' units, blocking for
 * at most 'wait_time' milliseconds (ACPI_WAIT_FOREVER = no limit,
 * 0 = non-blocking).  Returns AE_OK on success, AE_TIME on timeout.
 */
ACPI_STATUS
acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
{
	ACPI_STATUS rv = AE_OK;
	clock_t deadline;

	mutex_enter(&sp->mutex);

	if (sp->available >= count) {
		/*
		 * Enough units available, no blocking
		 */
		sp->available -= count;
		mutex_exit(&sp->mutex);
		return (rv);
	} else if (wait_time == 0) {
		/*
		 * Not enough units available and timeout
		 * specifies no blocking
		 */
		rv = AE_TIME;
		mutex_exit(&sp->mutex);
		return (rv);
	}

	/*
	 * Not enough units available and timeout specifies waiting
	 */
	if (wait_time != ACPI_WAIT_FOREVER)
		deadline = ddi_get_lbolt() +
		    (clock_t)drv_usectohz(wait_time * 1000);

	do {
		if (wait_time == ACPI_WAIT_FOREVER)
			cv_wait(&sp->cv, &sp->mutex);
		else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
			/* cv_timedwait() returns -1 when the deadline passed */
			rv = AE_TIME;
			break;
		}
	} while (sp->available < count);

	/* if we dropped out of the wait with AE_OK, we got the units */
	if (rv == AE_OK)
		sp->available -= count;

	mutex_exit(&sp->mutex);
	return (rv);
}
400 
/*
 * Semaphore V (release) operation: return 'count' units and wake
 * all waiters so they can re-check availability.
 */
void
acpi_sema_v(acpi_sema_t *sp, unsigned count)
{
	mutex_enter(&sp->mutex);
	sp->available += count;
	cv_broadcast(&sp->cv);
	mutex_exit(&sp->mutex);
}
412 
413 
/*
 * ACPICA OSL entry: allocate and initialize a counting semaphore.
 * The opaque handle returned is a pointer to an acpi_sema_t.
 */
ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
ACPI_HANDLE *OutHandle)
{
	acpi_sema_t *sp;

	if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
		return (AE_BAD_PARAMETER);

	sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
	acpi_sema_init(sp, MaxUnits, InitialUnits);
	*OutHandle = (ACPI_HANDLE)sp;
	return (AE_OK);
}
428 
429 
/*
 * ACPICA OSL entry: destroy and free a semaphore created by
 * AcpiOsCreateSemaphore().
 */
ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{

	if (Handle == NULL)
		return (AE_BAD_PARAMETER);

	acpi_sema_destroy((acpi_sema_t *)Handle);
	kmem_free((void *)Handle, sizeof (acpi_sema_t));
	return (AE_OK);
}
441 
/*
 * ACPICA OSL entry: acquire Units from the semaphore, waiting at
 * most Timeout milliseconds.  Thin wrapper over acpi_sema_p().
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{

	if ((Handle == NULL) || (Units < 1))
		return (AE_BAD_PARAMETER);

	return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
}
451 
/*
 * ACPICA OSL entry: release Units back to the semaphore.
 */
ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{

	if ((Handle == NULL) || (Units < 1))
		return (AE_BAD_PARAMETER);

	acpi_sema_v((acpi_sema_t *)Handle, Units);
	return (AE_OK);
}
462 
/*
 * ACPICA OSL entry: allocate and initialize a spinlock-style lock.
 * Implemented as a kernel adaptive mutex; the handle is the mutex.
 */
ACPI_STATUS
AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
{
	kmutex_t *mp;

	if (OutHandle == NULL)
		return (AE_BAD_PARAMETER);

	mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
	mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
	*OutHandle = (ACPI_HANDLE)mp;
	return (AE_OK);
}
476 
/*
 * ACPICA OSL entry: destroy and free a lock from AcpiOsCreateLock().
 */
void
AcpiOsDeleteLock(ACPI_HANDLE Handle)
{

	if (Handle == NULL)
		return;

	mutex_destroy((kmutex_t *)Handle);
	kmem_free((void *)Handle, sizeof (kmutex_t));
}
487 
/*
 * ACPICA OSL entry: acquire a lock created by AcpiOsCreateLock().
 * NOTE(review): the return type is ACPI_CPU_FLAGS but AE_* status
 * codes are returned; the value is passed back to AcpiOsReleaseLock,
 * which ignores it — confirm against the ACPICA OSL interface spec.
 */
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_HANDLE Handle)
{


	if (Handle == NULL)
		return (AE_BAD_PARAMETER);

	/* the idle thread must never block; spin instead of sleeping */
	if (curthread == CPU->cpu_idle_thread) {
		while (!mutex_tryenter((kmutex_t *)Handle))
			/* spin */;
	} else
		mutex_enter((kmutex_t *)Handle);
	return (AE_OK);
}
503 
/*
 * ACPICA OSL entry: release a lock taken by AcpiOsAcquireLock().
 * Flags (the value AcpiOsAcquireLock returned) is unused here.
 */
void
AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
{
	_NOTE(ARGUNUSED(Flags))

	mutex_exit((kmutex_t *)Handle);
}
511 
512 
/*
 * ACPICA OSL allocator.  kmem_free() requires the size, so the
 * allocation size is stashed in a hidden ACPI_SIZE header just
 * before the pointer handed to the caller; AcpiOsFree() reads it.
 */
void *
AcpiOsAllocate(ACPI_SIZE Size)
{
	ACPI_SIZE *tmp_ptr;

	Size += sizeof (Size);	/* room for the hidden size header */
	tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
	*tmp_ptr++ = Size;	/* record total size, return area after it */
	return (tmp_ptr);
}
523 
/*
 * ACPICA OSL free.  Steps back to the hidden size header written by
 * AcpiOsAllocate() to recover the original allocation size.
 */
void
AcpiOsFree(void *Memory)
{
	ACPI_SIZE	size, *tmp_ptr;

	tmp_ptr = (ACPI_SIZE *)Memory;
	tmp_ptr -= 1;		/* back up to the size header */
	size = *tmp_ptr;
	kmem_free(tmp_ptr, size);
}
534 
static int napics_found;	/* number of ioapic addresses in array */
static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;	/* cached MADT; NULL = unparsed */
static void *dummy_ioapicadr;	/* scratch page returned instead of ioapics */
539 
/*
 * Parse the MADT once and record the (page-aligned) physical address
 * of every I/O APIC, so AcpiOsMapMemory() can redirect AML accesses
 * to those pages to a harmless dummy page.
 */
void
acpica_find_ioapics(void)
{
	int			madt_seen, madt_size;
	ACPI_SUBTABLE_HEADER		*ap;
	ACPI_MADT_IO_APIC		*mia;

	if (acpi_mapic_dtp != NULL)
		return;	/* already parsed table */
	if (AcpiGetTable(ACPI_SIG_MADT, 1,
	    (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
		return;

	napics_found = 0;

	/*
	 * Search the MADT for ioapics
	 */
	ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
	madt_size = acpi_mapic_dtp->Header.Length;
	madt_seen = sizeof (*acpi_mapic_dtp);

	while (madt_seen < madt_size) {

		switch (ap->Type) {
		case ACPI_MADT_TYPE_IO_APIC:
			mia = (ACPI_MADT_IO_APIC *) ap;
			if (napics_found < MAX_IO_APIC) {
				/* store page-aligned base for quick compare */
				ioapic_paddr[napics_found++] =
				    (ACPI_PHYSICAL_ADDRESS)
				    (mia->Address & PAGEMASK);
			}
			break;

		default:
			break;
		}

		/* advance to next entry */
		madt_seen += ap->Length;
		ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
	}
	/* allocate the dummy page handed out by AcpiOsMapMemory() */
	if (dummy_ioapicadr == NULL)
		dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
}
585 
586 
/*
 * ACPICA OSL entry: map a physical range into kernel virtual space.
 * Accesses that fall on a known I/O APIC page are redirected to a
 * zeroed dummy page so AML cannot disturb interrupt routing.
 */
void *
AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
{
	int	i;

	/*
	 * If the ioapic address table is populated, check if trying
	 * to access an ioapic.  Instead, return a pointer to a dummy ioapic.
	 */
	for (i = 0; i < napics_found; i++) {
		if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
			return (dummy_ioapicadr);
	}
	/* FUTUREWORK: test PhysicalAddress for > 32 bits */
	return (psm_map_new((paddr_t)PhysicalAddress,
	    (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
}
604 
/*
 * ACPICA OSL entry: undo an AcpiOsMapMemory() mapping.  The dummy
 * ioapic page was never actually mapped, so it is simply skipped.
 */
void
AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
{
	/*
	 * Check if trying to unmap dummy ioapic address.
	 */
	if (LogicalAddress == dummy_ioapicadr)
		return;

	psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
}
616 
/*ARGSUSED*/
ACPI_STATUS
AcpiOsGetPhysicalAddress(void *LogicalAddress,
			ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
{

	/* UNIMPLEMENTED: not invoked by ACPI CA code */
	return (AE_NOT_IMPLEMENTED);
}
626 
627 
ACPI_OSD_HANDLER acpi_isr;	/* SCI handler ACPICA registered with us */
void *acpi_isr_context;		/* opaque argument for acpi_isr */
630 
631 uint_t
632 acpi_wrapper_isr(char *arg)
633 {
634 	_NOTE(ARGUNUSED(arg))
635 
636 	int	status;
637 
638 	status = (*acpi_isr)(acpi_isr_context);
639 
640 	if (status == ACPI_INTERRUPT_HANDLED) {
641 		return (DDI_INTR_CLAIMED);
642 	} else {
643 		return (DDI_INTR_UNCLAIMED);
644 	}
645 }
646 
647 static int acpi_intr_hooked = 0;
648 
/*
 * ACPICA OSL entry: attach ACPICA's SCI handler.  The supplied
 * InterruptNumber is ignored; the platform SCI vector/flags are
 * obtained from acpica_get_sci() instead.
 */
ACPI_STATUS
AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
		ACPI_OSD_HANDLER ServiceRoutine,
		void *Context)
{
	_NOTE(ARGUNUSED(InterruptNumber))

	int retval;
	int sci_vect;
	iflag_t sci_flags;

	/* stash handler/context for acpi_wrapper_isr() */
	acpi_isr = ServiceRoutine;
	acpi_isr_context = Context;

	/*
	 * Get SCI (adjusted for PIC/APIC mode if necessary)
	 */
	if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
		return (AE_ERROR);
	}

#ifdef	DEBUG
	cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
#endif

	retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
	    "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
	if (retval) {
		acpi_intr_hooked = 1;
		return (AE_OK);
	} else
		return (AE_BAD_PARAMETER);
}
682 
/*
 * ACPICA OSL entry: detach the SCI handler added by
 * AcpiOsInstallInterruptHandler(), if one was hooked.
 * NOTE(review): removal uses IPL LOCK_LEVEL - 1 while installation
 * used SCI_IPL — confirm these refer to the same level.
 */
ACPI_STATUS
AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
			ACPI_OSD_HANDLER ServiceRoutine)
{
	_NOTE(ARGUNUSED(ServiceRoutine))

#ifdef	DEBUG
	cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
#endif
	if (acpi_intr_hooked) {
		rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
		    InterruptNumber);
		acpi_intr_hooked = 0;
	}
	return (AE_OK);
}
699 
700 
/*
 * ACPICA OSL entry: return a unique, non-zero id for the caller.
 */
ACPI_THREAD_ID
AcpiOsGetThreadId(void)
{
	/*
	 * ACPI CA doesn't care what actual value is returned as long
	 * as it is non-zero and unique to each existing thread.
	 * ACPI CA assumes that thread ID is castable to a pointer,
	 * so we use the current thread pointer.
	 */
	return (curthread);
}
712 
/*
 * ACPICA OSL entry: queue Function(Context) for asynchronous
 * execution on the single-threaded taskq matching Type.  The taskqs
 * are created lazily on first use.
 */
ACPI_STATUS
AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK  Function,
    void *Context)
{

	if (!acpica_eventq_init) {
		/*
		 * Create taskqs for event handling
		 */
		if (init_event_queues() != AE_OK)
			return (AE_ERROR);
	}

	/* DDI_NOSLEEP: this may be called from interrupt context */
	if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
	    DDI_NOSLEEP) == DDI_FAILURE) {
#ifdef	DEBUG
		cmn_err(CE_WARN, "!acpica: unable to dispatch event");
#endif
		return (AE_ERROR);
	}
	return (AE_OK);

}
739 
/*
 * ACPICA OSL entry: suspend the caller for Milliseconds ms.
 */
void
AcpiOsSleep(ACPI_INTEGER Milliseconds)
{
	/*
	 * During kernel startup, before the first tick interrupt
	 * has taken place, we can't call delay; very late in
	 * kernel shutdown or suspend/resume, clock interrupts
	 * are blocked, so delay doesn't work then either.
	 * So we busy wait if lbolt == 0 (kernel startup)
	 * or if acpica_use_safe_delay has been set to a
	 * non-zero value.
	 */
	if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
		drv_usecwait(Milliseconds * 1000);
	else
		delay(drv_usectohz(Milliseconds * 1000));
}
757 
/*
 * ACPICA OSL entry: busy-wait for Microseconds us (never sleeps).
 */
void
AcpiOsStall(UINT32 Microseconds)
{
	drv_usecwait(Microseconds);
}
763 
764 
/*
 * Implementation of "Windows 2001" compatible I/O permission map
 *
 */
#define	OSL_IO_NONE	(0)
#define	OSL_IO_READ	(1<<0)
#define	OSL_IO_WRITE	(1<<1)
#define	OSL_IO_RW	(OSL_IO_READ | OSL_IO_WRITE)
#define	OSL_IO_TERM	(1<<2)		/* marks the last table entry */
#define	OSL_IO_DEFAULT	OSL_IO_RW

static struct io_perm  {
	ACPI_IO_ADDRESS	low;	/* inclusive lower bound */
	ACPI_IO_ADDRESS	high;	/* inclusive upper bound */
	uint8_t		perm;	/* OSL_IO_* access bits */
} osl_io_perm[] = {
	/* deny AML access to the PCI config address/data ports */
	{ 0xcf8, 0xd00, OSL_IO_NONE | OSL_IO_TERM }
};
783 
784 
785 /*
786  *
787  */
788 static struct io_perm *
789 osl_io_find_perm(ACPI_IO_ADDRESS addr)
790 {
791 	struct io_perm *p;
792 
793 	p = osl_io_perm;
794 	while (p != NULL) {
795 		if ((p->low <= addr) && (addr <= p->high))
796 			break;
797 		p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
798 	}
799 
800 	return (p);
801 }
802 
/*
 * ACPICA OSL entry: read an 8/16/32-bit value from an I/O port,
 * subject to the osl_io_perm permission table.  Denied reads return
 * all-ones in *Value and AE_ERROR.
 */
ACPI_STATUS
AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
{
	struct io_perm *p;

	/* verify permission */
	p = osl_io_find_perm(Address);
	if (p && (p->perm & OSL_IO_READ) == 0) {
		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
		    (long)Address, Width);
		*Value = 0xffffffff;
		return (AE_ERROR);
	}

	switch (Width) {
	case 8:
		*Value = inb(Address);
		break;
	case 16:
		*Value = inw(Address);
		break;
	case 32:
		*Value = inl(Address);
		break;
	default:
		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
		    (long)Address, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
837 
/*
 * ACPICA OSL entry: write an 8/16/32-bit value to an I/O port,
 * subject to the osl_io_perm permission table.
 */
ACPI_STATUS
AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
{
	struct io_perm *p;

	/* verify permission */
	p = osl_io_find_perm(Address);
	if (p && (p->perm & OSL_IO_WRITE) == 0) {
		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
		    (long)Address, Width);
		return (AE_ERROR);
	}

	switch (Width) {
	case 8:
		outb(Address, Value);
		break;
	case 16:
		outw(Address, Value);
		break;
	case 32:
		outl(Address, Value);
		break;
	default:
		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
		    (long)Address, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
868 
869 
/*
 * Helper for osl_rw_memory(): copy one value of the given C type
 * between *ptr and *val; 'rw' non-zero means write (*ptr = *val),
 * zero means read (*val = *ptr).
 */

#define	OSL_RW(ptr, val, type, rw) \
	{ if (rw) *((type *)(ptr)) = *((type *) val); \
	    else *((type *) val) = *((type *)(ptr)); }
877 
878 
879 static void
880 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value,
881     UINT32 Width, int write)
882 {
883 	size_t	maplen = Width / 8;
884 	caddr_t	ptr;
885 
886 	ptr = psm_map_new((paddr_t)Address, maplen,
887 	    PSM_PROT_WRITE | PSM_PROT_READ);
888 
889 	switch (maplen) {
890 	case 1:
891 		OSL_RW(ptr, Value, uint8_t, write);
892 		break;
893 	case 2:
894 		OSL_RW(ptr, Value, uint16_t, write);
895 		break;
896 	case 4:
897 		OSL_RW(ptr, Value, uint32_t, write);
898 		break;
899 	default:
900 		cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
901 		    Width);
902 		break;
903 	}
904 
905 	psm_unmap(ptr, maplen);
906 }
907 
/*
 * ACPICA OSL entry: read Width bits from physical memory at Address.
 */
ACPI_STATUS
AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
		UINT32 *Value, UINT32 Width)
{
	osl_rw_memory(Address, Value, Width, 0);
	return (AE_OK);
}
915 
/*
 * ACPICA OSL entry: write Width bits to physical memory at Address.
 */
ACPI_STATUS
AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
		UINT32 Value, UINT32 Width)
{
	osl_rw_memory(Address, &Value, Width, 1);
	return (AE_OK);
}
923 
924 
/*
 * ACPICA OSL entry: read a PCI config-space register via the
 * platform pci_get*_func vectors.  64-bit reads are not supported.
 * Value is written as a UINT64 regardless of Width.
 */
ACPI_STATUS
AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
			void *Value, UINT32 Width)
{

	switch (Width) {
	case 8:
		*((UINT64 *)Value) = (UINT64)(*pci_getb_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Register);
		break;
	case 16:
		*((UINT64 *)Value) = (UINT64)(*pci_getw_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Register);
		break;
	case 32:
		*((UINT64 *)Value) = (UINT64)(*pci_getl_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Register);
		break;
	case 64:
	default:
		cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
		    Register, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
951 
/*
 * When zero, AML writes to PCI config space are logged and dropped
 * (but still reported as successful to ACPICA).  Patchable.
 */
int acpica_write_pci_config_ok = 1;
956 
/*
 * ACPICA OSL entry: write a PCI config-space register via the
 * platform pci_put*_func vectors.  64-bit writes are not supported.
 * Writes are suppressed when acpica_write_pci_config_ok is clear.
 */
ACPI_STATUS
AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
		ACPI_INTEGER Value, UINT32 Width)
{

	if (!acpica_write_pci_config_ok) {
		cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
		    " %lx %d not permitted", PciId->Bus, PciId->Device,
		    PciId->Function, Register, (long)Value, Width);
		/* report success so AML execution continues */
		return (AE_OK);
	}

	switch (Width) {
	case 8:
		(*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
		    Register, (uint8_t)Value);
		break;
	case 16:
		(*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
		    Register, (uint16_t)Value);
		break;
	case 32:
		(*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
		    Register, (uint32_t)Value);
		break;
	case 64:
	default:
		cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
		    Register, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
990 
/*
 * Called with ACPI_HANDLEs for both a PCI Config Space
 * OpRegion and (what ACPI CA thinks is) the PCI device
 * to which this ConfigSpace OpRegion belongs.  Since
 * ACPI CA depends on a valid _BBN object being present
 * and this is not always true (one old x86 had broken _BBN),
 * we go ahead and get the correct PCI bus number using the
 * devinfo mapping (which compensates for broken _BBN).
 *
 * Default values for bus, segment, device and function are
 * all 0 when ACPI CA can't figure them out.
 *
 * Some BIOSes implement _BBN() by reading PCI config space
 * on bus #0 - which means that we'll recurse when we attempt
 * to create the devinfo-to-ACPI map.  If Derive is called during
 * scan_d2a_map, we don't translate the bus # and return.
 *
 * We get the parent of the OpRegion, which must be a PCI
 * node, fetch the associated devinfo node and snag the
 * b/d/f from it.
 */
void
AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
		ACPI_PCI_ID **PciId)
{
	ACPI_HANDLE handle;
	dev_info_t *dip;
	int bus, device, func, devfn;


	/*
	 * See above - avoid recursing during scanning_d2a_map.
	 */
	if (scanning_d2a_map)
		return;

	/*
	 * Get the OpRegion's parent
	 */
	if (AcpiGetParent(chandle, &handle) != AE_OK)
		return;

	/*
	 * If we've mapped the ACPI node to the devinfo
	 * tree, use the devinfo reg property
	 */
	if (acpica_get_devinfo(handle, &dip) == AE_OK) {
		(void) acpica_get_bdf(dip, &bus, &device, &func);
		(*PciId)->Bus = bus;
		(*PciId)->Device = device;
		(*PciId)->Function = func;
	} else if (acpica_eval_int(handle, "_ADR", &devfn) == AE_OK) {
		/* no devinfo node - just confirm the d/f */
		/* _ADR encodes device in the high word, function in the low */
		(*PciId)->Device = (devfn >> 16) & 0xFFFF;
		(*PciId)->Function = devfn & 0xFFFF;
	}
}
1048 
1049 
/*ARGSUSED*/
BOOLEAN
AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
{

	/* Always says yes; all mapped memory assumed readable */
	return (1);
}
1058 
/*ARGSUSED*/
BOOLEAN
AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
{

	/* Always says yes; all mapped memory assumed writable */
	return (1);
}
1067 
1068 UINT64
1069 AcpiOsGetTimer(void)
1070 {
1071 	/* gethrtime() returns 1nS resolution; convert to 100nS granules */
1072 	return ((gethrtime() + 50) / 100);
1073 }
1074 
/* feature strings answered affirmatively by the _OSI method */
static struct AcpiOSIFeature_s {
	uint64_t	control_flag;	/* required core feature, 0 = none */
	const char	*feature_name;	/* _OSI query string */
} AcpiOSIFeatures[] = {
	{ ACPI_FEATURE_OSI_MODULE,	"Module Device" },
	{ 0,				"Processor Device" }
};
1082 
/*
 * ACPICA OSL entry, backing the AML _OSI method: return AE_OK when
 * 'feature' is in AcpiOSIFeatures and any required core feature is
 * enabled, AE_SUPPORT otherwise.
 */
/*ARGSUSED*/
ACPI_STATUS
AcpiOsValidateInterface(char *feature)
{
	int i;

	ASSERT(feature != NULL);
	for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
	    i++) {
		if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
			continue;
		}
		/* Check whether required core features are available. */
		if (AcpiOSIFeatures[i].control_flag != 0 &&
		    acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
		    AcpiOSIFeatures[i].control_flag) {
			/* name matched but prerequisite missing: unsupported */
			break;
		}
		/* Feature supported. */
		return (AE_OK);
	}

	return (AE_SUPPORT);
}
1107 
/*ARGSUSED*/
ACPI_STATUS
AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
    ACPI_SIZE length)
{
	/* no address-space restrictions enforced on this platform */
	return (AE_OK);
}
1115 
/*
 * ACPICA OSL entry for AML Breakpoint/Fatal signals; currently a
 * logged no-op.
 */
ACPI_STATUS
AcpiOsSignal(UINT32 Function, void *Info)
{
	_NOTE(ARGUNUSED(Function, Info))

	/* FUTUREWORK: debugger support */

	cmn_err(CE_NOTE, "!OsSignal unimplemented");
	return (AE_OK);
}
1126 
/*
 * ACPICA OSL entry: printf-style output; forwards to AcpiOsVprintf().
 */
void ACPI_INTERNAL_VAR_XFACE
AcpiOsPrintf(const char *Format, ...)
{
	va_list ap;

	va_start(ap, Format);
	AcpiOsVprintf(Format, ap);
	va_end(ap);
}
1136 
/*
 * When != 0, sends output to console
 * Patchable with kmdb or /etc/system.
 * 1 = printf, 2 = prom_printf, 0/other = system log only.
 */
int acpica_console_out = 0;

#define	ACPICA_OUTBUF_LEN	160
/* line-assembly buffer shared by acpica_pr_buf(); flushed on '\n' */
char	acpica_outbuf[ACPICA_OUTBUF_LEN];
int	acpica_outbuf_offset;	/* next free position in acpica_outbuf */
1146 
/*
 * Accumulate 'buf' into acpica_outbuf, emitting a complete line to
 * the destination selected by acpica_console_out whenever a newline
 * is seen or the staging buffer fills up.  Partial lines persist in
 * acpica_outbuf between calls.
 */
static void
acpica_pr_buf(char *buf)
{
	char c, *bufp, *outp;
	int	out_remaining;

	/*
	 * copy the supplied buffer into the output buffer
	 * when we hit a '\n' or overflow the output buffer,
	 * output and reset the output buffer
	 */
	bufp = buf;
	outp = acpica_outbuf + acpica_outbuf_offset;
	out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
	while (c = *bufp++) {
		*outp++ = c;
		if (c == '\n' || --out_remaining == 0) {
			*outp = '\0';
			switch (acpica_console_out) {
			case 1:
				printf(acpica_outbuf);
				break;
			case 2:
				prom_printf(acpica_outbuf);
				break;
			case 0:
			default:
				(void) strlog(0, 0, 0,
				    SL_CONSOLE | SL_NOTE | SL_LOGONLY,
				    acpica_outbuf);
				break;
			}
			/* line flushed; start over at the buffer head */
			acpica_outbuf_offset = 0;
			outp = acpica_outbuf;
			out_remaining = ACPICA_OUTBUF_LEN - 1;
		}
	}

	/* remember where the (possibly partial) line left off */
	acpica_outbuf_offset = outp - acpica_outbuf;
}
1190 
/*
 * ACPICA OSL entry: vprintf-style output.  Formats into the buffer
 * allocated by AcpiOsInitialize() and hands the result to
 * acpica_pr_buf() for line-oriented delivery.
 */
void
AcpiOsVprintf(const char *Format, va_list Args)
{

	/*
	 * If AcpiOsInitialize() failed to allocate a string buffer,
	 * resort to vprintf().
	 */
	if (acpi_osl_pr_buffer == NULL) {
		vprintf(Format, Args);
		return;
	}

	/*
	 * It is possible that a very long debug output statement will
	 * be truncated; this is silently ignored.
	 */
	(void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
	acpica_pr_buf(acpi_osl_pr_buffer);
}
1211 
/*
 * ACPICA OSL entry: redirect debug output; not supported, logged
 * in DEBUG kernels only.
 */
void
AcpiOsRedirectOutput(void *Destination)
{
	_NOTE(ARGUNUSED(Destination))

	/* FUTUREWORK: debugger support */

#ifdef	DEBUG
	cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
#endif
}
1223 
1224 
/*
 * ACPICA OSL entry: read a line for the AML debugger; not supported,
 * always returns 0 characters.
 */
UINT32
AcpiOsGetLine(char *Buffer)
{
	_NOTE(ARGUNUSED(Buffer))

	/* FUTUREWORK: debugger support */

	return (0);
}
1234 
/*
 * Device tree binding
 */
/*
 * AcpiWalkNamespace callback for acpica_find_pcibus(): find the
 * PCI/PCIe host-bridge object whose bus number matches the busno
 * passed in ctxp, storing its handle through rvpp.  Returns
 * AE_CTRL_TERMINATE when found, AE_CTRL_DEPTH to prune a subtree,
 * AE_OK to keep walking siblings.
 */
static ACPI_STATUS
acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
{
	_NOTE(ARGUNUSED(lvl));

	int sta, hid, bbn;
	int busno = (intptr_t)ctxp;
	ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;

	/* Check whether device exists. */
	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
		/*
		 * Skip object if device doesn't exist.
		 * According to ACPI Spec,
		 * 1) setting either bit 0 or bit 3 means that device exists.
		 * 2) Absence of _STA method means all status bits set.
		 */
		return (AE_CTRL_DEPTH);
	}

	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
		/* Non PCI/PCIe host bridge. */
		return (AE_OK);
	}

	if (acpi_has_broken_bbn) {
		ACPI_BUFFER rb;
		rb.Pointer = NULL;
		rb.Length = ACPI_ALLOCATE_BUFFER;

		/* Decree _BBN == n from PCI<n> */
		if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
			return (AE_CTRL_TERMINATE);
		}
		/* fourth character of the name, e.g. "PCI0", is the bus # */
		bbn = ((char *)rb.Pointer)[3] - '0';
		AcpiOsFree(rb.Pointer);
		if (bbn == busno || busno == 0) {
			*hdlp = hdl;
			return (AE_CTRL_TERMINATE);
		}
	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
		if (bbn == busno) {
			*hdlp = hdl;
			return (AE_CTRL_TERMINATE);
		}
	} else if (busno == 0) {
		/* no _BBN at all: treat the bridge as bus 0 */
		*hdlp = hdl;
		return (AE_CTRL_TERMINATE);
	}

	return (AE_CTRL_DEPTH);
}
1292 
1293 static int
1294 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1295 {
1296 	ACPI_HANDLE sbobj, busobj;
1297 
1298 	/* initialize static flag by querying ACPI namespace for bug */
1299 	if (acpi_has_broken_bbn == -1)
1300 		acpi_has_broken_bbn = acpica_query_bbn_problem();
1301 
1302 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1303 		busobj = NULL;
1304 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1305 		    acpica_find_pcibus_walker, (void *)(intptr_t)busno,
1306 		    (void **)&busobj);
1307 		if (busobj != NULL) {
1308 			*rh = busobj;
1309 			return (AE_OK);
1310 		}
1311 	}
1312 
1313 	return (AE_ERROR);
1314 }
1315 
/*
 * AcpiWalkNamespace callback used by acpica_query_bbn_problem():
 * counts (via the int pointed to by ctxp) PCI/PCIe host bridges whose
 * _BBN evaluates to 0.  Terminates the walk early once a second zero
 * _BBN is seen, since that is enough to establish the bug.
 */
static ACPI_STATUS
acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
{
	_NOTE(ARGUNUSED(lvl));
	_NOTE(ARGUNUSED(rvpp));

	int sta, hid, bbn;
	int *cntp = (int *)ctxp;

	/* Check whether device exists. */
	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
		/*
		 * Skip object if device doesn't exist.
		 * According to ACPI Spec,
		 * 1) setting either bit 0 or bit 3 means that device exists.
		 * 2) Absence of _STA method means all status bits set.
		 */
		return (AE_CTRL_DEPTH);
	}

	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
		/* Non PCI/PCIe host bridge. */
		return (AE_OK);
	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
	    bbn == 0 && ++(*cntp) > 1) {
		/*
		 * If we find more than one bus with a 0 _BBN
		 * we have the problem that BigBear's BIOS shows
		 */
		return (AE_CTRL_TERMINATE);
	} else {
		/*
		 * Skip children of PCI/PCIe host bridge.
		 */
		return (AE_CTRL_DEPTH);
	}
}
1355 
1356 /*
1357  * Look for ACPI problem where _BBN is zero for multiple PCI buses
1358  * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1359  * below if it exists.
1360  */
1361 static int
1362 acpica_query_bbn_problem(void)
1363 {
1364 	ACPI_HANDLE sbobj;
1365 	int zerobbncnt;
1366 	void *rv;
1367 
1368 	zerobbncnt = 0;
1369 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1370 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1371 		    acpica_query_bbn_walker, &zerobbncnt, &rv);
1372 	}
1373 
1374 	return (zerobbncnt > 1 ? 1 : 0);
1375 }
1376 
static const char hextab[] = "0123456789ABCDEF";

/*
 * Get hex digit:
 *
 * Returns the 4-bit value of the hex digit named by the input
 * character; lowercase input is folded to uppercase first.  Returns
 * zero if the input character is not a valid hex digit (note that '0'
 * also yields zero, so callers cannot distinguish the two cases).
 */
static int
hexdig(int c)
{
	/* Fold 'a'..'z' to 'A'..'Z'; leave everything else alone. */
	int ch = (c >= 'a' && c <= 'z') ? (c - ' ') : c;
	int idx = sizeof (hextab);

	/* Scan the table backwards; fall out at 0 when no digit matches. */
	while (--idx > 0) {
		if (hextab[idx] == ch)
			break;
	}
	return (idx);
}
1396 
/*
 * Compress a 7-character EISA device name (three letters followed by
 * four hex digits, e.g. "PNP0A03") into the packed 4-byte integer form
 * used by EISA.  The packing is done byte-by-byte through a union, so
 * the resulting int's value depends on host byte order — it matches
 * what the platform firmware expects on x86.
 */
static int
CompressEisaID(char *np)
{
	/*
	 *  Compress an EISA device name:
	 *
	 *  This routine converts a 7-byte ASCII device name into the 4-byte
	 *  compressed form used by EISA (50 bytes of ROM to save 1 byte of
	 *  NV-RAM!)
	 */

	union { char octets[4]; int retval; } myu;

	/* Letters: 5 bits each, packed across the first two octets. */
	myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
	myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
	/* Hex digits: 4 bits each, two per octet. */
	myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
	myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);

	return (myu.retval);
}
1417 
1418 ACPI_STATUS
1419 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1420 {
1421 	ACPI_STATUS status;
1422 	ACPI_BUFFER rb;
1423 	ACPI_OBJECT ro;
1424 
1425 	rb.Pointer = &ro;
1426 	rb.Length = sizeof (ro);
1427 	if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1428 	    ACPI_TYPE_INTEGER)) == AE_OK)
1429 		*rint = ro.Integer.Value;
1430 
1431 	return (status);
1432 }
1433 
/*
 * Evaluate a _HID-style object and return its value as an integer EISA
 * id through rint.  Handles both encodings _HID may use: a plain
 * integer (already an EISA id) or a 7-character EISA id string, which
 * is compressed via CompressEisaID().  8-character ACPI-format id
 * strings are rejected.  Returns AE_OK on success, AE_ERROR otherwise.
 */
static int
acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
{
	ACPI_BUFFER rb;
	ACPI_OBJECT *rv;

	rb.Pointer = NULL;
	rb.Length = ACPI_ALLOCATE_BUFFER;
	if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
	    rb.Length != 0) {
		rv = rb.Pointer;
		if (rv->Type == ACPI_TYPE_INTEGER) {
			*rint = rv->Integer.Value;
			AcpiOsFree(rv);
			return (AE_OK);
		} else if (rv->Type == ACPI_TYPE_STRING) {
			char *stringData;

			/* Convert the string into an EISA ID */
			if (rv->String.Pointer == NULL) {
				AcpiOsFree(rv);
				return (AE_ERROR);
			}

			stringData = rv->String.Pointer;

			/*
			 * If the string is an EisaID, it must be 7
			 * characters; if it's an ACPI ID, it will be 8
			 * (and we don't care about ACPI ids here).
			 */
			if (strlen(stringData) != 7) {
				AcpiOsFree(rv);
				return (AE_ERROR);
			}

			*rint = CompressEisaID(stringData);
			AcpiOsFree(rv);
			return (AE_OK);
		} else
			/* Neither integer nor string: unsupported type. */
			AcpiOsFree(rv);
	}
	return (AE_ERROR);
}
1478 
1479 /*
1480  * Create linkage between devinfo nodes and ACPI nodes
1481  */
1482 ACPI_STATUS
1483 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1484 {
1485 	ACPI_STATUS status;
1486 	ACPI_BUFFER rb;
1487 
1488 	/*
1489 	 * Tag the devinfo node with the ACPI name
1490 	 */
1491 	rb.Pointer = NULL;
1492 	rb.Length = ACPI_ALLOCATE_BUFFER;
1493 	status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1494 	if (ACPI_FAILURE(status)) {
1495 		cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1496 	} else {
1497 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1498 		    "acpi-namespace", (char *)rb.Pointer);
1499 		AcpiOsFree(rb.Pointer);
1500 
1501 		/*
1502 		 * Tag the ACPI node with the dip
1503 		 */
1504 		status = acpica_set_devinfo(acpiobj, dip);
1505 		ASSERT(ACPI_SUCCESS(status));
1506 	}
1507 
1508 	return (status);
1509 }
1510 
1511 /*
1512  * Destroy linkage between devinfo nodes and ACPI nodes
1513  */
ACPI_STATUS
acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
{
	/* Undo both halves of acpica_tag_devinfo(); failures are ignored. */
	(void) acpica_unset_devinfo(acpiobj);
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");

	return (AE_OK);
}
1522 
1523 /*
1524  * Return the ACPI device node matching the CPU dev_info node.
1525  */
1526 ACPI_STATUS
1527 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1528 {
1529 	int i;
1530 
1531 	/*
1532 	 * if cpu_map itself is NULL, we're a uppc system and
1533 	 * acpica_build_processor_map() hasn't been called yet.
1534 	 * So call it here
1535 	 */
1536 	if (cpu_map == NULL) {
1537 		(void) acpica_build_processor_map();
1538 		if (cpu_map == NULL)
1539 			return (AE_ERROR);
1540 	}
1541 
1542 	if (cpu_id < 0) {
1543 		return (AE_ERROR);
1544 	}
1545 
1546 	/*
1547 	 * search object with cpuid in cpu_map
1548 	 */
1549 	mutex_enter(&cpu_map_lock);
1550 	for (i = 0; i < cpu_map_count; i++) {
1551 		if (cpu_map[i]->cpu_id == cpu_id) {
1552 			break;
1553 		}
1554 	}
1555 	if (i >= cpu_map_count || (cpu_map[i]->obj == NULL)) {
1556 		mutex_exit(&cpu_map_lock);
1557 		return (AE_ERROR);
1558 	}
1559 	*rh = cpu_map[cpu_id]->obj;
1560 	mutex_exit(&cpu_map_lock);
1561 
1562 	return (AE_OK);
1563 }
1564 
1565 /*
1566  * Determine if this object is a processor
1567  */
1568 static ACPI_STATUS
1569 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1570 {
1571 	ACPI_STATUS status;
1572 	ACPI_OBJECT_TYPE objtype;
1573 	unsigned long acpi_id;
1574 	ACPI_BUFFER rb;
1575 
1576 	if (AcpiGetType(obj, &objtype) != AE_OK)
1577 		return (AE_OK);
1578 
1579 	if (objtype == ACPI_TYPE_PROCESSOR) {
1580 		/* process a Processor */
1581 		rb.Pointer = NULL;
1582 		rb.Length = ACPI_ALLOCATE_BUFFER;
1583 		status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1584 		    ACPI_TYPE_PROCESSOR);
1585 		if (status != AE_OK) {
1586 			cmn_err(CE_WARN, "!acpica: error probing Processor");
1587 			return (status);
1588 		}
1589 		acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1590 		AcpiOsFree(rb.Pointer);
1591 	} else if (objtype == ACPI_TYPE_DEVICE) {
1592 		/* process a processor Device */
1593 		rb.Pointer = NULL;
1594 		rb.Length = ACPI_ALLOCATE_BUFFER;
1595 		status = AcpiGetObjectInfo(obj, &rb);
1596 		if (status != AE_OK) {
1597 			cmn_err(CE_WARN,
1598 			    "!acpica: error probing Processor Device\n");
1599 			return (status);
1600 		}
1601 		ASSERT(((ACPI_OBJECT *)rb.Pointer)->Type ==
1602 		    ACPI_TYPE_DEVICE);
1603 
1604 		if (ddi_strtoul(
1605 		    ((ACPI_DEVICE_INFO *)rb.Pointer)->UniqueId.Value,
1606 		    NULL, 10, &acpi_id) != 0) {
1607 			AcpiOsFree(rb.Pointer);
1608 			cmn_err(CE_WARN,
1609 			    "!acpica: error probing Processor Device _UID\n");
1610 			return (AE_ERROR);
1611 		}
1612 		AcpiOsFree(rb.Pointer);
1613 	}
1614 	(void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1615 
1616 	return (AE_OK);
1617 }
1618 
/*
 * Build the devinfo-to-ACPI ("d2a") linkage for all PCI/PCIe trees:
 * walks the child-of-root devinfo nodes, finds each root bus's ACPI
 * object, tags the pair, and recurses down the subtree.  Sets d2a_done
 * when finished.  On certain failures map_error latches so the scan is
 * never retried.
 */
static void
scan_d2a_map(void)
{
	dev_info_t *dip, *cdip;
	ACPI_HANDLE acpiobj;
	char *device_type_prop;
	int bus;
	static int map_error = 0;

	/* A previous scan failed unrecoverably; don't try again. */
	if (map_error)
		return;

	scanning_d2a_map = 1;

	/*
	 * Find all child-of-root PCI buses, and find their corresponding
	 * ACPI child-of-root PCI nodes.  For each one, add to the
	 * d2a table.
	 */

	for (dip = ddi_get_child(ddi_root_node());
	    dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {

		/* prune non-PCI nodes */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS,
		    "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
			continue;

		if ((strcmp("pci", device_type_prop) != 0) &&
		    (strcmp("pciex", device_type_prop) != 0)) {
			ddi_prop_free(device_type_prop);
			continue;
		}

		ddi_prop_free(device_type_prop);

		/*
		 * To get bus number of dip, get first child and get its
		 * bus number.  If NULL, just continue, because we don't
		 * care about bus nodes with no children anyway.
		 */
		if ((cdip = ddi_get_child(dip)) == NULL)
			continue;

		if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "Can't get bus number of PCI child?");
#endif
			/* Fatal: abort the whole scan but mark it done. */
			map_error = 1;
			scanning_d2a_map = 0;
			d2a_done = 1;
			return;
		}

		if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
#endif
			/* Non-fatal for this bus; remember and move on. */
			map_error = 1;
			continue;
		}

		acpica_tag_devinfo(dip, acpiobj);

		/* call recursively to enumerate subtrees */
		scan_d2a_subtree(dip, acpiobj, bus);
	}

	scanning_d2a_map = 0;
	d2a_done = 1;
}
1692 
1693 /*
1694  * For all acpi child devices of acpiobj, find their matching
1695  * dip under "dip" argument.  (matching means "matches dev/fn").
1696  * bus is assumed to already be a match from caller, and is
1697  * used here only to record in the d2a entry.  Recurse if necessary.
1698  */
/*
 * For all acpi child devices of acpiobj, find their matching
 * dip under "dip" argument.  (matching means "matches dev/fn").
 * bus is assumed to already be a match from caller, and is
 * used here only to record in the d2a entry.  Recurse if necessary.
 */
static void
scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
{
	int acpi_devfn, hid;
	ACPI_HANDLE acld;
	dev_info_t *dcld;
	int dcld_b, dcld_d, dcld_f;
	int dev, func;
	char *device_type_prop;

	/* Iterate over the ACPI children of acpiobj. */
	acld = NULL;
	while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
	    == AE_OK) {
		/* get the dev/func we're looking for in the devinfo tree */
		if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
			continue;
		/* _ADR encodes device in the high word, function in the low */
		dev = (acpi_devfn >> 16) & 0xFFFF;
		func = acpi_devfn & 0xFFFF;

		/* look through all the immediate children of dip */
		for (dcld = ddi_get_child(dip); dcld != NULL;
		    dcld = ddi_get_next_sibling(dcld)) {
			if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
				continue;

			/* dev must match; function must match or wildcard */
			if (dcld_d != dev ||
			    (func != 0xFFFF && func != dcld_f))
				continue;
			bus = dcld_b;

			/* found a match, record it */
			acpica_tag_devinfo(dcld, acld);

			/* if we find a bridge, recurse from here */
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
			    DDI_PROP_DONTPASS, "device_type",
			    &device_type_prop) == DDI_PROP_SUCCESS) {
				if ((strcmp("pci", device_type_prop) == 0) ||
				    (strcmp("pciex", device_type_prop) == 0))
					scan_d2a_subtree(dcld, acld, bus);
				ddi_prop_free(device_type_prop);
			}

			/* done finding a match, so break now */
			break;
		}
	}
}
1748 
1749 /*
1750  * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1751  */
1752 int
1753 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1754 {
1755 	pci_regspec_t *pci_rp;
1756 	int len;
1757 
1758 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1759 	    "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1760 		return (-1);
1761 
1762 	if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1763 		ddi_prop_free(pci_rp);
1764 		return (-1);
1765 	}
1766 	if (bus != NULL)
1767 		*bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1768 	if (device != NULL)
1769 		*device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1770 	if (func != NULL)
1771 		*func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1772 	ddi_prop_free(pci_rp);
1773 	return (0);
1774 }
1775 
1776 /*
1777  * Return the ACPI device node matching this dev_info node, if it
1778  * exists in the ACPI tree.
1779  */
1780 ACPI_STATUS
1781 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1782 {
1783 	ACPI_STATUS status;
1784 	char *acpiname;
1785 
1786 	if (!d2a_done)
1787 		scan_d2a_map();
1788 
1789 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1790 	    "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1791 		return (AE_ERROR);
1792 	}
1793 
1794 	status = AcpiGetHandle(NULL, acpiname, rh);
1795 	ddi_prop_free((void *)acpiname);
1796 	return (status);
1797 }
1798 
1799 
1800 
1801 /*
1802  * Manage OS data attachment to ACPI nodes
1803  */
1804 
1805 /*
1806  * Return the (dev_info_t *) associated with the ACPI node.
1807  */
1808 ACPI_STATUS
1809 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1810 {
1811 	ACPI_STATUS status;
1812 	void *ptr;
1813 
1814 	status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1815 	if (status == AE_OK)
1816 		*dipp = (dev_info_t *)ptr;
1817 
1818 	return (status);
1819 }
1820 
1821 /*
1822  * Set the dev_info_t associated with the ACPI node.
1823  */
1824 static ACPI_STATUS
1825 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1826 {
1827 	ACPI_STATUS status;
1828 
1829 	status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1830 	return (status);
1831 }
1832 
1833 /*
1834  * Unset the dev_info_t associated with the ACPI node.
1835  */
/*
 * Unset the dev_info_t associated with the ACPI node by detaching the
 * OS data attached under the acpica_devinfo_handler key.
 */
static ACPI_STATUS
acpica_unset_devinfo(ACPI_HANDLE obj)
{
	return (AcpiDetachData(obj, acpica_devinfo_handler));
}
1841 
1842 /*
1843  *
1844  */
1845 void
1846 acpica_devinfo_handler(ACPI_HANDLE obj, UINT32 func, void *data)
1847 {
1848 	/* noop */
1849 }
1850 
/*
 * Populate cpu_map by probing the ACPI namespace for both Processor
 * objects and processor Devices (_HID "ACPI0007").  Idempotent: a
 * second call returns AE_OK without re-probing.  Returns the status of
 * the last namespace operation.
 */
ACPI_STATUS
acpica_build_processor_map(void)
{
	ACPI_STATUS status;
	void *rv;

	/*
	 * shouldn't be called more than once anyway
	 */
	if (cpu_map_built)
		return (AE_OK);

	/*
	 * ACPI device configuration driver has built mapping information
	 * among processor id and object handle, no need to probe again.
	 */
	if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		cpu_map_built = 1;
		return (AE_OK);
	}

	/*
	 * Look for Processor objects
	 */
	status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
	    ACPI_ROOT_OBJECT,
	    4,
	    acpica_probe_processor,
	    NULL,
	    &rv);
	ASSERT(status == AE_OK);

	/*
	 * Look for processor Device objects
	 */
	status = AcpiGetDevices("ACPI0007",
	    acpica_probe_processor,
	    NULL,
	    &rv);
	ASSERT(status == AE_OK);
	cpu_map_built = 1;

	return (status);
}
1895 
1896 /*
1897  * Grow cpu map table on demand.
1898  */
1899 static void
1900 acpica_grow_cpu_map(void)
1901 {
1902 	if (cpu_map_count == cpu_map_count_max) {
1903 		size_t sz;
1904 		struct cpu_map_item **new_map;
1905 
1906 		ASSERT(cpu_map_count_max < INT_MAX / 2);
1907 		cpu_map_count_max += max_ncpus;
1908 		new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1909 		    KM_SLEEP);
1910 		if (cpu_map_count != 0) {
1911 			ASSERT(cpu_map != NULL);
1912 			sz = sizeof (cpu_map[0]) * cpu_map_count;
1913 			kcopy(cpu_map, new_map, sz);
1914 			kmem_free(cpu_map, sz);
1915 		}
1916 		cpu_map = new_map;
1917 	}
1918 }
1919 
1920 /*
1921  * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1922  * ACPI handle). The mapping table will be setup in two steps:
1923  * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1924  *    processor id and ACPI object handle.
1925  * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1926  * On system with ACPI device configuration for CPU enabled, acpica_map_cpu()
1927  * will be called before acpica_add_processor_to_map(), otherwise
1928  * acpica_map_cpu() will be called after acpica_add_processor_to_map().
1929  */
/*
 * Record the (ACPI processor id, object handle, APIC id) triple in
 * cpu_map, creating or completing an entry as needed.  Returns AE_OK,
 * AE_ALREADY_EXISTS if the object (or its ACPI id, via an alias) is
 * already mapped, or AE_NO_MEMORY if the map cannot grow further.
 */
ACPI_STATUS
acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(obj != NULL);
	if (obj == NULL) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);

	/*
	 * Special case for uppc
	 * If we're a uppc system and ACPI device configuration for CPU has
	 * been disabled, there won't be a CPU map yet because uppc psm doesn't
	 * call acpica_map_cpu(). So create one and use the passed-in processor
	 * as CPU 0
	 */
	if (cpu_map == NULL &&
	    !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		acpica_grow_cpu_map();
		ASSERT(cpu_map != NULL);
		item = kmem_zalloc(sizeof (*item), KM_SLEEP);
		item->cpu_id = 0;
		item->proc_id = acpi_id;
		item->apic_id = apic_id;
		item->obj = obj;
		cpu_map[0] = item;
		cpu_map_count = 1;
		mutex_exit(&cpu_map_lock);
		return (AE_OK);
	}

	/*
	 * Scan for a duplicate object (error) or an existing entry with
	 * the same ACPI id (created earlier by acpica_map_cpu()).
	 */
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->obj == obj) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}

	if (rc == AE_OK) {
		if (item != NULL) {
			/*
			 * ACPI alias objects may cause more than one objects
			 * with the same ACPI processor id, only remember
			 * the first object encountered.
			 */
			if (item->obj == NULL) {
				item->obj = obj;
				item->apic_id = apic_id;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			rc = AE_NO_MEMORY;
		} else {
			/* New processor: append a fresh, as-yet-unmapped item. */
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			item->cpu_id = -1;
			item->proc_id = acpi_id;
			item->apic_id = apic_id;
			item->obj = obj;
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}

	mutex_exit(&cpu_map_lock);

	return (rc);
}
2009 
/*
 * Drop the ACPI object handle from the cpu_map entry with the given
 * ACPI processor id.  If the entry is no longer referenced by a cpu id
 * either, it is freed and the last slot is swapped into its place
 * (order is not preserved).  Returns AE_OK or AE_NOT_EXIST.
 */
ACPI_STATUS
acpica_remove_processor_from_map(UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_NOT_EXIST;

	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->proc_id != acpi_id) {
			continue;
		}
		cpu_map[i]->obj = NULL;
		/* Free item if no more reference to it. */
		if (cpu_map[i]->cpu_id == -1) {
			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
			cpu_map[i] = NULL;
			cpu_map_count--;
			if (i != cpu_map_count) {
				/* Swap-remove: keep the array dense. */
				cpu_map[i] = cpu_map[cpu_map_count];
				cpu_map[cpu_map_count] = NULL;
			}
		}
		rc = AE_OK;
		break;
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2039 
/*
 * Bind a Solaris cpu id to the cpu_map entry with the given ACPI
 * processor id, creating a new (object-less) entry when none exists
 * yet.  Returns AE_OK, AE_ALREADY_EXISTS if the cpu id or the entry is
 * already bound, or AE_NO_MEMORY if the map cannot grow further.
 */
ACPI_STATUS
acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(cpuid != -1);
	if (cpuid == -1) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);
	/* Look for a duplicate cpu id or an entry with the same ACPI id. */
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id == cpuid) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}
	if (rc == AE_OK) {
		if (item != NULL) {
			/* Complete the entry left by add_processor_to_map(). */
			if (item->cpu_id == -1) {
				item->cpu_id = cpuid;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			rc = AE_NO_MEMORY;
		} else {
			/* No entry yet: create one without an ACPI object. */
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			item->cpu_id = cpuid;
			item->proc_id = acpi_id;
			item->apic_id = UINT32_MAX;
			item->obj = NULL;
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2088 
/*
 * Unbind the Solaris cpu id from its cpu_map entry.  If the entry no
 * longer holds an ACPI object either, it is freed and the last slot is
 * swapped into its place (order is not preserved).  Returns AE_OK or
 * AE_NOT_EXIST.
 */
ACPI_STATUS
acpica_unmap_cpu(processorid_t cpuid)
{
	int i;
	ACPI_STATUS rc = AE_NOT_EXIST;

	ASSERT(cpuid != -1);
	if (cpuid == -1) {
		return (rc);
	}

	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id != cpuid) {
			continue;
		}
		cpu_map[i]->cpu_id = -1;
		/* Free item if no more reference. */
		if (cpu_map[i]->obj == NULL) {
			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
			cpu_map[i] = NULL;
			cpu_map_count--;
			if (i != cpu_map_count) {
				/* Swap-remove: keep the array dense. */
				cpu_map[i] = cpu_map[cpu_map_count];
				cpu_map[cpu_map_count] = NULL;
			}
		}
		rc = AE_OK;
		break;
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2123 
2124 ACPI_STATUS
2125 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2126 {
2127 	int i;
2128 	ACPI_STATUS rc = AE_NOT_EXIST;
2129 
2130 	ASSERT(cpuid != -1);
2131 	if (cpuid == -1) {
2132 		return (rc);
2133 	}
2134 
2135 	mutex_enter(&cpu_map_lock);
2136 	for (i = 0; i < cpu_map_count; i++) {
2137 		if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2138 			*hdlp = cpu_map[i]->obj;
2139 			rc = AE_OK;
2140 			break;
2141 		}
2142 	}
2143 	mutex_exit(&cpu_map_lock);
2144 
2145 	return (rc);
2146 }
2147 
2148 ACPI_STATUS
2149 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2150 {
2151 	int i;
2152 	ACPI_STATUS rc = AE_NOT_EXIST;
2153 
2154 	mutex_enter(&cpu_map_lock);
2155 	for (i = 0; i < cpu_map_count; i++) {
2156 		if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2157 			*hdlp = cpu_map[i]->obj;
2158 			rc = AE_OK;
2159 			break;
2160 		}
2161 	}
2162 	mutex_exit(&cpu_map_lock);
2163 
2164 	return (rc);
2165 }
2166 
2167 ACPI_STATUS
2168 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2169 {
2170 	int i;
2171 	ACPI_STATUS rc = AE_NOT_EXIST;
2172 
2173 	ASSERT(apicid != UINT32_MAX);
2174 	if (apicid == UINT32_MAX) {
2175 		return (rc);
2176 	}
2177 
2178 	mutex_enter(&cpu_map_lock);
2179 	for (i = 0; i < cpu_map_count; i++) {
2180 		if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2181 			*hdlp = cpu_map[i]->obj;
2182 			rc = AE_OK;
2183 			break;
2184 		}
2185 	}
2186 	mutex_exit(&cpu_map_lock);
2187 
2188 	return (rc);
2189 }
2190 
/* Atomically set bits in the ACPI core-feature mask. */
void
acpica_set_core_feature(uint64_t features)
{
	atomic_or_64(&acpica_core_features, features);
}
2196 
/* Atomically clear bits in the ACPI core-feature mask. */
void
acpica_clear_core_feature(uint64_t features)
{
	atomic_and_64(&acpica_core_features, ~features);
}
2202 
/* Return the subset of the queried bits currently set in the core mask. */
uint64_t
acpica_get_core_feature(uint64_t features)
{
	return (acpica_core_features & features);
}
2208 
/* Atomically set bits in the ACPI device-configuration feature mask. */
void
acpica_set_devcfg_feature(uint64_t features)
{
	atomic_or_64(&acpica_devcfg_features, features);
}
2214 
/* Atomically clear bits in the ACPI device-configuration feature mask. */
void
acpica_clear_devcfg_feature(uint64_t features)
{
	atomic_and_64(&acpica_devcfg_features, ~features);
}
2220 
/* Return the subset of the queried bits currently set in the devcfg mask. */
uint64_t
acpica_get_devcfg_feature(uint64_t features)
{
	return (acpica_devcfg_features & features);
}
2226 
/*
 * Hand back a pointer to ACPICA's global copy of the FADT.  The table
 * is owned by ACPICA; callers must treat it as read-only.
 */
void
acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
{
	*gbl_FADT = &AcpiGbl_FADT;
}
2232 
/*
 * Tell the platform firmware (via the FADT SMI command port) that the
 * OS is taking control of P-state and/or C-state management.  Each
 * write is skipped when the caller doesn't request it or the FADT
 * advertises no command value for it.
 */
void
acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
{
	if (pstates && AcpiGbl_FADT.PstateControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.PstateControl);

	if (cstates && AcpiGbl_FADT.CstControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.CstControl);
}
2244