xref: /titanic_41/usr/src/uts/intel/io/acpica/osl.c (revision ddee57fa6897d2e39c7a9c3ea4247b298ac7da48)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright (c) 2009, Intel Corporation.
28  * All rights reserved.
29  */
30 /*
31  * ACPI CA OSL for Solaris x86
32  */
33 
34 #include <sys/types.h>
35 #include <sys/kmem.h>
36 #include <sys/psm.h>
37 #include <sys/pci_cfgspace.h>
38 #include <sys/apic.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/sunndi.h>
42 #include <sys/pci.h>
43 #include <sys/kobj.h>
44 #include <sys/taskq.h>
45 #include <sys/strlog.h>
46 #include <sys/note.h>
47 
48 #include <sys/acpi/acpi.h>
49 #include <sys/acpi/accommon.h>
50 #include <sys/acpica.h>
51 
52 #define	MAX_DAT_FILE_SIZE	(64*1024)
53 
54 /* local functions */
55 static int CompressEisaID(char *np);
56 
57 static void scan_d2a_map(void);
58 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
59 
60 static int acpica_query_bbn_problem(void);
61 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
62 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
63 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
64 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
65 static void acpica_devinfo_handler(ACPI_HANDLE, UINT32, void *);
66 
67 /*
68  * Event queue vars
69  */
70 int acpica_eventq_init = 0;
71 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
72 
73 /*
74  * Priorities relative to minclsyspri that each taskq
75  * run at; OSL_NOTIFY_HANDLER needs to run at a higher
76  * priority than OSL_GPE_HANDLER.  There's an implicit
77  * assumption that no priority here results in exceeding
78  * maxclsyspri.
79  * Note: these initializations need to match the order of
80  * ACPI_EXECUTE_TYPE.
81  */
82 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
83 	0,	/* OSL_GLOBAL_LOCK_HANDLER */
84 	2,	/* OSL_NOTIFY_HANDLER */
85 	0,	/* OSL_GPE_HANDLER */
86 	0,	/* OSL_DEBUGGER_THREAD */
87 	0,	/* OSL_EC_POLL_HANDLER */
88 	0	/* OSL_EC_BURST_HANDLER */
89 };
90 
91 /*
92  * Note, if you change this path, you need to update
93  * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
94  */
95 static char *acpi_table_path = "/boot/acpi/tables/";
96 
97 /* non-zero while scan_d2a_map() is working */
98 static int scanning_d2a_map = 0;
99 static int d2a_done = 0;
100 
101 /* features supported by ACPICA and ACPI device configuration. */
102 uint64_t acpica_core_features = 0;
103 static uint64_t acpica_devcfg_features = 0;
104 
105 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
106 int acpica_use_safe_delay = 0;
107 
108 /* CPU mapping data */
109 struct cpu_map_item {
110 	processorid_t	cpu_id;
111 	UINT32		proc_id;
112 	UINT32		apic_id;
113 	ACPI_HANDLE	obj;
114 };
115 
116 static kmutex_t cpu_map_lock;
117 static struct cpu_map_item **cpu_map = NULL;
118 static int cpu_map_count_max = 0;
119 static int cpu_map_count = 0;
120 static int cpu_map_built = 0;
121 
122 static int acpi_has_broken_bbn = -1;
123 
124 /* buffer for AcpiOsVprintf() */
125 #define	ACPI_OSL_PR_BUFLEN	1024
126 static char *acpi_osl_pr_buffer = NULL;
127 static int acpi_osl_pr_buflen;
128 
129 #define	D2A_DEBUG
130 
131 /*
132  *
133  */
134 static void
135 discard_event_queues()
136 {
137 	int	i;
138 
139 	/*
140 	 * destroy event queues
141 	 */
142 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
143 		if (osl_eventq[i])
144 			ddi_taskq_destroy(osl_eventq[i]);
145 	}
146 }
147 
148 
149 /*
150  *
151  */
152 static ACPI_STATUS
153 init_event_queues()
154 {
155 	char	namebuf[32];
156 	int	i, error = 0;
157 
158 	/*
159 	 * Initialize event queues
160 	 */
161 
162 	/* Always allocate only 1 thread per queue to force FIFO execution */
163 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
164 		snprintf(namebuf, 32, "ACPI%d", i);
165 		osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
166 		    osl_eventq_pri_delta[i] + minclsyspri, 0);
167 		if (osl_eventq[i] == NULL)
168 			error++;
169 	}
170 
171 	if (error != 0) {
172 		discard_event_queues();
173 #ifdef	DEBUG
174 		cmn_err(CE_WARN, "!acpica: could not initialize event queues");
175 #endif
176 		return (AE_ERROR);
177 	}
178 
179 	acpica_eventq_init = 1;
180 	return (AE_OK);
181 }
182 
183 /*
184  * One-time initialization of OSL layer
185  */
186 ACPI_STATUS
187 AcpiOsInitialize(void)
188 {
189 	/*
190 	 * Allocate buffer for AcpiOsVprintf() here to avoid
191 	 * kmem_alloc()/kmem_free() at high PIL
192 	 */
193 	acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
194 	if (acpi_osl_pr_buffer != NULL)
195 		acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
196 
197 	return (AE_OK);
198 }
199 
200 /*
201  * One-time shut-down of OSL layer
202  */
203 ACPI_STATUS
204 AcpiOsTerminate(void)
205 {
206 
207 	if (acpi_osl_pr_buffer != NULL)
208 		kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
209 
210 	discard_event_queues();
211 	return (AE_OK);
212 }
213 
214 
215 ACPI_PHYSICAL_ADDRESS
216 AcpiOsGetRootPointer()
217 {
218 	ACPI_PHYSICAL_ADDRESS Address;
219 
220 	/*
221 	 * For EFI firmware, the root pointer is defined in EFI systab.
222 	 * The boot code process the table and put the physical address
223 	 * in the acpi-root-tab property.
224 	 */
225 	Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
226 	    DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
227 
228 	if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
229 		Address = NULL;
230 
231 	return (Address);
232 }
233 
234 /*ARGSUSED*/
235 ACPI_STATUS
236 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
237 				ACPI_STRING *NewVal)
238 {
239 
240 	*NewVal = 0;
241 	return (AE_OK);
242 }
243 
/*
 * Copy at most `len' characters of `src' into `dest' and always
 * NUL-terminate the result; `dest' must hold at least len + 1 bytes.
 */
static void
acpica_strncpy(char *dest, const char *src, int len)
{
	int n = 0;

	do {
		dest[n] = src[n];
	} while (dest[n++] != '\0' && n < len);
	dest[n] = '\0';
}
253 
254 ACPI_STATUS
255 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
256 			ACPI_TABLE_HEADER **NewTable)
257 {
258 	char signature[5];
259 	char oemid[7];
260 	char oemtableid[9];
261 	struct _buf *file;
262 	char *buf1, *buf2;
263 	int count;
264 	char acpi_table_loc[128];
265 
266 	acpica_strncpy(signature, ExistingTable->Signature, 4);
267 	acpica_strncpy(oemid, ExistingTable->OemId, 6);
268 	acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
269 
270 #ifdef	DEBUG
271 	cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
272 	    " OEM TABLE ID [%s] OEM rev %x",
273 	    signature, ExistingTable->Revision, oemid, oemtableid,
274 	    ExistingTable->OemRevision);
275 #endif
276 
277 	/* File name format is "signature_oemid_oemtableid.dat" */
278 	(void) strcpy(acpi_table_loc, acpi_table_path);
279 	(void) strcat(acpi_table_loc, signature); /* for example, DSDT */
280 	(void) strcat(acpi_table_loc, "_");
281 	(void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
282 	(void) strcat(acpi_table_loc, "_");
283 	(void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
284 	(void) strcat(acpi_table_loc, ".dat");
285 
286 	file = kobj_open_file(acpi_table_loc);
287 	if (file == (struct _buf *)-1) {
288 		*NewTable = 0;
289 		return (AE_OK);
290 	} else {
291 		buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
292 		count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
293 		if (count >= MAX_DAT_FILE_SIZE) {
294 			cmn_err(CE_WARN, "!acpica: table %s file size too big",
295 			    acpi_table_loc);
296 			*NewTable = 0;
297 		} else {
298 			buf2 = (char *)kmem_alloc(count, KM_SLEEP);
299 			(void) memcpy(buf2, buf1, count);
300 			*NewTable = (ACPI_TABLE_HEADER *)buf2;
301 			cmn_err(CE_NOTE, "!acpica: replacing table: %s",
302 			    acpi_table_loc);
303 		}
304 	}
305 	kobj_close_file(file);
306 	kmem_free(buf1, MAX_DAT_FILE_SIZE);
307 
308 	return (AE_OK);
309 }
310 
311 
/*
 * ACPI semaphore implementation: a counting semaphore built from a
 * mutex/condvar pair.
 */
typedef struct {
	kmutex_t	mutex;		/* protects the counters below */
	kcondvar_t	cv;		/* signalled when units are returned */
	uint32_t	available;	/* units currently available */
	uint32_t	initial;	/* unit count at creation time */
	uint32_t	maximum;	/* maximum requested at creation */
} acpi_sema_t;
322 
323 /*
324  *
325  */
326 void
327 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
328 {
329 	mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
330 	cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
331 	/* no need to enter mutex here at creation */
332 	sp->available = count;
333 	sp->initial = count;
334 	sp->maximum = max;
335 }
336 
337 /*
338  *
339  */
340 void
341 acpi_sema_destroy(acpi_sema_t *sp)
342 {
343 
344 	cv_destroy(&sp->cv);
345 	mutex_destroy(&sp->mutex);
346 }
347 
/*
 * Acquire `count' units from the semaphore.  wait_time is in
 * milliseconds; 0 means never block, ACPI_WAIT_FOREVER means block
 * indefinitely.  Returns AE_OK when the units were obtained, AE_TIME
 * when the request could not be satisfied within the timeout.
 */
ACPI_STATUS
acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
{
	ACPI_STATUS rv = AE_OK;
	clock_t deadline;

	mutex_enter(&sp->mutex);

	if (sp->available >= count) {
		/*
		 * Enough units available, no blocking
		 */
		sp->available -= count;
		mutex_exit(&sp->mutex);
		return (rv);
	} else if (wait_time == 0) {
		/*
		 * Not enough units available and timeout
		 * specifies no blocking
		 */
		rv = AE_TIME;
		mutex_exit(&sp->mutex);
		return (rv);
	}

	/*
	 * Not enough units available and timeout specifies waiting
	 */
	if (wait_time != ACPI_WAIT_FOREVER)
		deadline = ddi_get_lbolt() +
		    (clock_t)drv_usectohz(wait_time * 1000);

	do {
		if (wait_time == ACPI_WAIT_FOREVER)
			cv_wait(&sp->cv, &sp->mutex);
		else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
			/* timed out: give up even if units just arrived */
			rv = AE_TIME;
			break;
		}
	} while (sp->available < count);

	/* if we dropped out of the wait with AE_OK, we got the units */
	if (rv == AE_OK)
		sp->available -= count;

	mutex_exit(&sp->mutex);
	return (rv);
}
399 
/*
 * Return `count' units to the semaphore and wake all waiters so each
 * can re-check whether its request can now be satisfied.
 */
void
acpi_sema_v(acpi_sema_t *sp, unsigned count)
{
	mutex_enter(&sp->mutex);
	sp->available += count;
	cv_broadcast(&sp->cv);
	mutex_exit(&sp->mutex);
}
411 
412 
413 ACPI_STATUS
414 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
415 ACPI_HANDLE *OutHandle)
416 {
417 	acpi_sema_t *sp;
418 
419 	if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
420 		return (AE_BAD_PARAMETER);
421 
422 	sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
423 	acpi_sema_init(sp, MaxUnits, InitialUnits);
424 	*OutHandle = (ACPI_HANDLE)sp;
425 	return (AE_OK);
426 }
427 
428 
429 ACPI_STATUS
430 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
431 {
432 
433 	if (Handle == NULL)
434 		return (AE_BAD_PARAMETER);
435 
436 	acpi_sema_destroy((acpi_sema_t *)Handle);
437 	kmem_free((void *)Handle, sizeof (acpi_sema_t));
438 	return (AE_OK);
439 }
440 
441 ACPI_STATUS
442 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
443 {
444 
445 	if ((Handle == NULL) || (Units < 1))
446 		return (AE_BAD_PARAMETER);
447 
448 	return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
449 }
450 
451 ACPI_STATUS
452 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
453 {
454 
455 	if ((Handle == NULL) || (Units < 1))
456 		return (AE_BAD_PARAMETER);
457 
458 	acpi_sema_v((acpi_sema_t *)Handle, Units);
459 	return (AE_OK);
460 }
461 
462 ACPI_STATUS
463 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
464 {
465 	kmutex_t *mp;
466 
467 	if (OutHandle == NULL)
468 		return (AE_BAD_PARAMETER);
469 
470 	mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
471 	mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
472 	*OutHandle = (ACPI_HANDLE)mp;
473 	return (AE_OK);
474 }
475 
476 void
477 AcpiOsDeleteLock(ACPI_HANDLE Handle)
478 {
479 
480 	if (Handle == NULL)
481 		return;
482 
483 	mutex_destroy((kmutex_t *)Handle);
484 	kmem_free((void *)Handle, sizeof (kmutex_t));
485 }
486 
/*
 * Acquire an OSL lock.  The returned flags value is opaque to ACPI CA
 * and is ignored by AcpiOsReleaseLock() in this implementation.
 * NOTE(review): on bad input this returns AE_BAD_PARAMETER cast into
 * an ACPI_CPU_FLAGS — confirm callers never interpret that value.
 */
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_HANDLE Handle)
{


	if (Handle == NULL)
		return (AE_BAD_PARAMETER);

	/*
	 * The idle thread must not block, so spin on mutex_tryenter()
	 * when called from idle-thread context.
	 */
	if (curthread == CPU->cpu_idle_thread) {
		while (!mutex_tryenter((kmutex_t *)Handle))
			/* spin */;
	} else
		mutex_enter((kmutex_t *)Handle);
	return (AE_OK);
}
502 
/*
 * Release a lock acquired by AcpiOsAcquireLock(); the Flags cookie is
 * not used by this implementation.
 */
void
AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
{
	_NOTE(ARGUNUSED(Flags))

	mutex_exit((kmutex_t *)Handle);
}
510 
511 
512 void *
513 AcpiOsAllocate(ACPI_SIZE Size)
514 {
515 	ACPI_SIZE *tmp_ptr;
516 
517 	Size += sizeof (Size);
518 	tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
519 	*tmp_ptr++ = Size;
520 	return (tmp_ptr);
521 }
522 
523 void
524 AcpiOsFree(void *Memory)
525 {
526 	ACPI_SIZE	size, *tmp_ptr;
527 
528 	tmp_ptr = (ACPI_SIZE *)Memory;
529 	tmp_ptr -= 1;
530 	size = *tmp_ptr;
531 	kmem_free(tmp_ptr, size);
532 }
533 
static int napics_found;	/* number of ioapic addresses in array */
static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];	/* page-aligned ioapic physaddrs */
static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;	/* cached MADT; non-NULL once parsed */
static void *dummy_ioapicadr;	/* zeroed page substituted for ioapic mappings */
538 
539 void
540 acpica_find_ioapics(void)
541 {
542 	int			madt_seen, madt_size;
543 	ACPI_SUBTABLE_HEADER		*ap;
544 	ACPI_MADT_IO_APIC		*mia;
545 
546 	if (acpi_mapic_dtp != NULL)
547 		return;	/* already parsed table */
548 	if (AcpiGetTable(ACPI_SIG_MADT, 1,
549 	    (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
550 		return;
551 
552 	napics_found = 0;
553 
554 	/*
555 	 * Search the MADT for ioapics
556 	 */
557 	ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
558 	madt_size = acpi_mapic_dtp->Header.Length;
559 	madt_seen = sizeof (*acpi_mapic_dtp);
560 
561 	while (madt_seen < madt_size) {
562 
563 		switch (ap->Type) {
564 		case ACPI_MADT_TYPE_IO_APIC:
565 			mia = (ACPI_MADT_IO_APIC *) ap;
566 			if (napics_found < MAX_IO_APIC) {
567 				ioapic_paddr[napics_found++] =
568 				    (ACPI_PHYSICAL_ADDRESS)
569 				    (mia->Address & PAGEMASK);
570 			}
571 			break;
572 
573 		default:
574 			break;
575 		}
576 
577 		/* advance to next entry */
578 		madt_seen += ap->Length;
579 		ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
580 	}
581 	if (dummy_ioapicadr == NULL)
582 		dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
583 }
584 
585 
586 void *
587 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
588 {
589 	int	i;
590 
591 	/*
592 	 * If the iopaic address table is populated, check if trying
593 	 * to access an ioapic.  Instead, return a pointer to a dummy ioapic.
594 	 */
595 	for (i = 0; i < napics_found; i++) {
596 		if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
597 			return (dummy_ioapicadr);
598 	}
599 	/* FUTUREWORK: test PhysicalAddress for > 32 bits */
600 	return (psm_map_new((paddr_t)PhysicalAddress,
601 	    (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
602 }
603 
604 void
605 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
606 {
607 	/*
608 	 * Check if trying to unmap dummy ioapic address.
609 	 */
610 	if (LogicalAddress == dummy_ioapicadr)
611 		return;
612 
613 	psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
614 }
615 
/*
 * Translate a logical address to a physical one.
 */
/*ARGSUSED*/
ACPI_STATUS
AcpiOsGetPhysicalAddress(void *LogicalAddress,
			ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
{

	/* UNIMPLEMENTED: not invoked by ACPI CA code */
	return (AE_NOT_IMPLEMENTED);
}
625 
626 
ACPI_OSD_HANDLER acpi_isr;	/* ACPI CA's SCI handler routine */
void *acpi_isr_context;		/* context cookie passed to acpi_isr */
629 
630 uint_t
631 acpi_wrapper_isr(char *arg)
632 {
633 	_NOTE(ARGUNUSED(arg))
634 
635 	int	status;
636 
637 	status = (*acpi_isr)(acpi_isr_context);
638 
639 	if (status == ACPI_INTERRUPT_HANDLED) {
640 		return (DDI_INTR_CLAIMED);
641 	} else {
642 		return (DDI_INTR_UNCLAIMED);
643 	}
644 }
645 
646 static int acpi_intr_hooked = 0;
647 
/*
 * Attach ACPI CA's SCI handler.  InterruptNumber is ignored: the
 * platform SCI vector reported by acpica_get_sci() is always used.
 */
ACPI_STATUS
AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
		ACPI_OSD_HANDLER ServiceRoutine,
		void *Context)
{
	_NOTE(ARGUNUSED(InterruptNumber))

	int retval;
	int sci_vect;
	iflag_t sci_flags;

	/* stash the handler so acpi_wrapper_isr() can invoke it */
	acpi_isr = ServiceRoutine;
	acpi_isr_context = Context;

	/*
	 * Get SCI (adjusted for PIC/APIC mode if necessary)
	 */
	if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
		return (AE_ERROR);
	}

#ifdef	DEBUG
	cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
#endif

	retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
	    "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
	if (retval) {
		acpi_intr_hooked = 1;
		return (AE_OK);
	} else
		return (AE_BAD_PARAMETER);
}
681 
/*
 * Detach the SCI handler attached by AcpiOsInstallInterruptHandler().
 * ServiceRoutine is not verified against the installed handler.
 */
ACPI_STATUS
AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
			ACPI_OSD_HANDLER ServiceRoutine)
{
	_NOTE(ARGUNUSED(ServiceRoutine))

#ifdef	DEBUG
	cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
#endif
	if (acpi_intr_hooked) {
		/*
		 * NOTE(review): the IPL here (LOCK_LEVEL - 1) is assumed
		 * to equal the SCI_IPL used in add_avintr() above —
		 * confirm the two levels match.
		 */
		rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
		    InterruptNumber);
		acpi_intr_hooked = 0;
	}
	return (AE_OK);
}
698 
699 
/*
 * Return an identifier for the calling thread.
 */
ACPI_THREAD_ID
AcpiOsGetThreadId(void)
{
	/*
	 * ACPI CA doesn't care what actual value is returned as long
	 * as it is non-zero and unique to each existing thread.
	 * ACPI CA assumes that thread ID is castable to a pointer,
	 * so we use the current thread pointer (never NULL for a
	 * running thread).
	 */
	return (curthread);
}
711 
712 /*
713  *
714  */
715 ACPI_STATUS
716 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK  Function,
717     void *Context)
718 {
719 
720 	if (!acpica_eventq_init) {
721 		/*
722 		 * Create taskqs for event handling
723 		 */
724 		if (init_event_queues() != AE_OK)
725 			return (AE_ERROR);
726 	}
727 
728 	if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
729 	    DDI_NOSLEEP) == DDI_FAILURE) {
730 #ifdef	DEBUG
731 		cmn_err(CE_WARN, "!acpica: unable to dispatch event");
732 #endif
733 		return (AE_ERROR);
734 	}
735 	return (AE_OK);
736 
737 }
738 
739 void
740 AcpiOsSleep(ACPI_INTEGER Milliseconds)
741 {
742 	/*
743 	 * During kernel startup, before the first tick interrupt
744 	 * has taken place, we can't call delay; very late in
745 	 * kernel shutdown or suspend/resume, clock interrupts
746 	 * are blocked, so delay doesn't work then either.
747 	 * So we busy wait if lbolt == 0 (kernel startup)
748 	 * or if acpica_use_safe_delay has been set to a
749 	 * non-zero value.
750 	 */
751 	if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
752 		drv_usecwait(Milliseconds * 1000);
753 	else
754 		delay(drv_usectohz(Milliseconds * 1000));
755 }
756 
/*
 * Busy-wait for Microseconds; used for short delays where blocking
 * is not appropriate.
 */
void
AcpiOsStall(UINT32 Microseconds)
{
	drv_usecwait(Microseconds);
}
762 
763 
/*
 * Implementation of "Windows 2001" compatible I/O permission map:
 * a table of port ranges with per-range read/write permission bits,
 * consulted by AcpiOsReadPort()/AcpiOsWritePort().
 */
#define	OSL_IO_NONE	(0)
#define	OSL_IO_READ	(1<<0)
#define	OSL_IO_WRITE	(1<<1)
#define	OSL_IO_RW	(OSL_IO_READ | OSL_IO_WRITE)
#define	OSL_IO_TERM	(1<<2)		/* marks the last table entry */
#define	OSL_IO_DEFAULT	OSL_IO_RW

static struct io_perm  {
	ACPI_IO_ADDRESS	low;		/* inclusive range start */
	ACPI_IO_ADDRESS	high;		/* inclusive range end */
	uint8_t		perm;		/* OSL_IO_* permission bits */
} osl_io_perm[] = {
	/* deny AML access to the PCI config address/data ports */
	{ 0xcf8, 0xd00, OSL_IO_NONE | OSL_IO_TERM }
};
782 
783 
784 /*
785  *
786  */
787 static struct io_perm *
788 osl_io_find_perm(ACPI_IO_ADDRESS addr)
789 {
790 	struct io_perm *p;
791 
792 	p = osl_io_perm;
793 	while (p != NULL) {
794 		if ((p->low <= addr) && (addr <= p->high))
795 			break;
796 		p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
797 	}
798 
799 	return (p);
800 }
801 
802 /*
803  *
804  */
805 ACPI_STATUS
806 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
807 {
808 	struct io_perm *p;
809 
810 	/* verify permission */
811 	p = osl_io_find_perm(Address);
812 	if (p && (p->perm & OSL_IO_READ) == 0) {
813 		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
814 		    (long)Address, Width);
815 		*Value = 0xffffffff;
816 		return (AE_ERROR);
817 	}
818 
819 	switch (Width) {
820 	case 8:
821 		*Value = inb(Address);
822 		break;
823 	case 16:
824 		*Value = inw(Address);
825 		break;
826 	case 32:
827 		*Value = inl(Address);
828 		break;
829 	default:
830 		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
831 		    (long)Address, Width);
832 		return (AE_BAD_PARAMETER);
833 	}
834 	return (AE_OK);
835 }
836 
837 ACPI_STATUS
838 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
839 {
840 	struct io_perm *p;
841 
842 	/* verify permission */
843 	p = osl_io_find_perm(Address);
844 	if (p && (p->perm & OSL_IO_WRITE) == 0) {
845 		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
846 		    (long)Address, Width);
847 		return (AE_ERROR);
848 	}
849 
850 	switch (Width) {
851 	case 8:
852 		outb(Address, Value);
853 		break;
854 	case 16:
855 		outw(Address, Value);
856 		break;
857 	case 32:
858 		outl(Address, Value);
859 		break;
860 	default:
861 		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
862 		    (long)Address, Width);
863 		return (AE_BAD_PARAMETER);
864 	}
865 	return (AE_OK);
866 }
867 
868 
/*
 * Transfer one naturally-sized value between *ptr and *val:
 * rw != 0 writes *val to *ptr, rw == 0 reads *ptr into *val.
 */
#define	OSL_RW(ptr, val, type, rw) \
	{ if (rw) *((type *)(ptr)) = *((type *) val); \
	    else *((type *) val) = *((type *)(ptr)); }
876 
877 
/*
 * Common helper for AcpiOsReadMemory()/AcpiOsWriteMemory(): map the
 * physical address, transfer Width bits (8/16/32) in the direction
 * selected by `write', then unmap.  Invalid widths are logged and
 * the transfer is skipped.
 */
static void
osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value,
    UINT32 Width, int write)
{
	size_t	maplen = Width / 8;
	caddr_t	ptr;

	ptr = psm_map_new((paddr_t)Address, maplen,
	    PSM_PROT_WRITE | PSM_PROT_READ);

	switch (maplen) {
	case 1:
		OSL_RW(ptr, Value, uint8_t, write);
		break;
	case 2:
		OSL_RW(ptr, Value, uint16_t, write);
		break;
	case 4:
		OSL_RW(ptr, Value, uint32_t, write);
		break;
	default:
		cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
		    Width);
		break;
	}

	psm_unmap(ptr, maplen);
}
906 
/*
 * Read Width bits of physical memory at Address into *Value.
 */
ACPI_STATUS
AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
		UINT32 *Value, UINT32 Width)
{
	osl_rw_memory(Address, Value, Width, 0);
	return (AE_OK);
}
914 
/*
 * Write Width bits of Value to physical memory at Address.
 */
ACPI_STATUS
AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
		UINT32 Value, UINT32 Width)
{
	osl_rw_memory(Address, &Value, Width, 1);
	return (AE_OK);
}
922 
923 
924 ACPI_STATUS
925 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
926 			void *Value, UINT32 Width)
927 {
928 
929 	switch (Width) {
930 	case 8:
931 		*((UINT64 *)Value) = (UINT64)(*pci_getb_func)
932 		    (PciId->Bus, PciId->Device, PciId->Function, Register);
933 		break;
934 	case 16:
935 		*((UINT64 *)Value) = (UINT64)(*pci_getw_func)
936 		    (PciId->Bus, PciId->Device, PciId->Function, Register);
937 		break;
938 	case 32:
939 		*((UINT64 *)Value) = (UINT64)(*pci_getl_func)
940 		    (PciId->Bus, PciId->Device, PciId->Function, Register);
941 		break;
942 	case 64:
943 	default:
944 		cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
945 		    Register, Width);
946 		return (AE_BAD_PARAMETER);
947 	}
948 	return (AE_OK);
949 }
950 
/*
 * Patchable switch: set to 0 (via kmdb or /etc/system) to log and
 * ignore AML writes to PCI config space instead of performing them.
 */
int acpica_write_pci_config_ok = 1;
955 
956 ACPI_STATUS
957 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
958 		ACPI_INTEGER Value, UINT32 Width)
959 {
960 
961 	if (!acpica_write_pci_config_ok) {
962 		cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
963 		    " %lx %d not permitted", PciId->Bus, PciId->Device,
964 		    PciId->Function, Register, (long)Value, Width);
965 		return (AE_OK);
966 	}
967 
968 	switch (Width) {
969 	case 8:
970 		(*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
971 		    Register, (uint8_t)Value);
972 		break;
973 	case 16:
974 		(*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
975 		    Register, (uint16_t)Value);
976 		break;
977 	case 32:
978 		(*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
979 		    Register, (uint32_t)Value);
980 		break;
981 	case 64:
982 	default:
983 		cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
984 		    Register, Width);
985 		return (AE_BAD_PARAMETER);
986 	}
987 	return (AE_OK);
988 }
989 
990 /*
991  * Called with ACPI_HANDLEs for both a PCI Config Space
992  * OpRegion and (what ACPI CA thinks is) the PCI device
993  * to which this ConfigSpace OpRegion belongs.  Since
994  * ACPI CA depends on a valid _BBN object being present
995  * and this is not always true (one old x86 had broken _BBN),
996  * we go ahead and get the correct PCI bus number using the
997  * devinfo mapping (which compensates for broken _BBN).
998  *
999  * Default values for bus, segment, device and function are
1000  * all 0 when ACPI CA can't figure them out.
1001  *
1002  * Some BIOSes implement _BBN() by reading PCI config space
1003  * on bus #0 - which means that we'll recurse when we attempt
1004  * to create the devinfo-to-ACPI map.  If Derive is called during
1005  * scan_d2a_map, we don't translate the bus # and return.
1006  *
1007  * We get the parent of the OpRegion, which must be a PCI
1008  * node, fetch the associated devinfo node and snag the
1009  * b/d/f from it.
1010  */
void
AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
		ACPI_PCI_ID **PciId)
{
	ACPI_HANDLE handle;
	dev_info_t *dip;
	int bus, device, func, devfn;


	/*
	 * See above - avoid recursing during scanning_d2a_map.
	 */
	if (scanning_d2a_map)
		return;

	/*
	 * Get the OpRegion's parent
	 */
	if (AcpiGetParent(chandle, &handle) != AE_OK)
		return;

	/*
	 * If we've mapped the ACPI node to the devinfo
	 * tree, use the devinfo reg property
	 */
	if (acpica_get_devinfo(handle, &dip) == AE_OK) {
		(void) acpica_get_bdf(dip, &bus, &device, &func);
		(*PciId)->Bus = bus;
		(*PciId)->Device = device;
		(*PciId)->Function = func;
	} else if (acpica_eval_int(handle, "_ADR", &devfn) == AE_OK) {
		/*
		 * no devinfo node - just confirm the d/f;
		 * _ADR encodes device in the high word, function in
		 * the low word, so leave the bus number untouched.
		 */
		(*PciId)->Device = (devfn >> 16) & 0xFFFF;
		(*PciId)->Function = devfn & 0xFFFF;
	}
}
1047 
1048 
/*
 * Check whether a memory range is readable.
 */
/*ARGSUSED*/
BOOLEAN
AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
{

	/* Always says yes; all mapped memory assumed readable */
	return (1);
}
1057 
/*
 * Check whether a memory range is writable.
 */
/*ARGSUSED*/
BOOLEAN
AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
{

	/* Always says yes; all mapped memory assumed writable */
	return (1);
}
1066 
/*
 * Return the current timer value in 100ns units, as ACPI CA expects.
 */
UINT64
AcpiOsGetTimer(void)
{
	/* gethrtime() returns 1nS resolution; convert to 100nS granules */
	return ((gethrtime() + 50) / 100);
}
1073 
/* Map of supported _OSI feature strings to required core-feature flags. */
static struct AcpiOSIFeature_s {
	uint64_t	control_flag;	/* required core feature; 0 = none */
	const char	*feature_name;	/* string matched against _OSI arg */
} AcpiOSIFeatures[] = {
	{ ACPI_FEATURE_OSI_MODULE,	"Module Device" },
	{ 0,				"Processor Device" }
};
1081 
1082 /*ARGSUSED*/
1083 ACPI_STATUS
1084 AcpiOsValidateInterface(char *feature)
1085 {
1086 	int i;
1087 
1088 	ASSERT(feature != NULL);
1089 	for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1090 	    i++) {
1091 		if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1092 			continue;
1093 		}
1094 		/* Check whether required core features are available. */
1095 		if (AcpiOSIFeatures[i].control_flag != 0 &&
1096 		    acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1097 		    AcpiOSIFeatures[i].control_flag) {
1098 			break;
1099 		}
1100 		/* Feature supported. */
1101 		return (AE_OK);
1102 	}
1103 
1104 	return (AE_SUPPORT);
1105 }
1106 
/*
 * Address-space access validation hook; this OSL imposes no
 * restrictions beyond the I/O permission map checked elsewhere.
 */
/*ARGSUSED*/
ACPI_STATUS
AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
    ACPI_SIZE length)
{
	return (AE_OK);
}
1114 
/*
 * Handle a signal (e.g. AML Breakpoint/Fatal) from ACPI CA; currently
 * just logs that it was called.
 */
ACPI_STATUS
AcpiOsSignal(UINT32 Function, void *Info)
{
	_NOTE(ARGUNUSED(Function, Info))

	/* FUTUREWORK: debugger support */

	cmn_err(CE_NOTE, "!OsSignal unimplemented");
	return (AE_OK);
}
1125 
/*
 * printf-style entry point; simply forwards to AcpiOsVprintf().
 */
void ACPI_INTERNAL_VAR_XFACE
AcpiOsPrintf(const char *Format, ...)
{
	va_list ap;

	va_start(ap, Format);
	AcpiOsVprintf(Format, ap);
	va_end(ap);
}
1135 
/*
 * When != 0, sends output to console
 * Patchable with kmdb or /etc/system.
 */
int acpica_console_out = 0;

#define	ACPICA_OUTBUF_LEN	160
char	acpica_outbuf[ACPICA_OUTBUF_LEN];	/* line-assembly buffer */
int	acpica_outbuf_offset;			/* next free byte in acpica_outbuf */
1145 
1146 /*
1147  *
1148  */
1149 static void
1150 acpica_pr_buf(char *buf)
1151 {
1152 	char c, *bufp, *outp;
1153 	int	out_remaining;
1154 
1155 	/*
1156 	 * copy the supplied buffer into the output buffer
1157 	 * when we hit a '\n' or overflow the output buffer,
1158 	 * output and reset the output buffer
1159 	 */
1160 	bufp = buf;
1161 	outp = acpica_outbuf + acpica_outbuf_offset;
1162 	out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1163 	while (c = *bufp++) {
1164 		*outp++ = c;
1165 		if (c == '\n' || --out_remaining == 0) {
1166 			*outp = '\0';
1167 			if (acpica_console_out)
1168 				printf(acpica_outbuf);
1169 			else
1170 				(void) strlog(0, 0, 0,
1171 				    SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1172 				    acpica_outbuf);
1173 			acpica_outbuf_offset = 0;
1174 			outp = acpica_outbuf;
1175 			out_remaining = ACPICA_OUTBUF_LEN - 1;
1176 		}
1177 	}
1178 
1179 	acpica_outbuf_offset = outp - acpica_outbuf;
1180 }
1181 
/*
 * Format ACPI CA output into the preallocated staging buffer and hand
 * it to acpica_pr_buf().  Falls back to vprintf() if AcpiOsInitialize()
 * could not allocate the buffer.
 * NOTE(review): the shared staging buffer is used without a lock —
 * presumably callers are serialized; verify.
 */
void
AcpiOsVprintf(const char *Format, va_list Args)
{

	/*
	 * If AcpiOsInitialize() failed to allocate a string buffer,
	 * resort to vprintf().
	 */
	if (acpi_osl_pr_buffer == NULL) {
		vprintf(Format, Args);
		return;
	}

	/*
	 * It is possible that a very long debug output statement will
	 * be truncated; this is silently ignored.
	 */
	(void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
	acpica_pr_buf(acpi_osl_pr_buffer);
}
1202 
/*
 * Redirect ACPI CA output to another destination; not supported.
 */
void
AcpiOsRedirectOutput(void *Destination)
{
	_NOTE(ARGUNUSED(Destination))

	/* FUTUREWORK: debugger support */

#ifdef	DEBUG
	cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
#endif
}
1214 
1215 
/*
 * Read a line of input for the ACPI debugger; not supported, so
 * always report zero characters read.
 */
UINT32
AcpiOsGetLine(char *Buffer)
{
	_NOTE(ARGUNUSED(Buffer))

	/* FUTUREWORK: debugger support */

	return (0);
}
1225 
1226 /*
1227  * Device tree binding
1228  */
/*
 * AcpiWalkNamespace() callback: locate the PCI/PCIe host bridge whose
 * bus number matches ctxp (an int cast through intptr_t).  On a match
 * the handle is stored through rvpp and the walk terminates; subtrees
 * that cannot contain the bus are pruned with AE_CTRL_DEPTH.
 */
static ACPI_STATUS
acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
{
	_NOTE(ARGUNUSED(lvl));

	int sta, hid, bbn;
	int busno = (intptr_t)ctxp;
	ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;

	/* Check whether device exists. */
	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
		/*
		 * Skip object if device doesn't exist.
		 * According to ACPI Spec,
		 * 1) setting either bit 0 or bit 3 means that device exists.
		 * 2) Absence of _STA method means all status bits set.
		 */
		return (AE_CTRL_DEPTH);
	}

	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
		/* Non PCI/PCIe host bridge. */
		return (AE_OK);
	}

	if (acpi_has_broken_bbn) {
		ACPI_BUFFER rb;
		rb.Pointer = NULL;
		rb.Length = ACPI_ALLOCATE_BUFFER;

		/* Decree _BBN == n from PCI<n> */
		if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
			return (AE_CTRL_TERMINATE);
		}
		/* derive the bus number from the 4th char of "PCIn" */
		bbn = ((char *)rb.Pointer)[3] - '0';
		AcpiOsFree(rb.Pointer);
		if (bbn == busno || busno == 0) {
			*hdlp = hdl;
			return (AE_CTRL_TERMINATE);
		}
	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
		if (bbn == busno) {
			*hdlp = hdl;
			return (AE_CTRL_TERMINATE);
		}
	} else if (busno == 0) {
		/* no _BBN at all: assume this bridge serves bus 0 */
		*hdlp = hdl;
		return (AE_CTRL_TERMINATE);
	}

	return (AE_CTRL_DEPTH);
}
1283 
1284 static int
1285 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1286 {
1287 	ACPI_HANDLE sbobj, busobj;
1288 
1289 	/* initialize static flag by querying ACPI namespace for bug */
1290 	if (acpi_has_broken_bbn == -1)
1291 		acpi_has_broken_bbn = acpica_query_bbn_problem();
1292 
1293 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1294 		busobj = NULL;
1295 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1296 		    acpica_find_pcibus_walker, (void *)(intptr_t)busno,
1297 		    (void **)&busobj);
1298 		if (busobj != NULL) {
1299 			*rh = busobj;
1300 			return (AE_OK);
1301 		}
1302 	}
1303 
1304 	return (AE_ERROR);
1305 }
1306 
/*
 * AcpiWalkNamespace() callback for acpica_query_bbn_problem(): count
 * (in *ctxp) the PCI/PCIe host bridges whose _BBN evaluates to 0, and
 * terminate the walk as soon as a second one is seen.
 */
static ACPI_STATUS
acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
{
	_NOTE(ARGUNUSED(lvl));
	_NOTE(ARGUNUSED(rvpp));

	int sta, hid, bbn;
	int *cntp = (int *)ctxp;	/* running count of zero-_BBN buses */

	/* Check whether device exists. */
	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
		/*
		 * Skip object if device doesn't exist.
		 * According to ACPI Spec,
		 * 1) setting either bit 0 or bit 3 means that device exists.
		 * 2) Absence of _STA method means all status bits set.
		 */
		return (AE_CTRL_DEPTH);
	}

	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
		/* Non PCI/PCIe host bridge. */
		return (AE_OK);
	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
	    bbn == 0 && ++(*cntp) > 1) {
		/*
		 * If we find more than one bus with a 0 _BBN
		 * we have the problem that BigBear's BIOS shows
		 */
		return (AE_CTRL_TERMINATE);
	} else {
		/*
		 * Skip children of PCI/PCIe host bridge.
		 */
		return (AE_CTRL_DEPTH);
	}
}
1346 
1347 /*
1348  * Look for ACPI problem where _BBN is zero for multiple PCI buses
1349  * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1350  * below if it exists.
1351  */
1352 static int
1353 acpica_query_bbn_problem(void)
1354 {
1355 	ACPI_HANDLE sbobj;
1356 	int zerobbncnt;
1357 	void *rv;
1358 
1359 	zerobbncnt = 0;
1360 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1361 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1362 		    acpica_query_bbn_walker, &zerobbncnt, &rv);
1363 	}
1364 
1365 	return (zerobbncnt > 1 ? 1 : 0);
1366 }
1367 
static const char hextab[] = "0123456789ABCDEF";

/*
 * hexdig: return the 4-bit value of the hex digit character 'c'.
 * Lowercase 'a'-'f' are accepted by folding to uppercase.  Returns
 * zero for any character that is not a valid hex digit (note that this
 * makes '0' and invalid input indistinguishable to the caller).
 */
static int
hexdig(int c)
{
	/* Fold lowercase letters to uppercase. */
	int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
	int j = sizeof (hextab) - 1;	/* skip the NUL terminator */

	/*
	 * Scan hextab from 'F' down to (but not including) index 0; the
	 * index where x matches is its value.  Falling through to index
	 * 0 yields 0, whether or not x is actually '0'.
	 *
	 * Starting at sizeof (hextab) - 1 (rather than sizeof (hextab))
	 * keeps a NUL input from matching the table's terminator and
	 * returning the out-of-range value 16.
	 */
	while (--j && (x != hextab[j])) {
	}
	return (j);
}
1387 
static int
CompressEisaID(char *np)
{
	/*
	 * Pack a 7-character ASCII EISA device name (3 letters followed
	 * by 4 hex digits) into the 4-byte compressed EISA ID: each
	 * letter contributes its low 5 bits, each hex digit one nibble.
	 */
	union {
		char octets[4];
		int retval;
	} id;

	/* Three compressed letters, 5 bits each, packed across bytes. */
	id.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
	id.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);

	/* Four hex digits, one nibble apiece. */
	id.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
	id.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);

	return (id.retval);
}
1408 
1409 ACPI_STATUS
1410 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1411 {
1412 	ACPI_STATUS status;
1413 	ACPI_BUFFER rb;
1414 	ACPI_OBJECT ro;
1415 
1416 	rb.Pointer = &ro;
1417 	rb.Length = sizeof (ro);
1418 	if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1419 	    ACPI_TYPE_INTEGER)) == AE_OK)
1420 		*rint = ro.Integer.Value;
1421 
1422 	return (status);
1423 }
1424 
/*
 * Evaluate 'method' (expected to be _HID) on 'dev' and return its value
 * through *rint as an integer.  An Integer result is returned directly;
 * a String result is assumed to be a 7-character EISA ID and is
 * compressed to its 4-byte integer form.  Returns AE_OK on success,
 * AE_ERROR otherwise (including for 8-character ACPI IDs, which are
 * deliberately rejected).
 */
static int
acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
{
	ACPI_BUFFER rb;
	ACPI_OBJECT *rv;

	rb.Pointer = NULL;
	rb.Length = ACPI_ALLOCATE_BUFFER;	/* ACPICA allocates result */
	if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
	    rb.Length != 0) {
		rv = rb.Pointer;
		if (rv->Type == ACPI_TYPE_INTEGER) {
			*rint = rv->Integer.Value;
			AcpiOsFree(rv);
			return (AE_OK);
		} else if (rv->Type == ACPI_TYPE_STRING) {
			char *stringData;

			/* Convert the string into an EISA ID */
			if (rv->String.Pointer == NULL) {
				AcpiOsFree(rv);
				return (AE_ERROR);
			}

			stringData = rv->String.Pointer;

			/*
			 * If the string is an EisaID, it must be 7
			 * characters; if it's an ACPI ID, it will be 8
			 * (and we don't care about ACPI ids here).
			 */
			if (strlen(stringData) != 7) {
				AcpiOsFree(rv);
				return (AE_ERROR);
			}

			*rint = CompressEisaID(stringData);
			AcpiOsFree(rv);
			return (AE_OK);
		} else
			AcpiOsFree(rv);
	}
	return (AE_ERROR);
}
1469 
1470 /*
1471  * Create linkage between devinfo nodes and ACPI nodes
1472  */
1473 ACPI_STATUS
1474 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1475 {
1476 	ACPI_STATUS status;
1477 	ACPI_BUFFER rb;
1478 
1479 	/*
1480 	 * Tag the devinfo node with the ACPI name
1481 	 */
1482 	rb.Pointer = NULL;
1483 	rb.Length = ACPI_ALLOCATE_BUFFER;
1484 	status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1485 	if (ACPI_FAILURE(status)) {
1486 		cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1487 	} else {
1488 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1489 		    "acpi-namespace", (char *)rb.Pointer);
1490 		AcpiOsFree(rb.Pointer);
1491 
1492 		/*
1493 		 * Tag the ACPI node with the dip
1494 		 */
1495 		status = acpica_set_devinfo(acpiobj, dip);
1496 		ASSERT(ACPI_SUCCESS(status));
1497 	}
1498 
1499 	return (status);
1500 }
1501 
1502 /*
1503  * Destroy linkage between devinfo nodes and ACPI nodes
1504  */
ACPI_STATUS
acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
{
	/* Tear down both directions of the linkage made by tagging. */
	(void) acpica_unset_devinfo(acpiobj);
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");

	return (AE_OK);
}
1513 
1514 /*
1515  * Return the ACPI device node matching the CPU dev_info node.
1516  */
1517 ACPI_STATUS
1518 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1519 {
1520 	int i;
1521 
1522 	/*
1523 	 * if cpu_map itself is NULL, we're a uppc system and
1524 	 * acpica_build_processor_map() hasn't been called yet.
1525 	 * So call it here
1526 	 */
1527 	if (cpu_map == NULL) {
1528 		(void) acpica_build_processor_map();
1529 		if (cpu_map == NULL)
1530 			return (AE_ERROR);
1531 	}
1532 
1533 	if (cpu_id < 0) {
1534 		return (AE_ERROR);
1535 	}
1536 
1537 	/*
1538 	 * search object with cpuid in cpu_map
1539 	 */
1540 	mutex_enter(&cpu_map_lock);
1541 	for (i = 0; i < cpu_map_count; i++) {
1542 		if (cpu_map[i]->cpu_id == cpu_id) {
1543 			break;
1544 		}
1545 	}
1546 	if (i >= cpu_map_count || (cpu_map[i]->obj == NULL)) {
1547 		mutex_exit(&cpu_map_lock);
1548 		return (AE_ERROR);
1549 	}
1550 	*rh = cpu_map[cpu_id]->obj;
1551 	mutex_exit(&cpu_map_lock);
1552 
1553 	return (AE_OK);
1554 }
1555 
1556 /*
1557  * Determine if this object is a processor
1558  */
/*
 * AcpiWalkNamespace()/AcpiGetDevices() callback: if the object is a
 * Processor object or a processor Device, extract its ACPI processor
 * id and record the (id, handle) pair in the cpu map.
 *
 * NOTE(review): for any other object type, acpi_id is used
 * uninitialized in the acpica_add_processor_to_map() call below.  The
 * current callers only present Processor objects or ACPI0007 devices,
 * so that path appears unreachable — confirm before reusing this
 * callback with a broader walk.
 */
static ACPI_STATUS
acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
{
	ACPI_STATUS status;
	ACPI_OBJECT_TYPE objtype;
	unsigned long acpi_id;
	ACPI_BUFFER rb;

	if (AcpiGetType(obj, &objtype) != AE_OK)
		return (AE_OK);

	if (objtype == ACPI_TYPE_PROCESSOR) {
		/* process a Processor */
		rb.Pointer = NULL;
		rb.Length = ACPI_ALLOCATE_BUFFER;
		status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
		    ACPI_TYPE_PROCESSOR);
		if (status != AE_OK) {
			cmn_err(CE_WARN, "!acpica: error probing Processor");
			return (status);
		}
		/* The processor id is part of the Processor object. */
		acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
		AcpiOsFree(rb.Pointer);
	} else if (objtype == ACPI_TYPE_DEVICE) {
		/* process a processor Device */
		rb.Pointer = NULL;
		rb.Length = ACPI_ALLOCATE_BUFFER;
		status = AcpiGetObjectInfo(obj, &rb);
		if (status != AE_OK) {
			cmn_err(CE_WARN,
			    "!acpica: error probing Processor Device\n");
			return (status);
		}
		ASSERT(((ACPI_OBJECT *)rb.Pointer)->Type ==
		    ACPI_TYPE_DEVICE);

		/* For a Device, the decimal _UID is the processor id. */
		if (ddi_strtoul(
		    ((ACPI_DEVICE_INFO *)rb.Pointer)->UniqueId.Value,
		    NULL, 10, &acpi_id) != 0) {
			AcpiOsFree(rb.Pointer);
			cmn_err(CE_WARN,
			    "!acpica: error probing Processor Device _UID\n");
			return (AE_ERROR);
		}
		AcpiOsFree(rb.Pointer);
	}
	/* The APIC id is unknown here; recorded as UINT32_MAX. */
	(void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);

	return (AE_OK);
}
1609 
/*
 * Build the devinfo <-> ACPI linkage ("d2a map") for all PCI buses:
 * find each child-of-root PCI/PCIe bus node, locate the matching ACPI
 * host-bridge object, tag the pair, and recurse over the subtree.
 * d2a_done is set once the scan has run (or failed hard); a prior
 * hard failure (map_error) turns subsequent calls into no-ops.
 */
static void
scan_d2a_map(void)
{
	dev_info_t *dip, *cdip;
	ACPI_HANDLE acpiobj;
	char *device_type_prop;
	int bus;
	static int map_error = 0;

	if (map_error)
		return;

	scanning_d2a_map = 1;

	/*
	 * Find all child-of-root PCI buses, and find their corresponding
	 * ACPI child-of-root PCI nodes.  For each one, add to the
	 * d2a table.
	 */

	for (dip = ddi_get_child(ddi_root_node());
	    dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {

		/* prune non-PCI nodes */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS,
		    "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
			continue;

		if ((strcmp("pci", device_type_prop) != 0) &&
		    (strcmp("pciex", device_type_prop) != 0)) {
			ddi_prop_free(device_type_prop);
			continue;
		}

		ddi_prop_free(device_type_prop);

		/*
		 * To get bus number of dip, get first child and get its
		 * bus number.  If NULL, just continue, because we don't
		 * care about bus nodes with no children anyway.
		 */
		if ((cdip = ddi_get_child(dip)) == NULL)
			continue;

		if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "Can't get bus number of PCI child?");
#endif
			/* Hard failure: abandon the scan permanently. */
			map_error = 1;
			scanning_d2a_map = 0;
			d2a_done = 1;
			return;
		}

		if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
#endif
			/* Soft failure: skip this bus, keep scanning. */
			map_error = 1;
			continue;
		}

		acpica_tag_devinfo(dip, acpiobj);

		/* call recursively to enumerate subtrees */
		scan_d2a_subtree(dip, acpiobj, bus);
	}

	scanning_d2a_map = 0;
	d2a_done = 1;
}
1683 
1684 /*
1685  * For all acpi child devices of acpiobj, find their matching
1686  * dip under "dip" argument.  (matching means "matches dev/fn").
1687  * bus is assumed to already be a match from caller, and is
1688  * used here only to record in the d2a entry.  Recurse if necessary.
1689  */
static void
scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
{
	int acpi_devfn, hid;
	ACPI_HANDLE acld;
	dev_info_t *dcld;
	int dcld_b, dcld_d, dcld_f;
	int dev, func;
	char *device_type_prop;

	acld = NULL;
	while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
	    == AE_OK) {
		/* get the dev/func we're looking for in the devinfo tree */
		if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
			continue;
		/* _ADR encodes device in the high word, function low. */
		dev = (acpi_devfn >> 16) & 0xFFFF;
		func = acpi_devfn & 0xFFFF;

		/* look through all the immediate children of dip */
		for (dcld = ddi_get_child(dip); dcld != NULL;
		    dcld = ddi_get_next_sibling(dcld)) {
			if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
				continue;

			/* dev must match; function must match or wildcard */
			if (dcld_d != dev ||
			    (func != 0xFFFF && func != dcld_f))
				continue;
			bus = dcld_b;

			/* found a match, record it */
			acpica_tag_devinfo(dcld, acld);

			/* if we find a bridge, recurse from here */
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
			    DDI_PROP_DONTPASS, "device_type",
			    &device_type_prop) == DDI_PROP_SUCCESS) {
				if ((strcmp("pci", device_type_prop) == 0) ||
				    (strcmp("pciex", device_type_prop) == 0))
					scan_d2a_subtree(dcld, acld, bus);
				ddi_prop_free(device_type_prop);
			}

			/* done finding a match, so break now */
			break;
		}
	}
}
1739 
1740 /*
1741  * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1742  */
1743 int
1744 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1745 {
1746 	pci_regspec_t *pci_rp;
1747 	int len;
1748 
1749 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1750 	    "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1751 		return (-1);
1752 
1753 	if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1754 		ddi_prop_free(pci_rp);
1755 		return (-1);
1756 	}
1757 	if (bus != NULL)
1758 		*bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1759 	if (device != NULL)
1760 		*device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1761 	if (func != NULL)
1762 		*func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1763 	ddi_prop_free(pci_rp);
1764 	return (0);
1765 }
1766 
1767 /*
1768  * Return the ACPI device node matching this dev_info node, if it
1769  * exists in the ACPI tree.
1770  */
1771 ACPI_STATUS
1772 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1773 {
1774 	ACPI_STATUS status;
1775 	char *acpiname;
1776 
1777 	if (!d2a_done)
1778 		scan_d2a_map();
1779 
1780 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1781 	    "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1782 		return (AE_ERROR);
1783 	}
1784 
1785 	status = AcpiGetHandle(NULL, acpiname, rh);
1786 	ddi_prop_free((void *)acpiname);
1787 	return (status);
1788 }
1789 
1790 
1791 
1792 /*
1793  * Manage OS data attachment to ACPI nodes
1794  */
1795 
1796 /*
1797  * Return the (dev_info_t *) associated with the ACPI node.
1798  */
1799 ACPI_STATUS
1800 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1801 {
1802 	ACPI_STATUS status;
1803 	void *ptr;
1804 
1805 	status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1806 	if (status == AE_OK)
1807 		*dipp = (dev_info_t *)ptr;
1808 
1809 	return (status);
1810 }
1811 
1812 /*
1813  * Set the dev_info_t associated with the ACPI node.
1814  */
1815 static ACPI_STATUS
1816 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1817 {
1818 	ACPI_STATUS status;
1819 
1820 	status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1821 	return (status);
1822 }
1823 
1824 /*
1825  * Unset the dev_info_t associated with the ACPI node.
1826  */
static ACPI_STATUS
acpica_unset_devinfo(ACPI_HANDLE obj)
{
	/* Remove the dev_info_t attached by acpica_set_devinfo(). */
	return (AcpiDetachData(obj, acpica_devinfo_handler));
}
1832 
1833 /*
1834  *
1835  */
1836 void
1837 acpica_devinfo_handler(ACPI_HANDLE obj, UINT32 func, void *data)
1838 {
1839 	/* noop */
1840 }
1841 
/*
 * Populate the cpu map with every processor declared in the ACPI
 * namespace: Processor objects (searched to a depth of 4 below the
 * root) and processor Devices (_HID "ACPI0007").  Idempotent: does
 * nothing once cpu_map_built is set, and skips the probe entirely
 * when the ACPI device configuration driver has already built the
 * mapping.
 */
ACPI_STATUS
acpica_build_processor_map(void)
{
	ACPI_STATUS status;
	void *rv;

	/*
	 * shouldn't be called more than once anyway
	 */
	if (cpu_map_built)
		return (AE_OK);

	/*
	 * ACPI device configuration driver has built mapping information
	 * among processor id and object handle, no need to probe again.
	 */
	if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		cpu_map_built = 1;
		return (AE_OK);
	}

	/*
	 * Look for Processor objects
	 */
	status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
	    ACPI_ROOT_OBJECT,
	    4,
	    acpica_probe_processor,
	    NULL,
	    &rv);
	ASSERT(status == AE_OK);

	/*
	 * Look for processor Device objects
	 */
	status = AcpiGetDevices("ACPI0007",
	    acpica_probe_processor,
	    NULL,
	    &rv);
	ASSERT(status == AE_OK);
	cpu_map_built = 1;

	return (status);
}
1886 
1887 /*
1888  * Grow cpu map table on demand.
1889  */
1890 static void
1891 acpica_grow_cpu_map(void)
1892 {
1893 	if (cpu_map_count == cpu_map_count_max) {
1894 		size_t sz;
1895 		struct cpu_map_item **new_map;
1896 
1897 		ASSERT(cpu_map_count_max < INT_MAX / 2);
1898 		cpu_map_count_max += max_ncpus;
1899 		new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1900 		    KM_SLEEP);
1901 		if (cpu_map_count != 0) {
1902 			ASSERT(cpu_map != NULL);
1903 			sz = sizeof (cpu_map[0]) * cpu_map_count;
1904 			kcopy(cpu_map, new_map, sz);
1905 			kmem_free(cpu_map, sz);
1906 		}
1907 		cpu_map = new_map;
1908 	}
1909 }
1910 
1911 /*
1912  * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1913  * ACPI handle). The mapping table will be setup in two steps:
1914  * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1915  *    processor id and ACPI object handle.
1916  * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1917  * On system with ACPI device configuration for CPU enabled, acpica_map_cpu()
1918  * will be called before acpica_add_processor_to_map(), otherwise
1919  * acpica_map_cpu() will be called after acpica_add_processor_to_map().
1920  */
ACPI_STATUS
acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(obj != NULL);
	if (obj == NULL) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);

	/*
	 * Special case for uppc
	 * If we're a uppc system and ACPI device configuration for CPU has
	 * been disabled, there won't be a CPU map yet because uppc psm doesn't
	 * call acpica_map_cpu(). So create one and use the passed-in processor
	 * as CPU 0
	 */
	if (cpu_map == NULL &&
	    !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		acpica_grow_cpu_map();
		ASSERT(cpu_map != NULL);
		item = kmem_zalloc(sizeof (*item), KM_SLEEP);
		item->cpu_id = 0;
		item->proc_id = acpi_id;
		item->apic_id = apic_id;
		item->obj = obj;
		cpu_map[0] = item;
		cpu_map_count = 1;
		mutex_exit(&cpu_map_lock);
		return (AE_OK);
	}

	/* Look for an existing entry for this object or processor id. */
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->obj == obj) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}

	if (rc == AE_OK) {
		if (item != NULL) {
			/*
			 * ACPI alias objects may cause more than one objects
			 * with the same ACPI processor id, only remember the
			 * the first object encountered.
			 */
			if (item->obj == NULL) {
				item->obj = obj;
				item->apic_id = apic_id;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			/* Guard against unbounded table growth. */
			rc = AE_NO_MEMORY;
		} else {
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			/* cpu id unknown until acpica_map_cpu() runs. */
			item->cpu_id = -1;
			item->proc_id = acpi_id;
			item->apic_id = apic_id;
			item->obj = obj;
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}

	mutex_exit(&cpu_map_lock);

	return (rc);
}
2000 
/*
 * Drop the ACPI object handle for the given ACPI processor id from the
 * cpu map.  The entry itself is freed only when no cpu id refers to it
 * (cpu_id == -1); the last entry is then moved into the vacated slot
 * to keep the array dense.  Returns AE_NOT_EXIST if the id is unknown.
 */
ACPI_STATUS
acpica_remove_processor_from_map(UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_NOT_EXIST;

	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->proc_id != acpi_id) {
			continue;
		}
		cpu_map[i]->obj = NULL;
		/* Free item if no more reference to it. */
		if (cpu_map[i]->cpu_id == -1) {
			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
			cpu_map[i] = NULL;
			cpu_map_count--;
			if (i != cpu_map_count) {
				cpu_map[i] = cpu_map[cpu_map_count];
				cpu_map[cpu_map_count] = NULL;
			}
		}
		rc = AE_OK;
		break;
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2030 
/*
 * Associate a kernel cpu id with an ACPI processor id.  If an entry for
 * the processor id already exists (created by
 * acpica_add_processor_to_map()) it is claimed in place; otherwise a
 * new entry with no ACPI object is created.  Returns AE_ALREADY_EXISTS
 * if the cpu id is already mapped or the matching entry is already
 * claimed.
 */
ACPI_STATUS
acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(cpuid != -1);
	if (cpuid == -1) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);
	/* Look for an existing entry for this cpu or processor id. */
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id == cpuid) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}
	if (rc == AE_OK) {
		if (item != NULL) {
			if (item->cpu_id == -1) {
				item->cpu_id = cpuid;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			/* Guard against unbounded table growth. */
			rc = AE_NO_MEMORY;
		} else {
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			item->cpu_id = cpuid;
			item->proc_id = acpi_id;
			/* Object/APIC id filled in later by the probe. */
			item->apic_id = UINT32_MAX;
			item->obj = NULL;
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2079 
/*
 * Dissociate a kernel cpu id from its cpu map entry.  The entry itself
 * is freed only when it no longer holds an ACPI object; the last entry
 * is then moved into the vacated slot to keep the array dense.
 * Returns AE_NOT_EXIST if the cpu id is not mapped.
 */
ACPI_STATUS
acpica_unmap_cpu(processorid_t cpuid)
{
	int i;
	ACPI_STATUS rc = AE_NOT_EXIST;

	ASSERT(cpuid != -1);
	if (cpuid == -1) {
		return (rc);
	}

	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id != cpuid) {
			continue;
		}
		cpu_map[i]->cpu_id = -1;
		/* Free item if no more reference. */
		if (cpu_map[i]->obj == NULL) {
			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
			cpu_map[i] = NULL;
			cpu_map_count--;
			if (i != cpu_map_count) {
				cpu_map[i] = cpu_map[cpu_map_count];
				cpu_map[cpu_map_count] = NULL;
			}
		}
		rc = AE_OK;
		break;
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2114 
2115 ACPI_STATUS
2116 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2117 {
2118 	int i;
2119 	ACPI_STATUS rc = AE_NOT_EXIST;
2120 
2121 	ASSERT(cpuid != -1);
2122 	if (cpuid == -1) {
2123 		return (rc);
2124 	}
2125 
2126 	mutex_enter(&cpu_map_lock);
2127 	for (i = 0; i < cpu_map_count; i++) {
2128 		if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2129 			*hdlp = cpu_map[i]->obj;
2130 			rc = AE_OK;
2131 			break;
2132 		}
2133 	}
2134 	mutex_exit(&cpu_map_lock);
2135 
2136 	return (rc);
2137 }
2138 
2139 ACPI_STATUS
2140 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2141 {
2142 	int i;
2143 	ACPI_STATUS rc = AE_NOT_EXIST;
2144 
2145 	mutex_enter(&cpu_map_lock);
2146 	for (i = 0; i < cpu_map_count; i++) {
2147 		if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2148 			*hdlp = cpu_map[i]->obj;
2149 			rc = AE_OK;
2150 			break;
2151 		}
2152 	}
2153 	mutex_exit(&cpu_map_lock);
2154 
2155 	return (rc);
2156 }
2157 
2158 ACPI_STATUS
2159 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2160 {
2161 	int i;
2162 	ACPI_STATUS rc = AE_NOT_EXIST;
2163 
2164 	ASSERT(apicid != UINT32_MAX);
2165 	if (apicid == UINT32_MAX) {
2166 		return (rc);
2167 	}
2168 
2169 	mutex_enter(&cpu_map_lock);
2170 	for (i = 0; i < cpu_map_count; i++) {
2171 		if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2172 			*hdlp = cpu_map[i]->obj;
2173 			rc = AE_OK;
2174 			break;
2175 		}
2176 	}
2177 	mutex_exit(&cpu_map_lock);
2178 
2179 	return (rc);
2180 }
2181 
/*
 * Atomically set bits in the ACPICA core feature mask.
 */
void
acpica_set_core_feature(uint64_t features)
{
	atomic_or_64(&acpica_core_features, features);
}
2187 
/*
 * Atomically clear bits in the ACPICA core feature mask.
 */
void
acpica_clear_core_feature(uint64_t features)
{
	atomic_and_64(&acpica_core_features, ~features);
}
2193 
/*
 * Return the subset of 'features' currently set in the ACPICA core
 * feature mask.
 */
uint64_t
acpica_get_core_feature(uint64_t features)
{
	return (acpica_core_features & features);
}
2199 
/*
 * Atomically set bits in the ACPI device-configuration feature mask.
 */
void
acpica_set_devcfg_feature(uint64_t features)
{
	atomic_or_64(&acpica_devcfg_features, features);
}
2205 
/*
 * Atomically clear bits in the ACPI device-configuration feature mask.
 */
void
acpica_clear_devcfg_feature(uint64_t features)
{
	atomic_and_64(&acpica_devcfg_features, ~features);
}
2211 
/*
 * Return the subset of 'features' currently set in the ACPI
 * device-configuration feature mask.
 */
uint64_t
acpica_get_devcfg_feature(uint64_t features)
{
	return (acpica_devcfg_features & features);
}
2217 
/*
 * Return a pointer to ACPICA's global copy of the FADT.
 */
void
acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
{
	*gbl_FADT = &AcpiGbl_FADT;
}
2223 
/*
 * Write the FADT-specified P-state and/or C-state command values to the
 * SMI command block register.  A zero PstateControl/CstControl value in
 * the FADT means the platform defines no such command, so nothing is
 * written.  NOTE(review): presumably this tells the firmware the OS is
 * assuming CPU power-management control — confirm against the ACPI FADT
 * specification.
 */
void
acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
{
	if (pstates && AcpiGbl_FADT.PstateControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.PstateControl);

	if (cstates && AcpiGbl_FADT.CstControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.CstControl);
}
2235