1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 Joyent, Inc.
26 * Copyright 2019 Western Digital Corporation
27 */
28 /*
29 * Copyright (c) 2009-2010, Intel Corporation.
30 * All rights reserved.
31 */
32 /*
33 * ACPI CA OSL for Solaris x86
34 */
35
36 #include <sys/types.h>
37 #include <sys/kmem.h>
38 #include <sys/psm.h>
39 #include <sys/pci_cfgspace.h>
40 #include <sys/apic.h>
41 #include <sys/ddi.h>
42 #include <sys/sunddi.h>
43 #include <sys/sunndi.h>
44 #include <sys/pci.h>
45 #include <sys/kobj.h>
46 #include <sys/taskq.h>
47 #include <sys/strlog.h>
48 #include <sys/x86_archext.h>
49 #include <sys/note.h>
50 #include <sys/promif.h>
51
52 #include <sys/acpi/accommon.h>
53 #include <sys/acpica.h>
54
55 #define MAX_DAT_FILE_SIZE (64*1024)
56
57 /* local functions */
58 static int CompressEisaID(char *np);
59
60 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
61 static int acpica_query_bbn_problem(void);
62 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
63 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
64 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
65 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
66 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
67
68 /*
69 * Event queue vars
70 */
71 int acpica_eventq_init = 0;
72 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
73
74 /*
75 * Priorities relative to minclsyspri that each taskq
76 * runs at; OSL_NOTIFY_HANDLER needs to run at a higher
77 * priority than OSL_GPE_HANDLER. There's an implicit
78 * assumption that no priority here results in exceeding
79 * maxclsyspri.
80 * Note: these initializations need to match the order of
81 * ACPI_EXECUTE_TYPE.
82 */
83 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
84 0, /* OSL_GLOBAL_LOCK_HANDLER */
85 2, /* OSL_NOTIFY_HANDLER */
86 0, /* OSL_GPE_HANDLER */
87 0, /* OSL_DEBUGGER_THREAD */
88 0, /* OSL_EC_POLL_HANDLER */
89 0 /* OSL_EC_BURST_HANDLER */
90 };
91
92 /*
93 * Note, if you change this path, you need to update
94 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
95 */
96 static char *acpi_table_path = "/boot/acpi/tables/";
97
98 /* non-zero while scan_d2a_map() is working */
99 static int scanning_d2a_map = 0;
100 static int d2a_done = 0;
101
102 /* features supported by ACPICA and ACPI device configuration. */
103 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
104 static uint64_t acpica_devcfg_features = 0;
105
106 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
107 int acpica_use_safe_delay = 0;
108
109 /* CPU mapping data */
110 struct cpu_map_item {
111 processorid_t cpu_id;
112 UINT32 proc_id;
113 UINT32 apic_id;
114 ACPI_HANDLE obj;
115 };
116
117 kmutex_t cpu_map_lock;
118 static struct cpu_map_item **cpu_map = NULL;
119 static int cpu_map_count_max = 0;
120 static int cpu_map_count = 0;
121 static int cpu_map_built = 0;
122
123 /*
124 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
125 * This flag is used to check for uppc-only systems by detecting whether
126 * acpica_map_cpu() has been called or not.
127 */
128 static int cpu_map_called = 0;
129
130 static int acpi_has_broken_bbn = -1;
131
132 /* buffer for AcpiOsVprintf() */
133 #define ACPI_OSL_PR_BUFLEN 1024
134 static char *acpi_osl_pr_buffer = NULL;
135 static int acpi_osl_pr_buflen;
136
137 #define D2A_DEBUG
138
139 /*
140 * Tear down the taskqs created for ACPI event handling.
141 */
142 static void
143 discard_event_queues()
144 {
145 int i;
146
147 /*
148 * destroy event queues
149 */
150 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
151 if (osl_eventq[i])
152 ddi_taskq_destroy(osl_eventq[i]);
153 }
154 }
155
156
157 /*
158 * Create the taskqs used to run ACPI event handlers.
159 */
160 static ACPI_STATUS
161 init_event_queues()
162 {
163 char namebuf[32];
164 int i, error = 0;
165
166 /*
167 * Initialize event queues
168 */
169
170 /* Always allocate only 1 thread per queue to force FIFO execution */
171 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
172 snprintf(namebuf, 32, "ACPI%d", i);
173 osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
174 osl_eventq_pri_delta[i] + minclsyspri, 0);
175 if (osl_eventq[i] == NULL)
176 error++;
177 }
178
179 if (error != 0) {
180 discard_event_queues();
181 #ifdef DEBUG
182 cmn_err(CE_WARN, "!acpica: could not initialize event queues");
183 #endif
184 return (AE_ERROR);
185 }
186
187 acpica_eventq_init = 1;
188 return (AE_OK);
189 }
190
191 /*
192 * One-time initialization of OSL layer
193 */
194 ACPI_STATUS
195 AcpiOsInitialize(void)
196 {
197 /*
198 * Allocate buffer for AcpiOsVprintf() here to avoid
199 * kmem_alloc()/kmem_free() at high PIL
200 */
201 acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
202 if (acpi_osl_pr_buffer != NULL)
203 acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
204
205 return (AE_OK);
206 }
207
208 /*
209 * One-time shut-down of OSL layer
210 */
211 ACPI_STATUS
212 AcpiOsTerminate(void)
213 {
214
215 if (acpi_osl_pr_buffer != NULL)
216 kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
217
218 discard_event_queues();
219 return (AE_OK);
220 }
221
222
223 ACPI_PHYSICAL_ADDRESS
224 AcpiOsGetRootPointer()
225 {
226 ACPI_PHYSICAL_ADDRESS Address;
227
228 /*
229 * For EFI firmware, the root pointer is defined in EFI systab.
230 * The boot code processes the table and puts the physical address
231 * in the acpi-root-tab property.
232 */
233 Address = ddi_prop_get_int64(DDI_DEV_T_ANY, ddi_root_node(),
234 DDI_PROP_DONTPASS, "acpi-root-tab", 0);
235
236 if ((Address == 0) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
237 Address = 0;
238
239 return (Address);
240 }
241
242 /*ARGSUSED*/
243 ACPI_STATUS
244 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
245 ACPI_STRING *NewVal)
246 {
247
248 *NewVal = 0;
249 return (AE_OK);
250 }
251
252 static void
253 acpica_strncpy(char *dest, const char *src, int len)
254 {
255
256 /*LINTED*/
257 while ((*dest++ = *src++) && (--len > 0))
258 /* copy the string */;
259 *dest = '\0';
260 }
261
262 ACPI_STATUS
263 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
264 ACPI_TABLE_HEADER **NewTable)
265 {
266 char signature[5];
267 char oemid[7];
268 char oemtableid[9];
269 struct _buf *file;
270 char *buf1, *buf2;
271 int count;
272 char acpi_table_loc[128];
273
274 acpica_strncpy(signature, ExistingTable->Signature, 4);
275 acpica_strncpy(oemid, ExistingTable->OemId, 6);
276 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
277
278 /* File name format is "signature_oemid_oemtableid.dat" */
279 (void) strcpy(acpi_table_loc, acpi_table_path);
280 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */
281 (void) strcat(acpi_table_loc, "_");
282 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
283 (void) strcat(acpi_table_loc, "_");
284 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
285 (void) strcat(acpi_table_loc, ".dat");
286
287 file = kobj_open_file(acpi_table_loc);
288 if (file == (struct _buf *)-1) {
289 *NewTable = 0;
290 return (AE_OK);
291 } else {
292 buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
293 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
294 if (count >= MAX_DAT_FILE_SIZE) {
295 cmn_err(CE_WARN, "!acpica: table %s file size too big",
296 acpi_table_loc);
297 *NewTable = 0;
298 } else {
299 buf2 = (char *)kmem_alloc(count, KM_SLEEP);
300 (void) memcpy(buf2, buf1, count);
301 *NewTable = (ACPI_TABLE_HEADER *)buf2;
302 cmn_err(CE_NOTE, "!acpica: replacing table: %s",
303 acpi_table_loc);
304 }
305 }
306 kobj_close_file(file);
307 kmem_free(buf1, MAX_DAT_FILE_SIZE);
308
309 return (AE_OK);
310 }
311
312 ACPI_STATUS
313 AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable,
314 ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength)
315 {
316 return (AE_SUPPORT);
317 }
318
319 /*
320 * ACPI semaphore implementation
321 */
322 typedef struct {
323 kmutex_t mutex;
324 kcondvar_t cv;
325 uint32_t available;
326 uint32_t initial;
327 uint32_t maximum;
328 } acpi_sema_t;
329
330 /*
331 * Initialize a counting semaphore built on a mutex/condvar pair.
332 */
333 void
334 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
335 {
336 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
337 cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
338 /* no need to enter mutex here at creation */
339 sp->available = count;
340 sp->initial = count;
341 sp->maximum = max;
342 }
343
344 /*
345 * Destroy a semaphore initialized by acpi_sema_init().
346 */
347 void
348 acpi_sema_destroy(acpi_sema_t *sp)
349 {
350
351 cv_destroy(&sp->cv);
352 mutex_destroy(&sp->mutex);
353 }
354
355 /*
356 * Acquire count units, waiting up to wait_time milliseconds (or forever).
357 */
358 ACPI_STATUS
359 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
360 {
361 ACPI_STATUS rv = AE_OK;
362 clock_t deadline;
363
364 mutex_enter(&sp->mutex);
365
366 if (sp->available >= count) {
367 /*
368 * Enough units available, no blocking
369 */
370 sp->available -= count;
371 mutex_exit(&sp->mutex);
372 return (rv);
373 } else if (wait_time == 0) {
374 /*
375 * Not enough units available and timeout
376 * specifies no blocking
377 */
378 rv = AE_TIME;
379 mutex_exit(&sp->mutex);
380 return (rv);
381 }
382
383 /*
384 * Not enough units available and timeout specifies waiting
385 */
386 if (wait_time != ACPI_WAIT_FOREVER)
387 deadline = ddi_get_lbolt() +
388 (clock_t)drv_usectohz(wait_time * 1000);
389
390 do {
391 if (wait_time == ACPI_WAIT_FOREVER)
392 cv_wait(&sp->cv, &sp->mutex);
393 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
394 rv = AE_TIME;
395 break;
396 }
397 } while (sp->available < count);
398
399 /* if we dropped out of the wait with AE_OK, we got the units */
400 if (rv == AE_OK)
401 sp->available -= count;
402
403 mutex_exit(&sp->mutex);
404 return (rv);
405 }
406
407 /*
408 * Release count units and wake any waiters.
409 */
410 void
411 acpi_sema_v(acpi_sema_t *sp, unsigned count)
412 {
413 mutex_enter(&sp->mutex);
414 sp->available += count;
415 cv_broadcast(&sp->cv);
416 mutex_exit(&sp->mutex);
417 }
418
419
420 ACPI_STATUS
421 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
422 ACPI_HANDLE *OutHandle)
423 {
424 acpi_sema_t *sp;
425
426 if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
427 return (AE_BAD_PARAMETER);
428
429 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
430 acpi_sema_init(sp, MaxUnits, InitialUnits);
431 *OutHandle = (ACPI_HANDLE)sp;
432 return (AE_OK);
433 }
434
435
436 ACPI_STATUS
437 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
438 {
439
440 if (Handle == NULL)
441 return (AE_BAD_PARAMETER);
442
443 acpi_sema_destroy((acpi_sema_t *)Handle);
444 kmem_free((void *)Handle, sizeof (acpi_sema_t));
445 return (AE_OK);
446 }
447
448 ACPI_STATUS
449 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
450 {
451
452 if ((Handle == NULL) || (Units < 1))
453 return (AE_BAD_PARAMETER);
454
455 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
456 }
457
458 ACPI_STATUS
459 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
460 {
461
462 if ((Handle == NULL) || (Units < 1))
463 return (AE_BAD_PARAMETER);
464
465 acpi_sema_v((acpi_sema_t *)Handle, Units);
466 return (AE_OK);
467 }
468
469 ACPI_STATUS
470 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
471 {
472 kmutex_t *mp;
473
474 if (OutHandle == NULL)
475 return (AE_BAD_PARAMETER);
476
477 mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
478 mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
479 *OutHandle = (ACPI_HANDLE)mp;
480 return (AE_OK);
481 }
482
483 void
484 AcpiOsDeleteLock(ACPI_HANDLE Handle)
485 {
486
487 if (Handle == NULL)
488 return;
489
490 mutex_destroy((kmutex_t *)Handle);
491 kmem_free((void *)Handle, sizeof (kmutex_t));
492 }
493
494 ACPI_CPU_FLAGS
495 AcpiOsAcquireLock(ACPI_HANDLE Handle)
496 {
497
498
499 if (Handle == NULL)
500 return (AE_BAD_PARAMETER);
501
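/*
 * The idle thread must not block; spin with mutex_tryenter() rather
 * than sleeping in mutex_enter().
 */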
502 if (curthread == CPU->cpu_idle_thread) {
503 while (!mutex_tryenter((kmutex_t *)Handle))
504 /* spin */;
505 } else
506 mutex_enter((kmutex_t *)Handle);
507 return (AE_OK);
508 }
509
510 void
511 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
512 {
513 _NOTE(ARGUNUSED(Flags))
514
515 mutex_exit((kmutex_t *)Handle);
516 }
517
518
519 void *
520 AcpiOsAllocate(ACPI_SIZE Size)
521 {
522 ACPI_SIZE *tmp_ptr;
523
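/* Prepend the allocation size so AcpiOsFree() can hand it to kmem_free() */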
524 Size += sizeof (Size);
525 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
526 *tmp_ptr++ = Size;
527 return (tmp_ptr);
528 }
529
530 void
531 AcpiOsFree(void *Memory)
532 {
533 ACPI_SIZE size, *tmp_ptr;
534
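/* Step back to the size word stored by AcpiOsAllocate() */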
535 tmp_ptr = (ACPI_SIZE *)Memory;
536 tmp_ptr -= 1;
537 size = *tmp_ptr;
538 kmem_free(tmp_ptr, size);
539 }
540
541 static int napics_found; /* number of ioapic addresses in array */
542 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
543 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
544 static void *dummy_ioapicadr;
545
546 void
547 acpica_find_ioapics(void)
548 {
549 int madt_seen, madt_size;
550 ACPI_SUBTABLE_HEADER *ap;
551 ACPI_MADT_IO_APIC *mia;
552
553 if (acpi_mapic_dtp != NULL)
554 return; /* already parsed table */
555 if (AcpiGetTable(ACPI_SIG_MADT, 1,
556 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
557 return;
558
559 napics_found = 0;
560
561 /*
562 * Search the MADT for ioapics
563 */
564 ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
565 madt_size = acpi_mapic_dtp->Header.Length;
566 madt_seen = sizeof (*acpi_mapic_dtp);
567
568 while (madt_seen < madt_size) {
569
570 switch (ap->Type) {
571 case ACPI_MADT_TYPE_IO_APIC:
572 mia = (ACPI_MADT_IO_APIC *) ap;
573 if (napics_found < MAX_IO_APIC) {
574 ioapic_paddr[napics_found++] =
575 (ACPI_PHYSICAL_ADDRESS)
576 (mia->Address & PAGEMASK);
577 }
578 break;
579
580 default:
581 break;
582 }
583
584 /* advance to next entry */
585 madt_seen += ap->Length;
586 ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
587 }
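/*
 * Reserve a zeroed page to hand back from AcpiOsMapMemory() whenever an
 * ioapic address is mapped, keeping AML away from the real ioapic registers.
 */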
588 if (dummy_ioapicadr == NULL)
589 dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
590 }
591
592
593 void *
594 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
595 {
596 int i;
597
598 /*
599 * If the ioapic address table is populated, check whether we are trying
600 * to access an ioapic; if so, return a pointer to a dummy ioapic instead.
601 */
602 for (i = 0; i < napics_found; i++) {
603 if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
604 return (dummy_ioapicadr);
605 }
606 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
607 return (psm_map_new((paddr_t)PhysicalAddress,
608 (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
609 }
610
611 void
612 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
613 {
614 /*
615 * Check if trying to unmap dummy ioapic address.
616 */
617 if (LogicalAddress == dummy_ioapicadr)
618 return;
619
620 psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
621 }
622
623 /*ARGSUSED*/
624 ACPI_STATUS
625 AcpiOsGetPhysicalAddress(void *LogicalAddress,
626 ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
627 {
628
629 /* UNIMPLEMENTED: not invoked by ACPI CA code */
630 return (AE_NOT_IMPLEMENTED);
631 }
632
633
634 ACPI_OSD_HANDLER acpi_isr;
635 void *acpi_isr_context;
636
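/*
 * Wrapper that adapts the SCI handler registered by ACPI CA to the
 * avintr calling convention and return values.
 */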
637 uint_t
638 acpi_wrapper_isr(char *arg, char *arg1 __unused)
639 {
640 _NOTE(ARGUNUSED(arg))
641
642 int status;
643
644 status = (*acpi_isr)(acpi_isr_context);
645
646 if (status == ACPI_INTERRUPT_HANDLED) {
647 return (DDI_INTR_CLAIMED);
648 } else {
649 return (DDI_INTR_UNCLAIMED);
650 }
651 }
652
653 static int acpi_intr_hooked = 0;
654
655 ACPI_STATUS
656 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
657 ACPI_OSD_HANDLER ServiceRoutine,
658 void *Context)
659 {
660 _NOTE(ARGUNUSED(InterruptNumber))
661
662 int retval;
663 int sci_vect;
664 iflag_t sci_flags;
665
666 acpi_isr = ServiceRoutine;
667 acpi_isr_context = Context;
668
669 /*
670 * Get SCI (adjusted for PIC/APIC mode if necessary)
671 */
672 if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
673 return (AE_ERROR);
674 }
675
676 #ifdef DEBUG
677 cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
678 #endif
679
680 retval = add_avintr(NULL, SCI_IPL, acpi_wrapper_isr,
681 "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
682 if (retval) {
683 acpi_intr_hooked = 1;
684 return (AE_OK);
685 } else
686 return (AE_BAD_PARAMETER);
687 }
688
689 ACPI_STATUS
690 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
691 ACPI_OSD_HANDLER ServiceRoutine)
692 {
693 _NOTE(ARGUNUSED(ServiceRoutine))
694
695 #ifdef DEBUG
696 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
697 #endif
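/* Only unhook the SCI if AcpiOsInstallInterruptHandler() attached it */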
698 if (acpi_intr_hooked) {
699 rem_avintr(NULL, LOCK_LEVEL - 1, acpi_wrapper_isr,
700 InterruptNumber);
701 acpi_intr_hooked = 0;
702 }
703 return (AE_OK);
704 }
705
706
707 ACPI_THREAD_ID
708 AcpiOsGetThreadId(void)
709 {
710 /*
711 * ACPI CA doesn't care what actual value is returned as long
712 * as it is non-zero and unique to each existing thread.
713 * ACPI CA assumes that thread ID is castable to a pointer,
714 * so we use the current thread pointer.
715 */
716 return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread));
717 }
718
719 /*
720 * Queue a callback for asynchronous execution on the taskq matching its type.
721 */
722 ACPI_STATUS
723 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
724 void *Context)
725 {
726
727 if (!acpica_eventq_init) {
728 /*
729 * Create taskqs for event handling
730 */
731 if (init_event_queues() != AE_OK)
732 return (AE_ERROR);
733 }
734
735 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
736 DDI_NOSLEEP) == DDI_FAILURE) {
737 #ifdef DEBUG
738 cmn_err(CE_WARN, "!acpica: unable to dispatch event");
739 #endif
740 return (AE_ERROR);
741 }
742 return (AE_OK);
743
744 }
745
746
747 void
748 AcpiOsWaitEventsComplete(void)
749 {
750 int i;
751
752 /*
753 * Wait for event queues to be empty.
754 */
755 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
756 if (osl_eventq[i] != NULL) {
757 ddi_taskq_wait(osl_eventq[i]);
758 }
759 }
760 }
761
762 void
763 AcpiOsSleep(ACPI_INTEGER Milliseconds)
764 {
765 /*
766 * During kernel startup, before the first tick interrupt
767 * has taken place, we can't call delay; very late in
768 * kernel shutdown or suspend/resume, clock interrupts
769 * are blocked, so delay doesn't work then either.
770 * So we busy wait if lbolt == 0 (kernel startup)
771 * or if acpica_use_safe_delay has been set to a
772 * non-zero value.
773 */
774 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
775 drv_usecwait(Milliseconds * 1000);
776 else
777 delay(drv_usectohz(Milliseconds * 1000));
778 }
779
780 void
781 AcpiOsStall(UINT32 Microseconds)
782 {
783 drv_usecwait(Microseconds);
784 }
785
786
787 /*
788 * Implementation of "Windows 2001" compatible I/O permission map
789 *
790 */
791 #define OSL_IO_NONE (0)
792 #define OSL_IO_READ (1<<0)
793 #define OSL_IO_WRITE (1<<1)
794 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
795 #define OSL_IO_TERM (1<<2)
796 #define OSL_IO_DEFAULT OSL_IO_RW
797
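/*
 * Table of I/O port ranges with explicit permissions; the entry carrying
 * OSL_IO_TERM ends the search, and addresses matching no entry are
 * allowed read/write access by default.
 */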
798 static struct io_perm {
799 ACPI_IO_ADDRESS low;
800 ACPI_IO_ADDRESS high;
801 uint8_t perm;
802 } osl_io_perm[] = {
803 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
804 };
805
806
807 /*
808 * Look up the permission entry covering an I/O address; NULL means unrestricted.
809 */
810 static struct io_perm *
811 osl_io_find_perm(ACPI_IO_ADDRESS addr)
812 {
813 struct io_perm *p;
814
815 p = osl_io_perm;
816 while (p != NULL) {
817 if ((p->low <= addr) && (addr <= p->high))
818 break;
819 p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
820 }
821
822 return (p);
823 }
824
825 /*
826 * Port I/O, subject to the permission map above.
827 */
828 ACPI_STATUS
829 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
830 {
831 struct io_perm *p;
832
833 /* verify permission */
834 p = osl_io_find_perm(Address);
835 if (p && (p->perm & OSL_IO_READ) == 0) {
836 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
837 (long)Address, Width);
838 *Value = 0xffffffff;
839 return (AE_ERROR);
840 }
841
842 switch (Width) {
843 case 8:
844 *Value = inb(Address);
845 break;
846 case 16:
847 *Value = inw(Address);
848 break;
849 case 32:
850 *Value = inl(Address);
851 break;
852 default:
853 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
854 (long)Address, Width);
855 return (AE_BAD_PARAMETER);
856 }
857 return (AE_OK);
858 }
859
860 ACPI_STATUS
861 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
862 {
863 struct io_perm *p;
864
865 /* verify permission */
866 p = osl_io_find_perm(Address);
867 if (p && (p->perm & OSL_IO_WRITE) == 0) {
868 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
869 (long)Address, Width);
870 return (AE_ERROR);
871 }
872
873 switch (Width) {
874 case 8:
875 outb(Address, Value);
876 break;
877 case 16:
878 outw(Address, Value);
879 break;
880 case 32:
881 outl(Address, Value);
882 break;
883 default:
884 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
885 (long)Address, Width);
886 return (AE_BAD_PARAMETER);
887 }
888 return (AE_OK);
889 }
890
891
892 /*
893 * Memory-mapped access helpers for AcpiOsReadMemory()/AcpiOsWriteMemory().
894 */
895
896 #define OSL_RW(ptr, val, type, rw) \
897 { if (rw) *((type *)(ptr)) = *((type *) val); \
898 else *((type *) val) = *((type *)(ptr)); }
899
900
901 static void
902 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value,
903 UINT32 Width, int write)
904 {
905 size_t maplen = Width / 8;
906 caddr_t ptr;
907
908 ptr = psm_map_new((paddr_t)Address, maplen,
909 PSM_PROT_WRITE | PSM_PROT_READ);
910
911 switch (maplen) {
912 case 1:
913 OSL_RW(ptr, Value, uint8_t, write);
914 break;
915 case 2:
916 OSL_RW(ptr, Value, uint16_t, write);
917 break;
918 case 4:
919 OSL_RW(ptr, Value, uint32_t, write);
920 break;
921 case 8:
922 OSL_RW(ptr, Value, uint64_t, write);
923 break;
924 default:
925 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
926 Width);
927 break;
928 }
929
930 psm_unmap(ptr, maplen);
931 }
932
933 ACPI_STATUS
934 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
935 UINT64 *Value, UINT32 Width)
936 {
937 osl_rw_memory(Address, Value, Width, 0);
938 return (AE_OK);
939 }
940
941 ACPI_STATUS
942 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
943 UINT64 Value, UINT32 Width)
944 {
945 osl_rw_memory(Address, &Value, Width, 1);
946 return (AE_OK);
947 }
948
949
950 ACPI_STATUS
951 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
952 UINT64 *Value, UINT32 Width)
953 {
954
955 switch (Width) {
956 case 8:
957 *Value = (UINT64)(*pci_getb_func)
958 (PciId->Bus, PciId->Device, PciId->Function, Reg);
959 break;
960 case 16:
961 *Value = (UINT64)(*pci_getw_func)
962 (PciId->Bus, PciId->Device, PciId->Function, Reg);
963 break;
964 case 32:
965 *Value = (UINT64)(*pci_getl_func)
966 (PciId->Bus, PciId->Device, PciId->Function, Reg);
967 break;
968 case 64:
969 default:
970 cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
971 Reg, Width);
972 return (AE_BAD_PARAMETER);
973 }
974 return (AE_OK);
975 }
976
977 /*
978 * When zero, AML writes to PCI config space are logged and dropped.
979 */
980 int acpica_write_pci_config_ok = 1;
981
982 ACPI_STATUS
983 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
984 UINT64 Value, UINT32 Width)
985 {
986
987 if (!acpica_write_pci_config_ok) {
988 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
989 " %lx %d not permitted", PciId->Bus, PciId->Device,
990 PciId->Function, Reg, (long)Value, Width);
991 return (AE_OK);
992 }
993
994 switch (Width) {
995 case 8:
996 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
997 Reg, (uint8_t)Value);
998 break;
999 case 16:
1000 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
1001 Reg, (uint16_t)Value);
1002 break;
1003 case 32:
1004 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
1005 Reg, (uint32_t)Value);
1006 break;
1007 case 64:
1008 default:
1009 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
1010 Reg, Width);
1011 return (AE_BAD_PARAMETER);
1012 }
1013 return (AE_OK);
1014 }
1015
1016 /*
1017 * Called with ACPI_HANDLEs for both a PCI Config Space
1018 * OpRegion and (what ACPI CA thinks is) the PCI device
1019 * to which this ConfigSpace OpRegion belongs.
1020 *
1021 * ACPI CA uses _BBN and _ADR objects to determine the default
1022 * values for bus, segment, device and function; anything ACPI CA
1023 * can't figure out from the ACPI tables will be 0. One very
1024 * old 32-bit x86 system is known to have broken _BBN; this is
1025 * not addressed here.
1026 *
1027 * Some BIOSes implement _BBN() by reading PCI config space
1028 * on bus #0 - which means that we'll recurse when we attempt
1029 * to create the devinfo-to-ACPI map. If Derive is called during
1030 * scan_d2a_map, we don't translate the bus # and return.
1031 *
1032 * We get the parent of the OpRegion, which must be a PCI
1033 * node, fetch the associated devinfo node and snag the
1034 * b/d/f from it.
1035 */
1036 void
1037 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1038 ACPI_PCI_ID **PciId)
1039 {
1040 ACPI_HANDLE handle;
1041 dev_info_t *dip;
1042 int bus, device, func, devfn;
1043
1044 /*
1045 * See above - avoid recursing during scanning_d2a_map.
1046 */
1047 if (scanning_d2a_map)
1048 return;
1049
1050 /*
1051 * Get the OpRegion's parent
1052 */
1053 if (AcpiGetParent(chandle, &handle) != AE_OK)
1054 return;
1055
1056 /*
1057 * If we've mapped the ACPI node to the devinfo
1058 * tree, use the devinfo reg property
1059 */
1060 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1061 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1062 (*PciId)->Bus = bus;
1063 (*PciId)->Device = device;
1064 (*PciId)->Function = func;
1065 }
1066 }
1067
1068
1069 /*ARGSUSED*/
1070 BOOLEAN
1071 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1072 {
1073
1074 /* Always says yes; all mapped memory assumed readable */
1075 return (1);
1076 }
1077
1078 /*ARGSUSED*/
1079 BOOLEAN
1080 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1081 {
1082
1083 /* Always says yes; all mapped memory assumed writable */
1084 return (1);
1085 }
1086
1087 UINT64
1088 AcpiOsGetTimer(void)
1089 {
1090 /* gethrtime() returns 1 ns resolution; convert to 100 ns granules */
1091 return ((gethrtime() + 50) / 100);
1092 }
1093
1094 static struct AcpiOSIFeature_s {
1095 uint64_t control_flag;
1096 const char *feature_name;
1097 } AcpiOSIFeatures[] = {
1098 { ACPI_FEATURE_OSI_MODULE, "Module Device" },
1099 { 0, "Processor Device" }
1100 };
1101
1102 /*ARGSUSED*/
1103 ACPI_STATUS
1104 AcpiOsValidateInterface(char *feature)
1105 {
1106 int i;
1107
1108 ASSERT(feature != NULL);
1109 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1110 i++) {
1111 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1112 continue;
1113 }
1114 /* Check whether required core features are available. */
1115 if (AcpiOSIFeatures[i].control_flag != 0 &&
1116 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1117 AcpiOSIFeatures[i].control_flag) {
1118 break;
1119 }
1120 /* Feature supported. */
1121 return (AE_OK);
1122 }
1123
1124 return (AE_SUPPORT);
1125 }
1126
1127 /*ARGSUSED*/
1128 ACPI_STATUS
1129 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1130 ACPI_SIZE length)
1131 {
1132 return (AE_OK);
1133 }
1134
1135 ACPI_STATUS
1136 AcpiOsSignal(UINT32 Function, void *Info)
1137 {
1138 _NOTE(ARGUNUSED(Function, Info))
1139
1140 /* FUTUREWORK: debugger support */
1141
1142 cmn_err(CE_NOTE, "!OsSignal unimplemented");
1143 return (AE_OK);
1144 }
1145
1146 void ACPI_INTERNAL_VAR_XFACE
1147 AcpiOsPrintf(const char *Format, ...)
1148 {
1149 va_list ap;
1150
1151 va_start(ap, Format);
1152 AcpiOsVprintf(Format, ap);
1153 va_end(ap);
1154 }
1155
1156 /*ARGSUSED*/
1157 ACPI_STATUS
1158 AcpiOsEnterSleep(UINT8 SleepState, UINT32 Rega, UINT32 Regb)
1159 {
1160 return (AE_OK);
1161 }
1162
1163 /*
1164 * When != 0, sends output to console
1165 * Patchable with kmdb or /etc/system.
1166 */
1167 int acpica_console_out = 0;
1168
1169 #define ACPICA_OUTBUF_LEN 160
1170 char acpica_outbuf[ACPICA_OUTBUF_LEN];
1171 int acpica_outbuf_offset;
1172
1173 /*
1174 * Accumulate ACPI CA output and flush it a line at a time to the selected sink.
1175 */
1176 static void
1177 acpica_pr_buf(char *buf)
1178 {
1179 char c, *bufp, *outp;
1180 int out_remaining;
1181
1182 /*
1183 * Copy the supplied buffer into the output buffer; when we hit a
1184 * '\n' or would overflow the output buffer, flush the output
1185 * buffer to the selected destination and reset it.
1186 */
1187 bufp = buf;
1188 outp = acpica_outbuf + acpica_outbuf_offset;
1189 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1190 while (c = *bufp++) {
1191 *outp++ = c;
1192 if (c == '\n' || --out_remaining == 0) {
1193 *outp = '\0';
1194 switch (acpica_console_out) {
1195 case 1:
1196 printf(acpica_outbuf);
1197 break;
1198 case 2:
1199 prom_printf(acpica_outbuf);
1200 break;
1201 case 0:
1202 default:
1203 (void) strlog(0, 0, 0,
1204 SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1205 acpica_outbuf);
1206 break;
1207 }
1208 acpica_outbuf_offset = 0;
1209 outp = acpica_outbuf;
1210 out_remaining = ACPICA_OUTBUF_LEN - 1;
1211 }
1212 }
1213
1214 acpica_outbuf_offset = outp - acpica_outbuf;
1215 }
1216
1217 void
1218 AcpiOsVprintf(const char *Format, va_list Args)
1219 {
1220
1221 /*
1222 * If AcpiOsInitialize() failed to allocate a string buffer,
1223 * resort to vprintf().
1224 */
1225 if (acpi_osl_pr_buffer == NULL) {
1226 vprintf(Format, Args);
1227 return;
1228 }
1229
1230 /*
1231 * It is possible that a very long debug output statement will
1232 * be truncated; this is silently ignored.
1233 */
1234 (void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1235 acpica_pr_buf(acpi_osl_pr_buffer);
1236 }
1237
1238 void
1239 AcpiOsRedirectOutput(void *Destination)
1240 {
1241 _NOTE(ARGUNUSED(Destination))
1242
1243 /* FUTUREWORK: debugger support */
1244
1245 #ifdef DEBUG
1246 cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1247 #endif
1248 }
1249
1250
1251 UINT32
1252 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead)
1253 {
1254 _NOTE(ARGUNUSED(Buffer))
1255 _NOTE(ARGUNUSED(len))
1256 _NOTE(ARGUNUSED(BytesRead))
1257
1258 /* FUTUREWORK: debugger support */
1259
1260 return (0);
1261 }
1262
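/*
 * AcpiWalkResources() callback for _CRS: record the minimum of any
 * bus-number-range descriptor (16-, 32- or 64-bit) in *context.
 */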
1263 static ACPI_STATUS
1264 acpica_crs_cb(ACPI_RESOURCE *rp, void *context)
1265 {
1266 int *busno = context;
1267
1268 if (rp->Data.Address.ProducerConsumer == 1)
1269 return (AE_OK);
1270
1271 switch (rp->Type) {
1272 case ACPI_RESOURCE_TYPE_ADDRESS16:
1273 if (rp->Data.Address16.Address.AddressLength == 0)
1274 break;
1275 if (rp->Data.Address16.ResourceType != ACPI_BUS_NUMBER_RANGE)
1276 break;
1277
1278 *busno = rp->Data.Address16.Address.Minimum;
1279 break;
1280
1281 case ACPI_RESOURCE_TYPE_ADDRESS32:
1282 if (rp->Data.Address32.Address.AddressLength == 0)
1283 break;
1284 if (rp->Data.Address32.ResourceType != ACPI_BUS_NUMBER_RANGE)
1285 break;
1286
1287 *busno = rp->Data.Address32.Address.Minimum;
1288 break;
1289
1290 case ACPI_RESOURCE_TYPE_ADDRESS64:
1291 if (rp->Data.Address64.Address.AddressLength == 0)
1292 break;
1293 if (rp->Data.Address64.ResourceType != ACPI_BUS_NUMBER_RANGE)
1294 break;
1295
1296 *busno = (int)rp->Data.Address64.Address.Minimum;
1297 break;
1298
1299 default:
1300 break;
1301 }
1302
1303 return (AE_OK);
1304 }
1305
1306 /*
1307 * Retrieve the bus number for a root bus.
1308 *
1309 * _CRS (Current Resource Setting) holds the bus number as set in
1310 * PCI configuration, this may differ from _BBN and is a more reliable
1311 * indicator of what the bus number is.
1312 */
1313 ACPI_STATUS
1314 acpica_get_busno(ACPI_HANDLE hdl, int *busno)
1315 {
1316 ACPI_STATUS rv;
1317 int bus = -1;
1318 int bbn;
1319
1320 if (ACPI_FAILURE(rv = acpica_eval_int(hdl, "_BBN", &bbn)))
1321 return (rv);
1322
1323 (void) AcpiWalkResources(hdl, "_CRS", acpica_crs_cb, &bus);
1324
1325 *busno = bus == -1 ? bbn : bus;
1326
1327 return (AE_OK);
1328 }
1329
1330 /*
1331 * Device tree binding
1332 */
1333 static ACPI_STATUS
1334 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1335 {
1336 _NOTE(ARGUNUSED(lvl));
1337
1338 int sta, hid, bbn;
1339 int busno = (intptr_t)ctxp;
1340 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1341
1342 /* Check whether device exists. */
1343 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1344 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1345 /*
1346 * Skip object if device doesn't exist.
1347 * According to ACPI Spec,
1348 * 1) setting either bit 0 or bit 3 means that device exists.
1349 * 2) Absence of _STA method means all status bits set.
1350 */
1351 return (AE_CTRL_DEPTH);
1352 }
1353
1354 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1355 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1356 /* Non PCI/PCIe host bridge. */
1357 return (AE_OK);
1358 }
1359
1360 if (acpi_has_broken_bbn) {
1361 ACPI_BUFFER rb;
1362 rb.Pointer = NULL;
1363 rb.Length = ACPI_ALLOCATE_BUFFER;
1364
1365 /* Decree _BBN == n from PCI<n> */
1366 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1367 return (AE_CTRL_TERMINATE);
1368 }
1369 bbn = ((char *)rb.Pointer)[3] - '0';
1370 AcpiOsFree(rb.Pointer);
1371 if (bbn == busno || busno == 0) {
1372 *hdlp = hdl;
1373 return (AE_CTRL_TERMINATE);
1374 }
1375 } else if (ACPI_SUCCESS(acpica_get_busno(hdl, &bbn))) {
1376 if (bbn == busno) {
1377 *hdlp = hdl;
1378 return (AE_CTRL_TERMINATE);
1379 }
1380 } else if (busno == 0) {
1381 *hdlp = hdl;
1382 return (AE_CTRL_TERMINATE);
1383 }
1384
1385 return (AE_CTRL_DEPTH);
1386 }
1387
1388 static int
1389 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1390 {
1391 ACPI_HANDLE sbobj, busobj;
1392
1393 /* initialize static flag by querying ACPI namespace for bug */
1394 if (acpi_has_broken_bbn == -1)
1395 acpi_has_broken_bbn = acpica_query_bbn_problem();
1396
1397 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1398 busobj = NULL;
1399 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1400 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1401 (void **)&busobj);
1402 if (busobj != NULL) {
1403 *rh = busobj;
1404 return (AE_OK);
1405 }
1406 }
1407
1408 return (AE_ERROR);
1409 }
1410
1411 static ACPI_STATUS
1412 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1413 {
1414 _NOTE(ARGUNUSED(lvl));
1415 _NOTE(ARGUNUSED(rvpp));
1416
1417 int sta, hid, bbn;
1418 int *cntp = (int *)ctxp;
1419
1420 /* Check whether device exists. */
1421 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1422 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1423 /*
1424 * Skip object if device doesn't exist.
1425 * According to ACPI Spec,
1426 * 1) setting either bit 0 or bit 3 means that device exists.
1427 * 2) Absence of _STA method means all status bits set.
1428 */
1429 return (AE_CTRL_DEPTH);
1430 }
1431
1432 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1433 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1434 /* Non PCI/PCIe host bridge. */
1435 return (AE_OK);
1436 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1437 bbn == 0 && ++(*cntp) > 1) {
1438 /*
1439 * If we find more than one bus with a 0 _BBN
1440 * we have the problem that BigBear's BIOS shows
1441 */
1442 return (AE_CTRL_TERMINATE);
1443 } else {
1444 /*
1445 * Skip children of PCI/PCIe host bridge.
1446 */
1447 return (AE_CTRL_DEPTH);
1448 }
1449 }
1450
1451 /*
1452 * Look for ACPI problem where _BBN is zero for multiple PCI buses
1453 * This is a clear ACPI bug, but acpica_find_pcibus() above works
1454 * around it when it is present.
1455 */
1456 static int
1457 acpica_query_bbn_problem(void)
1458 {
1459 ACPI_HANDLE sbobj;
1460 int zerobbncnt;
1461 void *rv;
1462
1463 zerobbncnt = 0;
1464 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1465 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1466 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1467 }
1468
1469 return (zerobbncnt > 1 ? 1 : 0);
1470 }
1471
1472 static const char hextab[] = "0123456789ABCDEF";
1473
1474 static int
1475 hexdig(int c)
1476 {
1477 /*
1478 * Get hex digit:
1479 *
1480 * Returns the 4-bit hex digit named by the input character. Returns
1481 * zero if the input character is not valid hex!
1482 */
1483
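/* Fold lowercase a-z to uppercase before searching hextab */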
1484 int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1485 int j = sizeof (hextab);
1486
1487 while (--j && (x != hextab[j])) {
1488 }
1489 return (j);
1490 }
1491
1492 static int
1493 CompressEisaID(char *np)
1494 {
1495 /*
1496 * Compress an EISA device name:
1497 *
1498 * This routine converts a 7-byte ASCII device name into the 4-byte
1499 * compressed form used by EISA (50 bytes of ROM to save 1 byte of
1500 * NV-RAM!)
1501 */
1502
1503 union { char octets[4]; int retval; } myu;
1504
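/* Pack the three ID letters (5 bits each, 'A' == 1) and four hex digits */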
1505 myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1506 myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1507 myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1508 myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1509
1510 return (myu.retval);
1511 }
1512
1513 ACPI_STATUS
1514 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1515 {
1516 ACPI_STATUS status;
1517 ACPI_BUFFER rb;
1518 ACPI_OBJECT ro;
1519
1520 rb.Pointer = &ro;
1521 rb.Length = sizeof (ro);
1522 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1523 ACPI_TYPE_INTEGER)) == AE_OK)
1524 *rint = ro.Integer.Value;
1525
1526 return (status);
1527 }
1528
1529 static int
1530 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1531 {
1532 ACPI_BUFFER rb;
1533 ACPI_OBJECT *rv;
1534
1535 rb.Pointer = NULL;
1536 rb.Length = ACPI_ALLOCATE_BUFFER;
1537 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1538 rb.Length != 0) {
1539 rv = rb.Pointer;
1540 if (rv->Type == ACPI_TYPE_INTEGER) {
1541 *rint = rv->Integer.Value;
1542 AcpiOsFree(rv);
1543 return (AE_OK);
1544 } else if (rv->Type == ACPI_TYPE_STRING) {
1545 char *stringData;
1546
1547 /* Convert the string into an EISA ID */
1548 if (rv->String.Pointer == NULL) {
1549 AcpiOsFree(rv);
1550 return (AE_ERROR);
1551 }
1552
1553 stringData = rv->String.Pointer;
1554
1555 /*
1556 * If the string is an EisaID, it must be 7
1557 * characters; if it's an ACPI ID, it will be 8
1558 * (and we don't care about ACPI ids here).
1559 */
1560 if (strlen(stringData) != 7) {
1561 AcpiOsFree(rv);
1562 return (AE_ERROR);
1563 }
1564
1565 *rint = CompressEisaID(stringData);
1566 AcpiOsFree(rv);
1567 return (AE_OK);
1568 } else
1569 AcpiOsFree(rv);
1570 }
1571 return (AE_ERROR);
1572 }
1573
1574 /*
1575 * Create linkage between devinfo nodes and ACPI nodes
1576 */
1577 ACPI_STATUS
1578 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1579 {
1580 ACPI_STATUS status;
1581 ACPI_BUFFER rb;
1582
1583 /*
1584 * Tag the devinfo node with the ACPI name
1585 */
1586 rb.Pointer = NULL;
1587 rb.Length = ACPI_ALLOCATE_BUFFER;
1588 status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1589 if (ACPI_FAILURE(status)) {
1590 cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1591 } else {
1592 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1593 "acpi-namespace", (char *)rb.Pointer);
1594 AcpiOsFree(rb.Pointer);
1595
1596 /*
1597 * Tag the ACPI node with the dip
1598 */
1599 status = acpica_set_devinfo(acpiobj, dip);
1600 ASSERT(ACPI_SUCCESS(status));
1601 }
1602
1603 return (status);
1604 }
1605
1606 /*
1607 * Destroy linkage between devinfo nodes and ACPI nodes
1608 */
1609 ACPI_STATUS
1610 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1611 {
1612 (void) acpica_unset_devinfo(acpiobj);
1613 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1614
1615 return (AE_OK);
1616 }
1617
1618 /*
1619 * Return the ACPI device node matching the CPU dev_info node.
1620 */
1621 ACPI_STATUS
1622 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1623 {
1624 int i;
1625
1626 /*
1627 * if cpu_map itself is NULL, we're a uppc system and
1628 * acpica_build_processor_map() hasn't been called yet.
1629 * So call it here
1630 */
1631 if (cpu_map == NULL) {
1632 (void) acpica_build_processor_map();
1633 if (cpu_map == NULL)
1634 return (AE_ERROR);
1635 }
1636
1637 if (cpu_id < 0) {
1638 return (AE_ERROR);
1639 }
1640
1641 /*
1642 * search object with cpuid in cpu_map
1643 */
1644 mutex_enter(&cpu_map_lock);
1645 for (i = 0; i < cpu_map_count; i++) {
1646 if (cpu_map[i]->cpu_id == cpu_id) {
1647 break;
1648 }
1649 }
1650 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1651 *rh = cpu_map[i]->obj;
1652 mutex_exit(&cpu_map_lock);
1653 return (AE_OK);
1654 }
1655
1656 /* Handle special case for uppc-only systems. */
1657 if (cpu_map_called == 0) {
1658 uint32_t apicid = cpuid_get_apicid(CPU);
1659 if (apicid != UINT32_MAX) {
1660 for (i = 0; i < cpu_map_count; i++) {
1661 if (cpu_map[i]->apic_id == apicid) {
1662 break;
1663 }
1664 }
1665 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1666 *rh = cpu_map[i]->obj;
1667 mutex_exit(&cpu_map_lock);
1668 return (AE_OK);
1669 }
1670 }
1671 }
1672 mutex_exit(&cpu_map_lock);
1673
1674 return (AE_ERROR);
1675 }
1676
1677 /*
1678 * Determine if this object is a processor
1679 */
1680 static ACPI_STATUS
1681 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1682 {
1683 ACPI_STATUS status;
1684 ACPI_OBJECT_TYPE objtype;
1685 unsigned long acpi_id;
1686 ACPI_BUFFER rb;
1687 ACPI_DEVICE_INFO *di;
1688
1689 if (AcpiGetType(obj, &objtype) != AE_OK)
1690 return (AE_OK);
1691
1692 if (objtype == ACPI_TYPE_PROCESSOR) {
1693 /* process a Processor */
1694 rb.Pointer = NULL;
1695 rb.Length = ACPI_ALLOCATE_BUFFER;
1696 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1697 ACPI_TYPE_PROCESSOR);
1698 if (status != AE_OK) {
1699 cmn_err(CE_WARN, "!acpica: error probing Processor");
1700 return (status);
1701 }
1702 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1703 AcpiOsFree(rb.Pointer);
1704 } else if (objtype == ACPI_TYPE_DEVICE) {
1705 /* process a processor Device */
1706 status = AcpiGetObjectInfo(obj, &di);
1707 if (status != AE_OK) {
1708 cmn_err(CE_WARN,
1709 "!acpica: error probing Processor Device\n");
1710 return (status);
1711 }
1712
1713 if (!(di->Valid & ACPI_VALID_UID) ||
1714 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1715 ACPI_FREE(di);
1716 cmn_err(CE_WARN,
1717 "!acpica: error probing Processor Device _UID\n");
1718 return (AE_ERROR);
1719 }
1720 ACPI_FREE(di);
1721 }
1722 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1723
1724 return (AE_OK);
1725 }
1726
1727 void
1728 scan_d2a_map(void)
1729 {
1730 dev_info_t *dip, *cdip;
1731 ACPI_HANDLE acpiobj;
1732 char *device_type_prop;
1733 int bus;
1734 static int map_error = 0;
1735
1736 if (map_error || (d2a_done != 0))
1737 return;
1738
1739 scanning_d2a_map = 1;
1740
1741 /*
1742 * Find all child-of-root PCI buses, and find their corresponding
1743 * ACPI child-of-root PCI nodes. For each one, add to the
1744 * d2a table.
1745 */
1746
1747 for (dip = ddi_get_child(ddi_root_node());
1748 dip != NULL;
1749 dip = ddi_get_next_sibling(dip)) {
1750
1751 /* prune non-PCI nodes */
1752 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1753 DDI_PROP_DONTPASS,
1754 "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1755 continue;
1756
1757 if ((strcmp("pci", device_type_prop) != 0) &&
1758 (strcmp("pciex", device_type_prop) != 0)) {
1759 ddi_prop_free(device_type_prop);
1760 continue;
1761 }
1762
1763 ddi_prop_free(device_type_prop);
1764
1765 /*
1766 * To get bus number of dip, get first child and get its
1767 * bus number. If NULL, just continue, because we don't
1768 * care about bus nodes with no children anyway.
1769 */
1770 if ((cdip = ddi_get_child(dip)) == NULL)
1771 continue;
1772
1773 if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1774 #ifdef D2ADEBUG
1775 cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1776 #endif
1777 map_error = 1;
1778 scanning_d2a_map = 0;
1779 d2a_done = 1;
1780 return;
1781 }
1782
1783 if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1784 #ifdef D2ADEBUG
1785 cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1786 #endif
1787 map_error = 1;
1788 continue;
1789 }
1790
1791 acpica_tag_devinfo(dip, acpiobj);
1792
1793 /* call recursively to enumerate subtrees */
1794 scan_d2a_subtree(dip, acpiobj, bus);
1795 }
1796
1797 scanning_d2a_map = 0;
1798 d2a_done = 1;
1799 }
1800
1801 /*
1802 * For all acpi child devices of acpiobj, find their matching
1803 * dip under "dip" argument. (matching means "matches dev/fn").
1804 * bus is assumed to already be a match from caller, and is
1805 * used here only to record in the d2a entry. Recurse if necessary.
1806 */
1807 static void
1808 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1809 {
1810 int acpi_devfn, hid;
1811 ACPI_HANDLE acld;
1812 dev_info_t *dcld;
1813 int dcld_b, dcld_d, dcld_f;
1814 int dev, func;
1815 char *device_type_prop;
1816
1817 acld = NULL;
1818 while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1819 == AE_OK) {
1820 /* get the dev/func we're looking for in the devinfo tree */
1821 if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1822 continue;
1823 dev = (acpi_devfn >> 16) & 0xFFFF;
1824 func = acpi_devfn & 0xFFFF;
1825
1826 /* look through all the immediate children of dip */
1827 for (dcld = ddi_get_child(dip); dcld != NULL;
1828 dcld = ddi_get_next_sibling(dcld)) {
1829 if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1830 continue;
1831
1832 /* dev must match; function must match or wildcard */
1833 if (dcld_d != dev ||
1834 (func != 0xFFFF && func != dcld_f))
1835 continue;
1836 bus = dcld_b;
1837
1838 /* found a match, record it */
1839 acpica_tag_devinfo(dcld, acld);
1840
1841 /* if we find a bridge, recurse from here */
1842 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1843 DDI_PROP_DONTPASS, "device_type",
1844 &device_type_prop) == DDI_PROP_SUCCESS) {
1845 if ((strcmp("pci", device_type_prop) == 0) ||
1846 (strcmp("pciex", device_type_prop) == 0))
1847 scan_d2a_subtree(dcld, acld, bus);
1848 ddi_prop_free(device_type_prop);
1849 }
1850
1851 /* done finding a match, so break now */
1852 break;
1853 }
1854 }
1855 }
1856
1857 /*
1858 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1859 */
1860 int
1861 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1862 {
1863 pci_regspec_t *pci_rp;
1864 int len;
1865
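/* The bus/device/function is encoded in the first cell of the "reg" property */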
1866 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1867 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1868 return (-1);
1869
1870 if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1871 ddi_prop_free(pci_rp);
1872 return (-1);
1873 }
1874 if (bus != NULL)
1875 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1876 if (device != NULL)
1877 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1878 if (func != NULL)
1879 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1880 ddi_prop_free(pci_rp);
1881 return (0);
1882 }
1883
1884 /*
1885 * Return the ACPI device node matching this dev_info node, if it
1886 * exists in the ACPI tree.
1887 */
1888 ACPI_STATUS
1889 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1890 {
1891 ACPI_STATUS status;
1892 char *acpiname;
1893
1894 #ifdef DEBUG
1895 if (d2a_done == 0)
1896 cmn_err(CE_WARN, "!acpica_get_handle:"
1897 " no ACPI mapping for %s", ddi_node_name(dip));
1898 #endif
1899
1900 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1901 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1902 return (AE_ERROR);
1903 }
1904
1905 status = AcpiGetHandle(NULL, acpiname, rh);
1906 ddi_prop_free((void *)acpiname);
1907 return (status);
1908 }
1909
1910
1911
1912 /*
1913 * Manage OS data attachment to ACPI nodes
1914 */
1915
1916 /*
1917 * Return the (dev_info_t *) associated with the ACPI node.
1918 */
1919 ACPI_STATUS
1920 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1921 {
1922 ACPI_STATUS status;
1923 void *ptr;
1924
1925 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1926 if (status == AE_OK)
1927 *dipp = (dev_info_t *)ptr;
1928
1929 return (status);
1930 }
1931
1932 /*
1933 * Set the dev_info_t associated with the ACPI node.
1934 */
1935 static ACPI_STATUS
1936 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1937 {
1938 ACPI_STATUS status;
1939
1940 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1941 return (status);
1942 }
1943
1944 /*
1945 * Unset the dev_info_t associated with the ACPI node.
1946 */
1947 static ACPI_STATUS
1948 acpica_unset_devinfo(ACPI_HANDLE obj)
1949 {
1950 return (AcpiDetachData(obj, acpica_devinfo_handler));
1951 }
1952
1953 /*
1954 * Handler required by AcpiAttachData(); no cleanup is needed on detach.
1955 */
1956 void
1957 acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
1958 {
1959 /* no-op */
1960 }
1961
1962 ACPI_STATUS
1963 acpica_build_processor_map(void)
1964 {
1965 ACPI_STATUS status;
1966 void *rv;
1967
1968 /*
1969 * shouldn't be called more than once anyway
1970 */
1971 if (cpu_map_built)
1972 return (AE_OK);
1973
1974 /*
1975 * ACPI device configuration driver has built mapping information
1976 * among processor id and object handle, no need to probe again.
1977 */
1978 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1979 cpu_map_built = 1;
1980 return (AE_OK);
1981 }
1982
1983 /*
1984 * Look for Processor objects
1985 */
1986 status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1987 ACPI_ROOT_OBJECT,
1988 4,
1989 acpica_probe_processor,
1990 NULL,
1991 NULL,
1992 &rv);
1993 ASSERT(status == AE_OK);
1994
1995 /*
1996 * Look for processor Device objects
1997 */
1998 status = AcpiGetDevices("ACPI0007",
1999 acpica_probe_processor,
2000 NULL,
2001 &rv);
2002 ASSERT(status == AE_OK);
2003 cpu_map_built = 1;
2004
2005 return (status);
2006 }
2007
2008 /*
2009 * Grow cpu map table on demand.
2010 */
2011 static void
2012 acpica_grow_cpu_map(void)
2013 {
2014 if (cpu_map_count == cpu_map_count_max) {
2015 size_t sz;
2016 struct cpu_map_item **new_map;
2017
2018 ASSERT(cpu_map_count_max < INT_MAX / 2);
2019 cpu_map_count_max += max_ncpus;
2020 new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
2021 KM_SLEEP);
2022 if (cpu_map_count != 0) {
2023 ASSERT(cpu_map != NULL);
2024 sz = sizeof (cpu_map[0]) * cpu_map_count;
2025 kcopy(cpu_map, new_map, sz);
2026 kmem_free(cpu_map, sz);
2027 }
2028 cpu_map = new_map;
2029 }
2030 }
2031
2032 /*
2033 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
2034 * ACPI handle). The mapping table will be setup in two steps:
2035 * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
2036 * processor id and ACPI object handle.
2037 * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
2038 * On systems which have ACPI device configuration for CPUs enabled,
2039 * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
2040 * otherwise acpica_map_cpu() will be called before
2041 * acpica_add_processor_to_map().
2042 */
2043 ACPI_STATUS
2044 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
2045 {
2046 int i;
2047 ACPI_STATUS rc = AE_OK;
2048 struct cpu_map_item *item = NULL;
2049
2050 ASSERT(obj != NULL);
2051 if (obj == NULL) {
2052 return (AE_ERROR);
2053 }
2054
2055 mutex_enter(&cpu_map_lock);
2056
2057 /*
2058 * Special case for uppc
2059 * If we're a uppc system and ACPI device configuration for CPU has
2060 * been disabled, there won't be a CPU map yet because uppc psm doesn't
2061 * call acpica_map_cpu(). So create one and use the passed-in processor
2062 * as CPU 0
2063 * Assumption: the first CPU returned by
2064 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
2065 * Unfortunately there appears to be no good way to ASSERT this.
2066 */
2067 if (cpu_map == NULL &&
2068 !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
2069 acpica_grow_cpu_map();
2070 ASSERT(cpu_map != NULL);
2071 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2072 item->cpu_id = 0;
2073 item->proc_id = acpi_id;
2074 item->apic_id = apic_id;
2075 item->obj = obj;
2076 cpu_map[0] = item;
2077 cpu_map_count = 1;
2078 mutex_exit(&cpu_map_lock);
2079 return (AE_OK);
2080 }
2081
2082 for (i = 0; i < cpu_map_count; i++) {
2083 if (cpu_map[i]->obj == obj) {
2084 rc = AE_ALREADY_EXISTS;
2085 break;
2086 } else if (cpu_map[i]->proc_id == acpi_id) {
2087 ASSERT(item == NULL);
2088 item = cpu_map[i];
2089 }
2090 }
2091
2092 if (rc == AE_OK) {
2093 if (item != NULL) {
2094 /*
2095 * ACPI alias objects may cause more than one object
2096 * with the same ACPI processor id; only remember
2097 * the first object encountered.
2098 */
2099 if (item->obj == NULL) {
2100 item->obj = obj;
2101 item->apic_id = apic_id;
2102 } else {
2103 rc = AE_ALREADY_EXISTS;
2104 }
2105 } else if (cpu_map_count >= INT_MAX / 2) {
2106 rc = AE_NO_MEMORY;
2107 } else {
2108 acpica_grow_cpu_map();
2109 ASSERT(cpu_map != NULL);
2110 ASSERT(cpu_map_count < cpu_map_count_max);
2111 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2112 item->cpu_id = -1;
2113 item->proc_id = acpi_id;
2114 item->apic_id = apic_id;
2115 item->obj = obj;
2116 cpu_map[cpu_map_count] = item;
2117 cpu_map_count++;
2118 }
2119 }
2120
2121 mutex_exit(&cpu_map_lock);
2122
2123 return (rc);
2124 }
2125
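/*
 * Drop the ACPI object handle recorded for the given ACPI processor id.
 * The map entry is freed once neither a cpu id nor an object refers to it.
 */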
2126 ACPI_STATUS
2127	acpica_remove_processor_from_map(UINT32 acpi_id)
2128 {
2129 int i;
2130 ACPI_STATUS rc = AE_NOT_EXIST;
2131
2132 mutex_enter(&cpu_map_lock);
2133 for (i = 0; i < cpu_map_count; i++) {
2134 if (cpu_map[i]->proc_id != acpi_id) {
2135 continue;
2136 }
2137 cpu_map[i]->obj = NULL;
2138 /* Free item if no more reference to it. */
2139 if (cpu_map[i]->cpu_id == -1) {
2140 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2141 cpu_map[i] = NULL;
2142 cpu_map_count--;
2143 if (i != cpu_map_count) {
2144 cpu_map[i] = cpu_map[cpu_map_count];
2145 cpu_map[cpu_map_count] = NULL;
2146 }
2147 }
2148 rc = AE_OK;
2149 break;
2150 }
2151 mutex_exit(&cpu_map_lock);
2152
2153 return (rc);
2154 }
2155
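/*
 * Record the mapping between a cpu id and an ACPI processor id, creating
 * a new map entry if the processor has not been probed yet.
 */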
2156 ACPI_STATUS
2157	acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2158 {
2159 int i;
2160 ACPI_STATUS rc = AE_OK;
2161 struct cpu_map_item *item = NULL;
2162
2163 ASSERT(cpuid != -1);
2164 if (cpuid == -1) {
2165 return (AE_ERROR);
2166 }
2167
2168 mutex_enter(&cpu_map_lock);
2169 cpu_map_called = 1;
2170 for (i = 0; i < cpu_map_count; i++) {
2171 if (cpu_map[i]->cpu_id == cpuid) {
2172 rc = AE_ALREADY_EXISTS;
2173 break;
2174 } else if (cpu_map[i]->proc_id == acpi_id) {
2175 ASSERT(item == NULL);
2176 item = cpu_map[i];
2177 }
2178 }
2179 if (rc == AE_OK) {
2180 if (item != NULL) {
2181 if (item->cpu_id == -1) {
2182 item->cpu_id = cpuid;
2183 } else {
2184 rc = AE_ALREADY_EXISTS;
2185 }
2186 } else if (cpu_map_count >= INT_MAX / 2) {
2187 rc = AE_NO_MEMORY;
2188 } else {
2189 acpica_grow_cpu_map();
2190 ASSERT(cpu_map != NULL);
2191 ASSERT(cpu_map_count < cpu_map_count_max);
2192 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2193 item->cpu_id = cpuid;
2194 item->proc_id = acpi_id;
2195 item->apic_id = UINT32_MAX;
2196 item->obj = NULL;
2197 cpu_map[cpu_map_count] = item;
2198 cpu_map_count++;
2199 }
2200 }
2201 mutex_exit(&cpu_map_lock);
2202
2203 return (rc);
2204 }
2205
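/*
 * Undo acpica_map_cpu(): clear the cpu id and free the map entry once the
 * ACPI object handle is also gone.
 */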
2206 ACPI_STATUS
2207	acpica_unmap_cpu(processorid_t cpuid)
2208 {
2209 int i;
2210 ACPI_STATUS rc = AE_NOT_EXIST;
2211
2212 ASSERT(cpuid != -1);
2213 if (cpuid == -1) {
2214 return (rc);
2215 }
2216
2217 mutex_enter(&cpu_map_lock);
2218 for (i = 0; i < cpu_map_count; i++) {
2219 if (cpu_map[i]->cpu_id != cpuid) {
2220 continue;
2221 }
2222 cpu_map[i]->cpu_id = -1;
2223 /* Free item if no more reference. */
2224 if (cpu_map[i]->obj == NULL) {
2225 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2226 cpu_map[i] = NULL;
2227 cpu_map_count--;
2228 if (i != cpu_map_count) {
2229 cpu_map[i] = cpu_map[cpu_map_count];
2230 cpu_map[cpu_map_count] = NULL;
2231 }
2232 }
2233 rc = AE_OK;
2234 break;
2235 }
2236 mutex_exit(&cpu_map_lock);
2237
2238 return (rc);
2239 }
2240
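/*
 * The lookup routines below translate among cpu id, ACPI processor id,
 * APIC id and ACPI object handle using the mapping table built above.
 * They all return AE_NOT_EXIST when no matching entry is found.
 */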
2241 ACPI_STATUS
2242	acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2243 {
2244 int i;
2245 ACPI_STATUS rc = AE_NOT_EXIST;
2246
2247 ASSERT(cpuid != -1);
2248 if (cpuid == -1) {
2249 return (rc);
2250 }
2251
2252 mutex_enter(&cpu_map_lock);
2253 for (i = 0; i < cpu_map_count; i++) {
2254 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2255 *hdlp = cpu_map[i]->obj;
2256 rc = AE_OK;
2257 break;
2258 }
2259 }
2260 mutex_exit(&cpu_map_lock);
2261
2262 return (rc);
2263 }
2264
2265 ACPI_STATUS
2266	acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2267 {
2268 int i;
2269 ACPI_STATUS rc = AE_NOT_EXIST;
2270
2271 mutex_enter(&cpu_map_lock);
2272 for (i = 0; i < cpu_map_count; i++) {
2273 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2274 *hdlp = cpu_map[i]->obj;
2275 rc = AE_OK;
2276 break;
2277 }
2278 }
2279 mutex_exit(&cpu_map_lock);
2280
2281 return (rc);
2282 }
2283
2284 ACPI_STATUS
2285	acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2286 {
2287 int i;
2288 ACPI_STATUS rc = AE_NOT_EXIST;
2289
2290 ASSERT(apicid != UINT32_MAX);
2291 if (apicid == UINT32_MAX) {
2292 return (rc);
2293 }
2294
2295 mutex_enter(&cpu_map_lock);
2296 for (i = 0; i < cpu_map_count; i++) {
2297 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2298 *hdlp = cpu_map[i]->obj;
2299 rc = AE_OK;
2300 break;
2301 }
2302 }
2303 mutex_exit(&cpu_map_lock);
2304
2305 return (rc);
2306 }
2307
2308 ACPI_STATUS
2309	acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2310 {
2311 int i;
2312 ACPI_STATUS rc = AE_NOT_EXIST;
2313
2314 ASSERT(cpuidp != NULL);
2315 if (hdl == NULL || cpuidp == NULL) {
2316 return (rc);
2317 }
2318
2319 *cpuidp = -1;
2320 mutex_enter(&cpu_map_lock);
2321 for (i = 0; i < cpu_map_count; i++) {
2322 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2323 *cpuidp = cpu_map[i]->cpu_id;
2324 rc = AE_OK;
2325 break;
2326 }
2327 }
2328 mutex_exit(&cpu_map_lock);
2329
2330 return (rc);
2331 }
2332
2333 ACPI_STATUS
2334	acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2335 {
2336 int i;
2337 ACPI_STATUS rc = AE_NOT_EXIST;
2338
2339 ASSERT(rp != NULL);
2340 if (hdl == NULL || rp == NULL) {
2341 return (rc);
2342 }
2343
2344 *rp = UINT32_MAX;
2345 mutex_enter(&cpu_map_lock);
2346 for (i = 0; i < cpu_map_count; i++) {
2347 if (cpu_map[i]->obj == hdl &&
2348 cpu_map[i]->apic_id != UINT32_MAX) {
2349 *rp = cpu_map[i]->apic_id;
2350 rc = AE_OK;
2351 break;
2352 }
2353 }
2354 mutex_exit(&cpu_map_lock);
2355
2356 return (rc);
2357 }
2358
2359 ACPI_STATUS
2360	acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2361 {
2362 int i;
2363 ACPI_STATUS rc = AE_NOT_EXIST;
2364
2365 ASSERT(rp != NULL);
2366 if (hdl == NULL || rp == NULL) {
2367 return (rc);
2368 }
2369
2370 *rp = UINT32_MAX;
2371 mutex_enter(&cpu_map_lock);
2372 for (i = 0; i < cpu_map_count; i++) {
2373 if (cpu_map[i]->obj == hdl) {
2374 *rp = cpu_map[i]->proc_id;
2375 rc = AE_OK;
2376 break;
2377 }
2378 }
2379 mutex_exit(&cpu_map_lock);
2380
2381 return (rc);
2382 }
2383
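/*
 * Accessors for the ACPICA core and ACPI device configuration feature
 * flags; updates are done atomically, so no lock is required.
 */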
2384 void
2385	acpica_set_core_feature(uint64_t features)
2386 {
2387 atomic_or_64(&acpica_core_features, features);
2388 }
2389
2390 void
2391	acpica_clear_core_feature(uint64_t features)
2392 {
2393 atomic_and_64(&acpica_core_features, ~features);
2394 }
2395
2396 uint64_t
2397	acpica_get_core_feature(uint64_t features)
2398 {
2399 return (acpica_core_features & features);
2400 }
2401
2402 void
2403	acpica_set_devcfg_feature(uint64_t features)
2404 {
2405 atomic_or_64(&acpica_devcfg_features, features);
2406 }
2407
2408 void
2409	acpica_clear_devcfg_feature(uint64_t features)
2410 {
2411 atomic_and_64(&acpica_devcfg_features, ~features);
2412 }
2413
2414 uint64_t
2415	acpica_get_devcfg_feature(uint64_t features)
2416 {
2417 return (acpica_devcfg_features & features);
2418 }
2419
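/* Return a pointer to ACPICA's in-kernel copy of the FADT. */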
2420 void
2421	acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2422 {
2423 *gbl_FADT = &AcpiGbl_FADT;
2424 }
2425
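/*
 * Notify platform firmware that the OS is assuming control of CPU P-state
 * and/or C-state management by writing the FADT-supplied command values to
 * the SMI command register; a no-op when the corresponding FADT value is 0.
 */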
2426 void
2427	acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
2428 {
2429 if (pstates && AcpiGbl_FADT.PstateControl != 0)
2430 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2431 AcpiGbl_FADT.PstateControl);
2432
2433 if (cstates && AcpiGbl_FADT.CstControl != 0)
2434 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2435 AcpiGbl_FADT.CstControl);
2436 }
2437
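/*
 * Wrapper around ddi_strtoul() that returns ACPI_UINT32_MAX on conversion
 * failure or when the value does not fit in 32 bits.
 */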
2438 uint32_t
2439	acpi_strtoul(const char *str, char **ep, int base)
2440 {
2441 ulong_t v;
2442
2443 if (ddi_strtoul(str, ep, base, &v) != 0 || v > ACPI_UINT32_MAX) {
2444 return (ACPI_UINT32_MAX);
2445 }
2446
2447 return ((uint32_t)v);
2448 }
2449
2450 /*
2451 * In prior versions of ACPICA, the AcpiGetObjectInfo() function would
2452 * provide information about the status of an object via its _STA method.
2453 * That behavior has been removed; this function replaces it.
2454 *
2455 * Not every ACPI object has a _STA method. When one is not found, the
2456 * OSPM (i.e. us) is supposed to interpret that as indicating that the
2457 * device is present, enabled, shown in the UI, and functioning. This is
2458 * the value 0xF.
2459 */
2460 ACPI_STATUS
2461	acpica_get_object_status(ACPI_HANDLE obj, int *statusp)
2462 {
2463 ACPI_STATUS status;
2464 int ival;
2465
2466 status = acpica_eval_int(obj, METHOD_NAME__STA, &ival);
2467 if (ACPI_FAILURE(status)) {
2468 if (status == AE_NOT_FOUND) {
2469 *statusp = 0xf;
2470 return (AE_OK);
2471 }
2472
2473 return (status);
2474 }
2475
2476 /*
2477	 * This should not be a negative value. However, firmware is often the
2478	 * enemy. If it is negative, complain and treat that as a hard failure.
2479 */
2480 if (ival < 0) {
2481 cmn_err(CE_WARN, "!acpica_get_object_status: encountered "
2482 "negative _STA value on obj %p", obj);
2483 return (AE_ERROR);
2484 }
2485
2486 *statusp = ival;
2487 return (AE_OK);
2488 }
2489