1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2016 Joyent, Inc.
26 */
27 /*
28 * Copyright (c) 2009-2010, Intel Corporation.
29 * All rights reserved.
30 */
31 /*
32 * ACPI CA OSL for Solaris x86
33 */
34
35 #include <sys/types.h>
36 #include <sys/kmem.h>
37 #include <sys/psm.h>
38 #include <sys/pci_cfgspace.h>
39 #include <sys/apic.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/pci.h>
44 #include <sys/kobj.h>
45 #include <sys/taskq.h>
46 #include <sys/strlog.h>
47 #include <sys/x86_archext.h>
48 #include <sys/note.h>
49 #include <sys/promif.h>
50
51 #include <sys/acpi/accommon.h>
52 #include <sys/acpica.h>
53
54 #define MAX_DAT_FILE_SIZE (64*1024)
55
56 /* local functions */
57 static int CompressEisaID(char *np);
58
59 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
60 static int acpica_query_bbn_problem(void);
61 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
62 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
63 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
64 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
65 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
66
67 /*
68 * Event queue vars
69 */
70 int acpica_eventq_init = 0;
71 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
72
73 /*
74 * Priorities relative to minclsyspri that each taskq
75 * runs at; OSL_NOTIFY_HANDLER needs to run at a higher
76 * priority than OSL_GPE_HANDLER. There's an implicit
77 * assumption that no priority here results in exceeding
78 * maxclsyspri.
79 * Note: these initializations need to match the order of
80 * ACPI_EXECUTE_TYPE.
81 */
82 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
83 0, /* OSL_GLOBAL_LOCK_HANDLER */
84 2, /* OSL_NOTIFY_HANDLER */
85 0, /* OSL_GPE_HANDLER */
86 0, /* OSL_DEBUGGER_THREAD */
87 0, /* OSL_EC_POLL_HANDLER */
88 0 /* OSL_EC_BURST_HANDLER */
89 };
90
91 /*
92 * Note, if you change this path, you need to update
93 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
94 */
95 static char *acpi_table_path = "/boot/acpi/tables/";
96
97 /* non-zero while scan_d2a_map() is working */
98 static int scanning_d2a_map = 0;
99 static int d2a_done = 0;
100
101 /* features supported by ACPICA and ACPI device configuration. */
102 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
103 static uint64_t acpica_devcfg_features = 0;
104
105 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
106 int acpica_use_safe_delay = 0;
107
108 /* CPU mapping data */
109 struct cpu_map_item {
110 processorid_t cpu_id;
111 UINT32 proc_id;
112 UINT32 apic_id;
113 ACPI_HANDLE obj;
114 };
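/*
 * cpu_id is the kernel processor id (-1 until acpica_map_cpu() fills it
 * in), proc_id is the ACPI processor id (from the Processor object or
 * _UID), apic_id is the local APIC id (UINT32_MAX if unknown) and obj
 * is the ACPI object handle (NULL once the object has been removed).
 */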
115
116 kmutex_t cpu_map_lock;
117 static struct cpu_map_item **cpu_map = NULL;
118 static int cpu_map_count_max = 0;
119 static int cpu_map_count = 0;
120 static int cpu_map_built = 0;
121
122 /*
123 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
124 * This flag is used to check for uppc-only systems by detecting whether
125 * acpica_map_cpu() has been called or not.
126 */
127 static int cpu_map_called = 0;
128
129 static int acpi_has_broken_bbn = -1;
130
131 /* buffer for AcpiOsVprintf() */
132 #define ACPI_OSL_PR_BUFLEN 1024
133 static char *acpi_osl_pr_buffer = NULL;
134 static int acpi_osl_pr_buflen;
135
136 #define D2A_DEBUG
137
138 /*
139 * Tear down the taskqs used for ACPI event handling.
140 */
141 static void
142 discard_event_queues()
143 {
144 int i;
145
146 /*
147 * destroy event queues
148 */
149 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
150 if (osl_eventq[i])
151 ddi_taskq_destroy(osl_eventq[i]);
152 }
153 }
154
155
156 /*
157 * Create the taskqs used to dispatch ACPI events.
158 */
159 static ACPI_STATUS
160 init_event_queues()
161 {
162 char namebuf[32];
163 int i, error = 0;
164
165 /*
166 * Initialize event queues
167 */
168
169 /* Always allocate only 1 thread per queue to force FIFO execution */
170 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
171 snprintf(namebuf, 32, "ACPI%d", i);
172 osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
173 osl_eventq_pri_delta[i] + minclsyspri, 0);
174 if (osl_eventq[i] == NULL)
175 error++;
176 }
177
178 if (error != 0) {
179 discard_event_queues();
180 #ifdef DEBUG
181 cmn_err(CE_WARN, "!acpica: could not initialize event queues");
182 #endif
183 return (AE_ERROR);
184 }
185
186 acpica_eventq_init = 1;
187 return (AE_OK);
188 }
189
190 /*
191 * One-time initialization of OSL layer
192 */
193 ACPI_STATUS
194 AcpiOsInitialize(void)
195 {
196 /*
197 * Allocate buffer for AcpiOsVprintf() here to avoid
198 * kmem_alloc()/kmem_free() at high PIL
199 */
200 acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
201 if (acpi_osl_pr_buffer != NULL)
202 acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
203
204 return (AE_OK);
205 }
206
207 /*
208 * One-time shut-down of OSL layer
209 */
210 ACPI_STATUS
211 AcpiOsTerminate(void)
212 {
213
214 if (acpi_osl_pr_buffer != NULL)
215 kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
216
217 discard_event_queues();
218 return (AE_OK);
219 }
220
221
222 ACPI_PHYSICAL_ADDRESS
223 AcpiOsGetRootPointer()
224 {
225 ACPI_PHYSICAL_ADDRESS Address;
226
227 /*
228 * For EFI firmware, the root pointer is defined in EFI systab.
229 * The boot code processes the table and puts the physical address
230 * in the acpi-root-tab property.
231 */
232 Address = ddi_prop_get_int64(DDI_DEV_T_ANY, ddi_root_node(),
233 DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
234
235 if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
236 Address = NULL;
237
238 return (Address);
239 }
240
241 /*ARGSUSED*/
242 ACPI_STATUS
243 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
244 ACPI_STRING *NewVal)
245 {
246
247 *NewVal = 0;
248 return (AE_OK);
249 }
250
251 static void
252 acpica_strncpy(char *dest, const char *src, int len)
253 {
254
255 /*LINTED*/
256 while ((*dest++ = *src++) && (--len > 0))
257 /* copy the string */;
258 *dest = '\0';
259 }
260
261 ACPI_STATUS
262 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
263 ACPI_TABLE_HEADER **NewTable)
264 {
265 char signature[5];
266 char oemid[7];
267 char oemtableid[9];
268 struct _buf *file;
269 char *buf1, *buf2;
270 int count;
271 char acpi_table_loc[128];
272
273 acpica_strncpy(signature, ExistingTable->Signature, 4);
274 acpica_strncpy(oemid, ExistingTable->OemId, 6);
275 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
276
277 #ifdef DEBUG
278 cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
279 " OEM TABLE ID [%s] OEM rev %x",
280 signature, ExistingTable->Revision, oemid, oemtableid,
281 ExistingTable->OemRevision);
282 #endif
283
284 /* File name format is "signature_oemid_oemtableid.dat" */
285 (void) strcpy(acpi_table_loc, acpi_table_path);
286 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */
287 (void) strcat(acpi_table_loc, "_");
288 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
289 (void) strcat(acpi_table_loc, "_");
290 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
291 (void) strcat(acpi_table_loc, ".dat");
292
293 file = kobj_open_file(acpi_table_loc);
294 if (file == (struct _buf *)-1) {
295 *NewTable = 0;
296 return (AE_OK);
297 } else {
298 buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
299 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
300 if (count >= MAX_DAT_FILE_SIZE) {
301 cmn_err(CE_WARN, "!acpica: table %s file size too big",
302 acpi_table_loc);
303 *NewTable = 0;
304 } else {
305 buf2 = (char *)kmem_alloc(count, KM_SLEEP);
306 (void) memcpy(buf2, buf1, count);
307 *NewTable = (ACPI_TABLE_HEADER *)buf2;
308 cmn_err(CE_NOTE, "!acpica: replacing table: %s",
309 acpi_table_loc);
310 }
311 }
312 kobj_close_file(file);
313 kmem_free(buf1, MAX_DAT_FILE_SIZE);
314
315 return (AE_OK);
316 }
317
318 ACPI_STATUS
319 AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable,
320 ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength)
321 {
322 return (AE_SUPPORT);
323 }
324
325 /*
326 * ACPI semaphore implementation
327 */
328 typedef struct {
329 kmutex_t mutex;
330 kcondvar_t cv;
331 uint32_t available;
332 uint32_t initial;
333 uint32_t maximum;
334 } acpi_sema_t;
335
336 /*
337 * Initialize a counting semaphore with 'max' maximum units and 'count' available.
338 */
339 void
340 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
341 {
342 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
343 cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
344 /* no need to enter mutex here at creation */
345 sp->available = count;
346 sp->initial = count;
347 sp->maximum = max;
348 }
349
350 /*
351 * Destroy a counting semaphore.
352 */
353 void
354 acpi_sema_destroy(acpi_sema_t *sp)
355 {
356
357 cv_destroy(&sp->cv);
358 mutex_destroy(&sp->mutex);
359 }
360
361 /*
362 * Acquire 'count' units, waiting up to wait_time milliseconds (or forever).
363 */
364 ACPI_STATUS
365 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
366 {
367 ACPI_STATUS rv = AE_OK;
368 clock_t deadline;
369
370 mutex_enter(&sp->mutex);
371
372 if (sp->available >= count) {
373 /*
374 * Enough units available, no blocking
375 */
376 sp->available -= count;
377 mutex_exit(&sp->mutex);
378 return (rv);
379 } else if (wait_time == 0) {
380 /*
381 * Not enough units available and timeout
382 * specifies no blocking
383 */
384 rv = AE_TIME;
385 mutex_exit(&sp->mutex);
386 return (rv);
387 }
388
389 /*
390 * Not enough units available and timeout specifies waiting
391 */
392 if (wait_time != ACPI_WAIT_FOREVER)
393 deadline = ddi_get_lbolt() +
394 (clock_t)drv_usectohz(wait_time * 1000);
395
396 do {
397 if (wait_time == ACPI_WAIT_FOREVER)
398 cv_wait(&sp->cv, &sp->mutex);
399 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
400 rv = AE_TIME;
401 break;
402 }
403 } while (sp->available < count);
404
405 /* if we dropped out of the wait with AE_OK, we got the units */
406 if (rv == AE_OK)
407 sp->available -= count;
408
409 mutex_exit(&sp->mutex);
410 return (rv);
411 }
412
413 /*
414 * Release 'count' units and wake any waiters.
415 */
416 void
417 acpi_sema_v(acpi_sema_t *sp, unsigned count)
418 {
419 mutex_enter(&sp->mutex);
420 sp->available += count;
421 cv_broadcast(&sp->cv);
422 mutex_exit(&sp->mutex);
423 }
424
425
426 ACPI_STATUS
427 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
428 ACPI_HANDLE *OutHandle)
429 {
430 acpi_sema_t *sp;
431
432 if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
433 return (AE_BAD_PARAMETER);
434
435 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
436 acpi_sema_init(sp, MaxUnits, InitialUnits);
437 *OutHandle = (ACPI_HANDLE)sp;
438 return (AE_OK);
439 }
440
441
442 ACPI_STATUS
443 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
444 {
445
446 if (Handle == NULL)
447 return (AE_BAD_PARAMETER);
448
449 acpi_sema_destroy((acpi_sema_t *)Handle);
450 kmem_free((void *)Handle, sizeof (acpi_sema_t));
451 return (AE_OK);
452 }
453
454 ACPI_STATUS
455 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
456 {
457
458 if ((Handle == NULL) || (Units < 1))
459 return (AE_BAD_PARAMETER);
460
461 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
462 }
463
464 ACPI_STATUS
465 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
466 {
467
468 if ((Handle == NULL) || (Units < 1))
469 return (AE_BAD_PARAMETER);
470
471 acpi_sema_v((acpi_sema_t *)Handle, Units);
472 return (AE_OK);
473 }
474
475 ACPI_STATUS
476 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
477 {
478 kmutex_t *mp;
479
480 if (OutHandle == NULL)
481 return (AE_BAD_PARAMETER);
482
483 mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
484 mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
485 *OutHandle = (ACPI_HANDLE)mp;
486 return (AE_OK);
487 }
488
489 void
490 AcpiOsDeleteLock(ACPI_HANDLE Handle)
491 {
492
493 if (Handle == NULL)
494 return;
495
496 mutex_destroy((kmutex_t *)Handle);
497 kmem_free((void *)Handle, sizeof (kmutex_t));
498 }
499
500 ACPI_CPU_FLAGS
501 AcpiOsAcquireLock(ACPI_HANDLE Handle)
502 {
503
504
505 if (Handle == NULL)
506 return (AE_BAD_PARAMETER);
507
508 if (curthread == CPU->cpu_idle_thread) {
509 while (!mutex_tryenter((kmutex_t *)Handle))
510 /* spin */;
511 } else
512 mutex_enter((kmutex_t *)Handle);
513 return (AE_OK);
514 }
515
516 void
517 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
518 {
519 _NOTE(ARGUNUSED(Flags))
520
521 mutex_exit((kmutex_t *)Handle);
522 }
523
524
525 void *
526 AcpiOsAllocate(ACPI_SIZE Size)
527 {
528 ACPI_SIZE *tmp_ptr;
529
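	/*
	 * Stash the allocation size in a hidden ACPI_SIZE header just before
	 * the pointer handed back, so AcpiOsFree() can recover the length
	 * without ACPI CA having to supply it.
	 */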
530 Size += sizeof (Size);
531 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
532 *tmp_ptr++ = Size;
533 return (tmp_ptr);
534 }
535
536 void
537 AcpiOsFree(void *Memory)
538 {
539 ACPI_SIZE size, *tmp_ptr;
540
541 tmp_ptr = (ACPI_SIZE *)Memory;
542 tmp_ptr -= 1;
543 size = *tmp_ptr;
544 kmem_free(tmp_ptr, size);
545 }
546
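/*
 * Some AML directly touches I/O APIC registers.  To keep such accesses
 * away from the real hardware, AcpiOsMapMemory() below redirects any
 * mapping of a known ioapic page to a private zeroed page
 * (dummy_ioapicadr).
 */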
547 static int napics_found; /* number of ioapic addresses in array */
548 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
549 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
550 static void *dummy_ioapicadr;
551
552 void
553 acpica_find_ioapics(void)
554 {
555 int madt_seen, madt_size;
556 ACPI_SUBTABLE_HEADER *ap;
557 ACPI_MADT_IO_APIC *mia;
558
559 if (acpi_mapic_dtp != NULL)
560 return; /* already parsed table */
561 if (AcpiGetTable(ACPI_SIG_MADT, 1,
562 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
563 return;
564
565 napics_found = 0;
566
567 /*
568 * Search the MADT for ioapics
569 */
570 ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
571 madt_size = acpi_mapic_dtp->Header.Length;
572 madt_seen = sizeof (*acpi_mapic_dtp);
573
574 while (madt_seen < madt_size) {
575
576 switch (ap->Type) {
577 case ACPI_MADT_TYPE_IO_APIC:
578 mia = (ACPI_MADT_IO_APIC *) ap;
579 if (napics_found < MAX_IO_APIC) {
580 ioapic_paddr[napics_found++] =
581 (ACPI_PHYSICAL_ADDRESS)
582 (mia->Address & PAGEMASK);
583 }
584 break;
585
586 default:
587 break;
588 }
589
590 /* advance to next entry */
591 madt_seen += ap->Length;
592 ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
593 }
594 if (dummy_ioapicadr == NULL)
595 dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
596 }
597
598
599 void *
600 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
601 {
602 int i;
603
604 /*
605 * If the ioapic address table is populated, check whether we are trying
606 * to access an ioapic; if so, return a pointer to a dummy ioapic instead.
607 */
608 for (i = 0; i < napics_found; i++) {
609 if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
610 return (dummy_ioapicadr);
611 }
612 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
613 return (psm_map_new((paddr_t)PhysicalAddress,
614 (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
615 }
616
617 void
618 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
619 {
620 /*
621 * Check if trying to unmap dummy ioapic address.
622 */
623 if (LogicalAddress == dummy_ioapicadr)
624 return;
625
626 psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
627 }
628
629 /*ARGSUSED*/
630 ACPI_STATUS
631 AcpiOsGetPhysicalAddress(void *LogicalAddress,
632 ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
633 {
634
635 /* UNIMPLEMENTED: not invoked by ACPI CA code */
636 return (AE_NOT_IMPLEMENTED);
637 }
638
639
640 ACPI_OSD_HANDLER acpi_isr;
641 void *acpi_isr_context;
642
643 uint_t
644 acpi_wrapper_isr(char *arg)
645 {
646 _NOTE(ARGUNUSED(arg))
647
648 int status;
649
650 status = (*acpi_isr)(acpi_isr_context);
651
652 if (status == ACPI_INTERRUPT_HANDLED) {
653 return (DDI_INTR_CLAIMED);
654 } else {
655 return (DDI_INTR_UNCLAIMED);
656 }
657 }
658
659 static int acpi_intr_hooked = 0;
660
661 ACPI_STATUS
662 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
663 ACPI_OSD_HANDLER ServiceRoutine,
664 void *Context)
665 {
666 _NOTE(ARGUNUSED(InterruptNumber))
667
668 int retval;
669 int sci_vect;
670 iflag_t sci_flags;
671
672 acpi_isr = ServiceRoutine;
673 acpi_isr_context = Context;
674
675 /*
676 * Get SCI (adjusted for PIC/APIC mode if necessary)
677 */
678 if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
679 return (AE_ERROR);
680 }
681
682 #ifdef DEBUG
683 cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
684 #endif
685
686 retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
687 "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
688 if (retval) {
689 acpi_intr_hooked = 1;
690 return (AE_OK);
691 } else
692 return (AE_BAD_PARAMETER);
693 }
694
695 ACPI_STATUS
696 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
697 ACPI_OSD_HANDLER ServiceRoutine)
698 {
699 _NOTE(ARGUNUSED(ServiceRoutine))
700
701 #ifdef DEBUG
702 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
703 #endif
704 if (acpi_intr_hooked) {
705 rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
706 InterruptNumber);
707 acpi_intr_hooked = 0;
708 }
709 return (AE_OK);
710 }
711
712
713 ACPI_THREAD_ID
714 AcpiOsGetThreadId(void)
715 {
716 /*
717 * ACPI CA doesn't care what actual value is returned as long
718 * as it is non-zero and unique to each existing thread.
719 * ACPI CA assumes that thread ID is castable to a pointer,
720 * so we use the current thread pointer.
721 */
722 return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread));
723 }
724
725 /*
726 * Dispatch a callback for asynchronous execution on the taskq for its type.
727 */
728 ACPI_STATUS
729 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
730 void *Context)
731 {
732
733 if (!acpica_eventq_init) {
734 /*
735 * Create taskqs for event handling
736 */
737 if (init_event_queues() != AE_OK)
738 return (AE_ERROR);
739 }
740
741 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
742 DDI_NOSLEEP) == DDI_FAILURE) {
743 #ifdef DEBUG
744 cmn_err(CE_WARN, "!acpica: unable to dispatch event");
745 #endif
746 return (AE_ERROR);
747 }
748 return (AE_OK);
749
750 }
751
752
753 void
754 AcpiOsWaitEventsComplete(void)
755 {
756 int i;
757
758 /*
759 * Wait for event queues to be empty.
760 */
761 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
762 if (osl_eventq[i] != NULL) {
763 ddi_taskq_wait(osl_eventq[i]);
764 }
765 }
766 }
767
768 void
769 AcpiOsSleep(ACPI_INTEGER Milliseconds)
770 {
771 /*
772 * During kernel startup, before the first tick interrupt
773 * has taken place, we can't call delay; very late in
774 * kernel shutdown or suspend/resume, clock interrupts
775 * are blocked, so delay doesn't work then either.
776 * So we busy wait if lbolt == 0 (kernel startup)
777 * or if acpica_use_safe_delay has been set to a
778 * non-zero value.
779 */
780 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
781 drv_usecwait(Milliseconds * 1000);
782 else
783 delay(drv_usectohz(Milliseconds * 1000));
784 }
785
786 void
787 AcpiOsStall(UINT32 Microseconds)
788 {
789 drv_usecwait(Microseconds);
790 }
791
792
793 /*
794 * Implementation of "Windows 2001" compatible I/O permission map
795 *
796 */
797 #define OSL_IO_NONE (0)
798 #define OSL_IO_READ (1<<0)
799 #define OSL_IO_WRITE (1<<1)
800 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
801 #define OSL_IO_TERM (1<<2)
802 #define OSL_IO_DEFAULT OSL_IO_RW
803
804 static struct io_perm {
805 ACPI_IO_ADDRESS low;
806 ACPI_IO_ADDRESS high;
807 uint8_t perm;
808 } osl_io_perm[] = {
809 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
810 };
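/*
 * The table is searched linearly; OSL_IO_TERM marks the final entry.
 * Addresses not covered by any entry are treated as OSL_IO_DEFAULT
 * (read/write permitted).
 */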
811
812
813 /*
814 * Find the I/O permission map entry covering addr, or NULL if none does.
815 */
816 static struct io_perm *
817 osl_io_find_perm(ACPI_IO_ADDRESS addr)
818 {
819 struct io_perm *p;
820
821 p = osl_io_perm;
822 while (p != NULL) {
823 if ((p->low <= addr) && (addr <= p->high))
824 break;
825 p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
826 }
827
828 return (p);
829 }
830
831 /*
832 * Port input, checked against the I/O permission map.
833 */
834 ACPI_STATUS
835 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
836 {
837 struct io_perm *p;
838
839 /* verify permission */
840 p = osl_io_find_perm(Address);
841 if (p && (p->perm & OSL_IO_READ) == 0) {
842 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
843 (long)Address, Width);
844 *Value = 0xffffffff;
845 return (AE_ERROR);
846 }
847
848 switch (Width) {
849 case 8:
850 *Value = inb(Address);
851 break;
852 case 16:
853 *Value = inw(Address);
854 break;
855 case 32:
856 *Value = inl(Address);
857 break;
858 default:
859 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
860 (long)Address, Width);
861 return (AE_BAD_PARAMETER);
862 }
863 return (AE_OK);
864 }
865
866 ACPI_STATUS
867 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
868 {
869 struct io_perm *p;
870
871 /* verify permission */
872 p = osl_io_find_perm(Address);
873 if (p && (p->perm & OSL_IO_WRITE) == 0) {
874 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
875 (long)Address, Width);
876 return (AE_ERROR);
877 }
878
879 switch (Width) {
880 case 8:
881 outb(Address, Value);
882 break;
883 case 16:
884 outw(Address, Value);
885 break;
886 case 32:
887 outl(Address, Value);
888 break;
889 default:
890 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
891 (long)Address, Width);
892 return (AE_BAD_PARAMETER);
893 }
894 return (AE_OK);
895 }
896
897
898 /*
899 * Width-dispatched read or write of a mapped physical address.
900 */
901
902 #define OSL_RW(ptr, val, type, rw) \
903 { if (rw) *((type *)(ptr)) = *((type *) val); \
904 else *((type *) val) = *((type *)(ptr)); }
905
906
907 static void
908 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value,
909 UINT32 Width, int write)
910 {
911 size_t maplen = Width / 8;
912 caddr_t ptr;
913
914 ptr = psm_map_new((paddr_t)Address, maplen,
915 PSM_PROT_WRITE | PSM_PROT_READ);
916
917 switch (maplen) {
918 case 1:
919 OSL_RW(ptr, Value, uint8_t, write);
920 break;
921 case 2:
922 OSL_RW(ptr, Value, uint16_t, write);
923 break;
924 case 4:
925 OSL_RW(ptr, Value, uint32_t, write);
926 break;
927 case 8:
928 OSL_RW(ptr, Value, uint64_t, write);
929 break;
930 default:
931 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
932 Width);
933 break;
934 }
935
936 psm_unmap(ptr, maplen);
937 }
938
939 ACPI_STATUS
940 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
941 UINT64 *Value, UINT32 Width)
942 {
943 osl_rw_memory(Address, Value, Width, 0);
944 return (AE_OK);
945 }
946
947 ACPI_STATUS
948 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
949 UINT64 Value, UINT32 Width)
950 {
951 osl_rw_memory(Address, &Value, Width, 1);
952 return (AE_OK);
953 }
954
955
956 ACPI_STATUS
957 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
958 UINT64 *Value, UINT32 Width)
959 {
960
961 switch (Width) {
962 case 8:
963 *Value = (UINT64)(*pci_getb_func)
964 (PciId->Bus, PciId->Device, PciId->Function, Reg);
965 break;
966 case 16:
967 *Value = (UINT64)(*pci_getw_func)
968 (PciId->Bus, PciId->Device, PciId->Function, Reg);
969 break;
970 case 32:
971 *Value = (UINT64)(*pci_getl_func)
972 (PciId->Bus, PciId->Device, PciId->Function, Reg);
973 break;
974 case 64:
975 default:
976 cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
977 Reg, Width);
978 return (AE_BAD_PARAMETER);
979 }
980 return (AE_OK);
981 }
982
983 /*
984 * When zero, AML writes to PCI config space are logged and dropped.
985 */
986 int acpica_write_pci_config_ok = 1;
987
988 ACPI_STATUS
989 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
990 UINT64 Value, UINT32 Width)
991 {
992
993 if (!acpica_write_pci_config_ok) {
994 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
995 " %lx %d not permitted", PciId->Bus, PciId->Device,
996 PciId->Function, Reg, (long)Value, Width);
997 return (AE_OK);
998 }
999
1000 switch (Width) {
1001 case 8:
1002 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
1003 Reg, (uint8_t)Value);
1004 break;
1005 case 16:
1006 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
1007 Reg, (uint16_t)Value);
1008 break;
1009 case 32:
1010 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
1011 Reg, (uint32_t)Value);
1012 break;
1013 case 64:
1014 default:
1015 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
1016 Reg, Width);
1017 return (AE_BAD_PARAMETER);
1018 }
1019 return (AE_OK);
1020 }
1021
1022 /*
1023 * Called with ACPI_HANDLEs for both a PCI Config Space
1024 * OpRegion and (what ACPI CA thinks is) the PCI device
1025 * to which this ConfigSpace OpRegion belongs.
1026 *
1027 * ACPI CA uses _BBN and _ADR objects to determine the default
1028 * values for bus, segment, device and function; anything ACPI CA
1029 * can't figure out from the ACPI tables will be 0. One very
1030 * old 32-bit x86 system is known to have broken _BBN; this is
1031 * not addressed here.
1032 *
1033 * Some BIOSes implement _BBN() by reading PCI config space
1034 * on bus #0 - which means that we'll recurse when we attempt
1035 * to create the devinfo-to-ACPI map. If Derive is called during
1036 * scan_d2a_map, we don't translate the bus # and return.
1037 *
1038 * We get the parent of the OpRegion, which must be a PCI
1039 * node, fetch the associated devinfo node and snag the
1040 * b/d/f from it.
1041 */
1042 void
1043 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1044 ACPI_PCI_ID **PciId)
1045 {
1046 ACPI_HANDLE handle;
1047 dev_info_t *dip;
1048 int bus, device, func, devfn;
1049
1050 /*
1051 * See above - avoid recursing during scanning_d2a_map.
1052 */
1053 if (scanning_d2a_map)
1054 return;
1055
1056 /*
1057 * Get the OpRegion's parent
1058 */
1059 if (AcpiGetParent(chandle, &handle) != AE_OK)
1060 return;
1061
1062 /*
1063 * If we've mapped the ACPI node to the devinfo
1064 * tree, use the devinfo reg property
1065 */
1066 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1067 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1068 (*PciId)->Bus = bus;
1069 (*PciId)->Device = device;
1070 (*PciId)->Function = func;
1071 }
1072 }
1073
1074
1075 /*ARGSUSED*/
1076 BOOLEAN
1077 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1078 {
1079
1080 /* Always says yes; all mapped memory assumed readable */
1081 return (1);
1082 }
1083
1084 /*ARGSUSED*/
1085 BOOLEAN
1086 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1087 {
1088
1089 /* Always says yes; all mapped memory assumed writable */
1090 return (1);
1091 }
1092
1093 UINT64
1094 AcpiOsGetTimer(void)
1095 {
1096 /* gethrtime() returns 1nS resolution; convert to 100nS granules */
1097 return ((gethrtime() + 50) / 100);
1098 }
1099
1100 static struct AcpiOSIFeature_s {
1101 uint64_t control_flag;
1102 const char *feature_name;
1103 } AcpiOSIFeatures[] = {
1104 { ACPI_FEATURE_OSI_MODULE, "Module Device" },
1105 { 0, "Processor Device" }
1106 };
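/*
 * Interfaces listed here are reported as supported to _OSI queries.  An
 * entry with a non-zero control_flag is only acknowledged when the
 * corresponding core feature (e.g. ACPI_FEATURE_OSI_MODULE) is enabled.
 */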
1107
1108 /*ARGSUSED*/
1109 ACPI_STATUS
1110 AcpiOsValidateInterface(char *feature)
1111 {
1112 int i;
1113
1114 ASSERT(feature != NULL);
1115 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1116 i++) {
1117 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1118 continue;
1119 }
1120 /* Check whether required core features are available. */
1121 if (AcpiOSIFeatures[i].control_flag != 0 &&
1122 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1123 AcpiOSIFeatures[i].control_flag) {
1124 break;
1125 }
1126 /* Feature supported. */
1127 return (AE_OK);
1128 }
1129
1130 return (AE_SUPPORT);
1131 }
1132
1133 /*ARGSUSED*/
1134 ACPI_STATUS
1135 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1136 ACPI_SIZE length)
1137 {
1138 return (AE_OK);
1139 }
1140
1141 ACPI_STATUS
1142 AcpiOsSignal(UINT32 Function, void *Info)
1143 {
1144 _NOTE(ARGUNUSED(Function, Info))
1145
1146 /* FUTUREWORK: debugger support */
1147
1148 cmn_err(CE_NOTE, "!OsSignal unimplemented");
1149 return (AE_OK);
1150 }
1151
1152 void ACPI_INTERNAL_VAR_XFACE
1153 AcpiOsPrintf(const char *Format, ...)
1154 {
1155 va_list ap;
1156
1157 va_start(ap, Format);
1158 AcpiOsVprintf(Format, ap);
1159 va_end(ap);
1160 }
1161
1162 /*
1163 * When != 0, sends output to console
1164 * Patchable with kmdb or /etc/system.
1165 */
1166 int acpica_console_out = 0;
1167
1168 #define ACPICA_OUTBUF_LEN 160
1169 char acpica_outbuf[ACPICA_OUTBUF_LEN];
1170 int acpica_outbuf_offset;
1171
1172 /*
1173 * Accumulate output and emit completed lines to the configured destination.
1174 */
1175 static void
1176 acpica_pr_buf(char *buf)
1177 {
1178 char c, *bufp, *outp;
1179 int out_remaining;
1180
1181 /*
1182 * Copy the supplied buffer into the output buffer.
1183 * When we hit a '\n' or would overflow the output buffer,
1184 * emit the output buffer and reset it.
1185 */
1186 bufp = buf;
1187 outp = acpica_outbuf + acpica_outbuf_offset;
1188 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1189 while (c = *bufp++) {
1190 *outp++ = c;
1191 if (c == '\n' || --out_remaining == 0) {
1192 *outp = '\0';
1193 switch (acpica_console_out) {
1194 case 1:
1195 printf(acpica_outbuf);
1196 break;
1197 case 2:
1198 prom_printf(acpica_outbuf);
1199 break;
1200 case 0:
1201 default:
1202 (void) strlog(0, 0, 0,
1203 SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1204 acpica_outbuf);
1205 break;
1206 }
1207 acpica_outbuf_offset = 0;
1208 outp = acpica_outbuf;
1209 out_remaining = ACPICA_OUTBUF_LEN - 1;
1210 }
1211 }
1212
1213 acpica_outbuf_offset = outp - acpica_outbuf;
1214 }
1215
1216 void
1217 AcpiOsVprintf(const char *Format, va_list Args)
1218 {
1219
1220 /*
1221 * If AcpiOsInitialize() failed to allocate a string buffer,
1222 * resort to vprintf().
1223 */
1224 if (acpi_osl_pr_buffer == NULL) {
1225 vprintf(Format, Args);
1226 return;
1227 }
1228
1229 /*
1230 * It is possible that a very long debug output statement will
1231 * be truncated; this is silently ignored.
1232 */
1233 (void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1234 acpica_pr_buf(acpi_osl_pr_buffer);
1235 }
1236
1237 void
1238 AcpiOsRedirectOutput(void *Destination)
1239 {
1240 _NOTE(ARGUNUSED(Destination))
1241
1242 /* FUTUREWORK: debugger support */
1243
1244 #ifdef DEBUG
1245 cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1246 #endif
1247 }
1248
1249
1250 UINT32
1251 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead)
1252 {
1253 _NOTE(ARGUNUSED(Buffer))
1254 _NOTE(ARGUNUSED(len))
1255 _NOTE(ARGUNUSED(BytesRead))
1256
1257 /* FUTUREWORK: debugger support */
1258
1259 return (0);
1260 }
1261
1262 /*
1263 * Device tree binding
1264 */
1265 static ACPI_STATUS
1266 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1267 {
1268 _NOTE(ARGUNUSED(lvl));
1269
1270 int sta, hid, bbn;
1271 int busno = (intptr_t)ctxp;
1272 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1273
1274 /* Check whether device exists. */
1275 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1276 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1277 /*
1278 * Skip object if device doesn't exist.
1279 * According to ACPI Spec,
1280 * 1) setting either bit 0 or bit 3 means that device exists.
1281 * 2) Absence of _STA method means all status bits set.
1282 */
1283 return (AE_CTRL_DEPTH);
1284 }
1285
1286 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1287 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1288 /* Non PCI/PCIe host bridge. */
1289 return (AE_OK);
1290 }
1291
1292 if (acpi_has_broken_bbn) {
1293 ACPI_BUFFER rb;
1294 rb.Pointer = NULL;
1295 rb.Length = ACPI_ALLOCATE_BUFFER;
1296
1297 /* Decree _BBN == n from PCI<n> */
1298 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1299 return (AE_CTRL_TERMINATE);
1300 }
1301 bbn = ((char *)rb.Pointer)[3] - '0';
1302 AcpiOsFree(rb.Pointer);
1303 if (bbn == busno || busno == 0) {
1304 *hdlp = hdl;
1305 return (AE_CTRL_TERMINATE);
1306 }
1307 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1308 if (bbn == busno) {
1309 *hdlp = hdl;
1310 return (AE_CTRL_TERMINATE);
1311 }
1312 } else if (busno == 0) {
1313 *hdlp = hdl;
1314 return (AE_CTRL_TERMINATE);
1315 }
1316
1317 return (AE_CTRL_DEPTH);
1318 }
1319
1320 static int
1321 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1322 {
1323 ACPI_HANDLE sbobj, busobj;
1324
1325 /* initialize static flag by querying ACPI namespace for bug */
1326 if (acpi_has_broken_bbn == -1)
1327 acpi_has_broken_bbn = acpica_query_bbn_problem();
1328
1329 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1330 busobj = NULL;
1331 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1332 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1333 (void **)&busobj);
1334 if (busobj != NULL) {
1335 *rh = busobj;
1336 return (AE_OK);
1337 }
1338 }
1339
1340 return (AE_ERROR);
1341 }
1342
1343 static ACPI_STATUS
1344 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1345 {
1346 _NOTE(ARGUNUSED(lvl));
1347 _NOTE(ARGUNUSED(rvpp));
1348
1349 int sta, hid, bbn;
1350 int *cntp = (int *)ctxp;
1351
1352 /* Check whether device exists. */
1353 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1354 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1355 /*
1356 * Skip object if device doesn't exist.
1357 * According to ACPI Spec,
1358 * 1) setting either bit 0 or bit 3 means that device exists.
1359 * 2) Absence of _STA method means all status bits set.
1360 */
1361 return (AE_CTRL_DEPTH);
1362 }
1363
1364 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1365 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1366 /* Non PCI/PCIe host bridge. */
1367 return (AE_OK);
1368 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1369 bbn == 0 && ++(*cntp) > 1) {
1370 /*
1371 * If we find more than one bus with a 0 _BBN
1372 * we have the problem that BigBear's BIOS shows
1373 */
1374 return (AE_CTRL_TERMINATE);
1375 } else {
1376 /*
1377 * Skip children of PCI/PCIe host bridge.
1378 */
1379 return (AE_CTRL_DEPTH);
1380 }
1381 }
1382
1383 /*
1384 * Look for the ACPI problem where _BBN is zero for multiple PCI buses.
1385 * This is a clear ACPI bug, but acpica_find_pcibus() above works around
1386 * it if it exists.
1387 */
1388 static int
1389 acpica_query_bbn_problem(void)
1390 {
1391 ACPI_HANDLE sbobj;
1392 int zerobbncnt;
1393 void *rv;
1394
1395 zerobbncnt = 0;
1396 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1397 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1398 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1399 }
1400
1401 return (zerobbncnt > 1 ? 1 : 0);
1402 }
1403
1404 static const char hextab[] = "0123456789ABCDEF";
1405
1406 static int
1407 hexdig(int c)
1408 {
1409 /*
1410 * Get hex digit:
1411 *
1412 * Returns the 4-bit hex digit named by the input character. Returns
1413 * zero if the input character is not valid hex!
1414 */
1415
1416 int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1417 int j = sizeof (hextab);
1418
1419 while (--j && (x != hextab[j])) {
1420 }
1421 return (j);
1422 }
1423
1424 static int
1425 CompressEisaID(char *np)
1426 {
1427 /*
1428 * Compress an EISA device name:
1429 *
1430 * This routine converts a 7-byte ASCII device name into the 4-byte
1431 * compressed form used by EISA (50 bytes of ROM to save 1 byte of
1432 * NV-RAM!)
1433 */
1434
1435 union { char octets[4]; int retval; } myu;
1436
1437 myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1438 myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1439 myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1440 myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1441
1442 return (myu.retval);
1443 }
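/*
 * For example, the EISA ID string "PNP0A03" (a PCI host bridge)
 * compresses to the bytes 0x41 0xd0 0x0a 0x03, which read back as the
 * integer 0x030ad041 on a little-endian machine.
 */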
1444
1445 ACPI_STATUS
1446 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1447 {
1448 ACPI_STATUS status;
1449 ACPI_BUFFER rb;
1450 ACPI_OBJECT ro;
1451
1452 rb.Pointer = &ro;
1453 rb.Length = sizeof (ro);
1454 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1455 ACPI_TYPE_INTEGER)) == AE_OK)
1456 *rint = ro.Integer.Value;
1457
1458 return (status);
1459 }
1460
1461 static int
1462 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1463 {
1464 ACPI_BUFFER rb;
1465 ACPI_OBJECT *rv;
1466
1467 rb.Pointer = NULL;
1468 rb.Length = ACPI_ALLOCATE_BUFFER;
1469 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1470 rb.Length != 0) {
1471 rv = rb.Pointer;
1472 if (rv->Type == ACPI_TYPE_INTEGER) {
1473 *rint = rv->Integer.Value;
1474 AcpiOsFree(rv);
1475 return (AE_OK);
1476 } else if (rv->Type == ACPI_TYPE_STRING) {
1477 char *stringData;
1478
1479 /* Convert the string into an EISA ID */
1480 if (rv->String.Pointer == NULL) {
1481 AcpiOsFree(rv);
1482 return (AE_ERROR);
1483 }
1484
1485 stringData = rv->String.Pointer;
1486
1487 /*
1488 * If the string is an EisaID, it must be 7
1489 * characters; if it's an ACPI ID, it will be 8
1490 * (and we don't care about ACPI ids here).
1491 */
1492 if (strlen(stringData) != 7) {
1493 AcpiOsFree(rv);
1494 return (AE_ERROR);
1495 }
1496
1497 *rint = CompressEisaID(stringData);
1498 AcpiOsFree(rv);
1499 return (AE_OK);
1500 } else
1501 AcpiOsFree(rv);
1502 }
1503 return (AE_ERROR);
1504 }
1505
1506 /*
1507 * Create linkage between devinfo nodes and ACPI nodes
1508 */
1509 ACPI_STATUS
1510 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1511 {
1512 ACPI_STATUS status;
1513 ACPI_BUFFER rb;
1514
1515 /*
1516 * Tag the devinfo node with the ACPI name
1517 */
1518 rb.Pointer = NULL;
1519 rb.Length = ACPI_ALLOCATE_BUFFER;
1520 status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1521 if (ACPI_FAILURE(status)) {
1522 cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1523 } else {
1524 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1525 "acpi-namespace", (char *)rb.Pointer);
1526 AcpiOsFree(rb.Pointer);
1527
1528 /*
1529 * Tag the ACPI node with the dip
1530 */
1531 status = acpica_set_devinfo(acpiobj, dip);
1532 ASSERT(ACPI_SUCCESS(status));
1533 }
1534
1535 return (status);
1536 }
1537
1538 /*
1539 * Destroy linkage between devinfo nodes and ACPI nodes
1540 */
1541 ACPI_STATUS
1542 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1543 {
1544 (void) acpica_unset_devinfo(acpiobj);
1545 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1546
1547 return (AE_OK);
1548 }
1549
1550 /*
1551 * Return the ACPI device node matching the CPU dev_info node.
1552 */
1553 ACPI_STATUS
1554 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1555 {
1556 int i;
1557
1558 /*
1559 * if cpu_map itself is NULL, we're a uppc system and
1560 * acpica_build_processor_map() hasn't been called yet.
1561 * So call it here
1562 */
1563 if (cpu_map == NULL) {
1564 (void) acpica_build_processor_map();
1565 if (cpu_map == NULL)
1566 return (AE_ERROR);
1567 }
1568
1569 if (cpu_id < 0) {
1570 return (AE_ERROR);
1571 }
1572
1573 /*
1574 * search object with cpuid in cpu_map
1575 */
1576 mutex_enter(&cpu_map_lock);
1577 for (i = 0; i < cpu_map_count; i++) {
1578 if (cpu_map[i]->cpu_id == cpu_id) {
1579 break;
1580 }
1581 }
1582 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1583 *rh = cpu_map[i]->obj;
1584 mutex_exit(&cpu_map_lock);
1585 return (AE_OK);
1586 }
1587
1588 /* Handle special case for uppc-only systems. */
1589 if (cpu_map_called == 0) {
1590 uint32_t apicid = cpuid_get_apicid(CPU);
1591 if (apicid != UINT32_MAX) {
1592 for (i = 0; i < cpu_map_count; i++) {
1593 if (cpu_map[i]->apic_id == apicid) {
1594 break;
1595 }
1596 }
1597 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1598 *rh = cpu_map[i]->obj;
1599 mutex_exit(&cpu_map_lock);
1600 return (AE_OK);
1601 }
1602 }
1603 }
1604 mutex_exit(&cpu_map_lock);
1605
1606 return (AE_ERROR);
1607 }
1608
1609 /*
1610 * Determine if this object is a processor
1611 */
1612 static ACPI_STATUS
1613 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1614 {
1615 ACPI_STATUS status;
1616 ACPI_OBJECT_TYPE objtype;
1617 unsigned long acpi_id;
1618 ACPI_BUFFER rb;
1619 ACPI_DEVICE_INFO *di;
1620
1621 if (AcpiGetType(obj, &objtype) != AE_OK)
1622 return (AE_OK);
1623
1624 if (objtype == ACPI_TYPE_PROCESSOR) {
1625 /* process a Processor */
1626 rb.Pointer = NULL;
1627 rb.Length = ACPI_ALLOCATE_BUFFER;
1628 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1629 ACPI_TYPE_PROCESSOR);
1630 if (status != AE_OK) {
1631 cmn_err(CE_WARN, "!acpica: error probing Processor");
1632 return (status);
1633 }
1634 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1635 AcpiOsFree(rb.Pointer);
1636 } else if (objtype == ACPI_TYPE_DEVICE) {
1637 /* process a processor Device */
1638 status = AcpiGetObjectInfo(obj, &di);
1639 if (status != AE_OK) {
1640 cmn_err(CE_WARN,
1641 "!acpica: error probing Processor Device\n");
1642 return (status);
1643 }
1644
1645 if (!(di->Valid & ACPI_VALID_UID) ||
1646 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1647 ACPI_FREE(di);
1648 cmn_err(CE_WARN,
1649 "!acpica: error probing Processor Device _UID\n");
1650 return (AE_ERROR);
1651 }
1652 ACPI_FREE(di);
1653 }
1654 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1655
1656 return (AE_OK);
1657 }
1658
1659 void
1660 scan_d2a_map(void)
1661 {
1662 dev_info_t *dip, *cdip;
1663 ACPI_HANDLE acpiobj;
1664 char *device_type_prop;
1665 int bus;
1666 static int map_error = 0;
1667
1668 if (map_error || (d2a_done != 0))
1669 return;
1670
1671 scanning_d2a_map = 1;
1672
1673 /*
1674 * Find all child-of-root PCI buses, and find their corresponding
1675 * ACPI child-of-root PCI nodes. For each one, add to the
1676 * d2a table.
1677 */
1678
1679 for (dip = ddi_get_child(ddi_root_node());
1680 dip != NULL;
1681 dip = ddi_get_next_sibling(dip)) {
1682
1683 /* prune non-PCI nodes */
1684 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1685 DDI_PROP_DONTPASS,
1686 "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1687 continue;
1688
1689 if ((strcmp("pci", device_type_prop) != 0) &&
1690 (strcmp("pciex", device_type_prop) != 0)) {
1691 ddi_prop_free(device_type_prop);
1692 continue;
1693 }
1694
1695 ddi_prop_free(device_type_prop);
1696
1697 /*
1698 * To get bus number of dip, get first child and get its
1699 * bus number. If NULL, just continue, because we don't
1700 * care about bus nodes with no children anyway.
1701 */
1702 if ((cdip = ddi_get_child(dip)) == NULL)
1703 continue;
1704
1705 if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1706 #ifdef D2ADEBUG
1707 cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1708 #endif
1709 map_error = 1;
1710 scanning_d2a_map = 0;
1711 d2a_done = 1;
1712 return;
1713 }
1714
1715 if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1716 #ifdef D2ADEBUG
1717 cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1718 #endif
1719 map_error = 1;
1720 continue;
1721 }
1722
1723 acpica_tag_devinfo(dip, acpiobj);
1724
1725 /* call recursively to enumerate subtrees */
1726 scan_d2a_subtree(dip, acpiobj, bus);
1727 }
1728
1729 scanning_d2a_map = 0;
1730 d2a_done = 1;
1731 }
1732
1733 /*
1734 * For all ACPI child devices of acpiobj, find the matching dip
1735 * under the "dip" argument (matching means "matches dev/fn").
1736 * The bus is assumed to already match from the caller, and is
1737 * used here only to record in the d2a entry. Recurse if necessary.
1738 */
1739 static void
1740 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1741 {
1742 int acpi_devfn, hid;
1743 ACPI_HANDLE acld;
1744 dev_info_t *dcld;
1745 int dcld_b, dcld_d, dcld_f;
1746 int dev, func;
1747 char *device_type_prop;
1748
1749 acld = NULL;
1750 while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1751 == AE_OK) {
1752 /* get the dev/func we're looking for in the devinfo tree */
1753 if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1754 continue;
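		/*
		 * _ADR packs the device number into the upper 16 bits and
		 * the function into the lower 16; a function of 0xFFFF
		 * matches any function.
		 */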
1755 dev = (acpi_devfn >> 16) & 0xFFFF;
1756 func = acpi_devfn & 0xFFFF;
1757
1758 /* look through all the immediate children of dip */
1759 for (dcld = ddi_get_child(dip); dcld != NULL;
1760 dcld = ddi_get_next_sibling(dcld)) {
1761 if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1762 continue;
1763
1764 /* dev must match; function must match or wildcard */
1765 if (dcld_d != dev ||
1766 (func != 0xFFFF && func != dcld_f))
1767 continue;
1768 bus = dcld_b;
1769
1770 /* found a match, record it */
1771 acpica_tag_devinfo(dcld, acld);
1772
1773 /* if we find a bridge, recurse from here */
1774 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1775 DDI_PROP_DONTPASS, "device_type",
1776 &device_type_prop) == DDI_PROP_SUCCESS) {
1777 if ((strcmp("pci", device_type_prop) == 0) ||
1778 (strcmp("pciex", device_type_prop) == 0))
1779 scan_d2a_subtree(dcld, acld, bus);
1780 ddi_prop_free(device_type_prop);
1781 }
1782
1783 /* done finding a match, so break now */
1784 break;
1785 }
1786 }
1787 }
1788
1789 /*
1790 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1791 */
1792 int
1793 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1794 {
1795 pci_regspec_t *pci_rp;
1796 int len;
1797
1798 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1799 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1800 return (-1);
1801
1802 if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1803 ddi_prop_free(pci_rp);
1804 return (-1);
1805 }
1806 if (bus != NULL)
1807 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1808 if (device != NULL)
1809 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1810 if (func != NULL)
1811 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1812 ddi_prop_free(pci_rp);
1813 return (0);
1814 }
1815
1816 /*
1817 * Return the ACPI device node matching this dev_info node, if it
1818 * exists in the ACPI tree.
1819 */
1820 ACPI_STATUS
1821 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1822 {
1823 ACPI_STATUS status;
1824 char *acpiname;
1825
1826 #ifdef DEBUG
1827 if (d2a_done == 0)
1828 cmn_err(CE_WARN, "!acpica_get_handle:"
1829 " no ACPI mapping for %s", ddi_node_name(dip));
1830 #endif
1831
1832 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1833 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1834 return (AE_ERROR);
1835 }
1836
1837 status = AcpiGetHandle(NULL, acpiname, rh);
1838 ddi_prop_free((void *)acpiname);
1839 return (status);
1840 }
1841
1842
1843
1844 /*
1845 * Manage OS data attachment to ACPI nodes
1846 */
1847
1848 /*
1849 * Return the (dev_info_t *) associated with the ACPI node.
1850 */
1851 ACPI_STATUS
1852 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1853 {
1854 ACPI_STATUS status;
1855 void *ptr;
1856
1857 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1858 if (status == AE_OK)
1859 *dipp = (dev_info_t *)ptr;
1860
1861 return (status);
1862 }
1863
1864 /*
1865 * Set the dev_info_t associated with the ACPI node.
1866 */
1867 static ACPI_STATUS
1868 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1869 {
1870 ACPI_STATUS status;
1871
1872 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1873 return (status);
1874 }
1875
1876 /*
1877 * Unset the dev_info_t associated with the ACPI node.
1878 */
1879 static ACPI_STATUS
1880 acpica_unset_devinfo(ACPI_HANDLE obj)
1881 {
1882 return (AcpiDetachData(obj, acpica_devinfo_handler));
1883 }
1884
1885 /*
1886 * No-op object handler; its address is the key used with AcpiAttachData/AcpiGetData.
1887 */
1888 void
1889 acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
1890 {
1891 /* no-op */
1892 }
1893
1894 ACPI_STATUS
1895 acpica_build_processor_map(void)
1896 {
1897 ACPI_STATUS status;
1898 void *rv;
1899
1900 /*
1901 * shouldn't be called more than once anyway
1902 */
1903 if (cpu_map_built)
1904 return (AE_OK);
1905
1906 /*
1907 * The ACPI device configuration driver has already built the mapping
1908 * between processor ids and object handles; no need to probe again.
1909 */
1910 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1911 cpu_map_built = 1;
1912 return (AE_OK);
1913 }
1914
1915 /*
1916 * Look for Processor objects
1917 */
1918 status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1919 ACPI_ROOT_OBJECT,
1920 4,
1921 acpica_probe_processor,
1922 NULL,
1923 NULL,
1924 &rv);
1925 ASSERT(status == AE_OK);
1926
1927 /*
1928 * Look for processor Device objects
1929 */
1930 status = AcpiGetDevices("ACPI0007",
1931 acpica_probe_processor,
1932 NULL,
1933 &rv);
1934 ASSERT(status == AE_OK);
1935 cpu_map_built = 1;
1936
1937 return (status);
1938 }
1939
1940 /*
1941 * Grow cpu map table on demand.
1942 */
1943 static void
1944 acpica_grow_cpu_map(void)
1945 {
1946 if (cpu_map_count == cpu_map_count_max) {
1947 size_t sz;
1948 struct cpu_map_item **new_map;
1949
1950 ASSERT(cpu_map_count_max < INT_MAX / 2);
1951 cpu_map_count_max += max_ncpus;
1952 new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1953 KM_SLEEP);
1954 if (cpu_map_count != 0) {
1955 ASSERT(cpu_map != NULL);
1956 sz = sizeof (cpu_map[0]) * cpu_map_count;
1957 kcopy(cpu_map, new_map, sz);
1958 kmem_free(cpu_map, sz);
1959 }
1960 cpu_map = new_map;
1961 }
1962 }
1963
1964 /*
1965 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1966 * ACPI handle). The mapping table will be set up in two steps:
1967 * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1968 * processor id and ACPI object handle.
1969 * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1970 * On systems which have ACPI device configuration for CPUs enabled,
1971 * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
1972 * otherwise acpica_map_cpu() will be called before
1973 * acpica_add_processor_to_map().
1974 */
1975 ACPI_STATUS
1976 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
1977 {
1978 int i;
1979 ACPI_STATUS rc = AE_OK;
1980 struct cpu_map_item *item = NULL;
1981
1982 ASSERT(obj != NULL);
1983 if (obj == NULL) {
1984 return (AE_ERROR);
1985 }
1986
1987 mutex_enter(&cpu_map_lock);
1988
1989 /*
1990 * Special case for uppc
1991 * If we're a uppc system and ACPI device configuration for CPU has
1992 * been disabled, there won't be a CPU map yet because uppc psm doesn't
1993 * call acpica_map_cpu(). So create one and use the passed-in processor
1994 * as CPU 0
1995 * Assumption: the first CPU returned by
1996 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
1997 * Unfortunately there appears to be no good way to ASSERT this.
1998 */
1999 if (cpu_map == NULL &&
2000 !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
2001 acpica_grow_cpu_map();
2002 ASSERT(cpu_map != NULL);
2003 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2004 item->cpu_id = 0;
2005 item->proc_id = acpi_id;
2006 item->apic_id = apic_id;
2007 item->obj = obj;
2008 cpu_map[0] = item;
2009 cpu_map_count = 1;
2010 mutex_exit(&cpu_map_lock);
2011 return (AE_OK);
2012 }
2013
2014 for (i = 0; i < cpu_map_count; i++) {
2015 if (cpu_map[i]->obj == obj) {
2016 rc = AE_ALREADY_EXISTS;
2017 break;
2018 } else if (cpu_map[i]->proc_id == acpi_id) {
2019 ASSERT(item == NULL);
2020 item = cpu_map[i];
2021 }
2022 }
2023
2024 if (rc == AE_OK) {
2025 if (item != NULL) {
2026 /*
2027 * ACPI alias objects may cause more than one object
2028 * with the same ACPI processor id; only remember
2029 * the first object encountered.
2030 */
2031 if (item->obj == NULL) {
2032 item->obj = obj;
2033 item->apic_id = apic_id;
2034 } else {
2035 rc = AE_ALREADY_EXISTS;
2036 }
2037 } else if (cpu_map_count >= INT_MAX / 2) {
2038 rc = AE_NO_MEMORY;
2039 } else {
2040 acpica_grow_cpu_map();
2041 ASSERT(cpu_map != NULL);
2042 ASSERT(cpu_map_count < cpu_map_count_max);
2043 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2044 item->cpu_id = -1;
2045 item->proc_id = acpi_id;
2046 item->apic_id = apic_id;
2047 item->obj = obj;
2048 cpu_map[cpu_map_count] = item;
2049 cpu_map_count++;
2050 }
2051 }
2052
2053 mutex_exit(&cpu_map_lock);
2054
2055 return (rc);
2056 }
2057
2058 ACPI_STATUS
2059 acpica_remove_processor_from_map(UINT32 acpi_id)
2060 {
2061 int i;
2062 ACPI_STATUS rc = AE_NOT_EXIST;
2063
2064 mutex_enter(&cpu_map_lock);
2065 for (i = 0; i < cpu_map_count; i++) {
2066 if (cpu_map[i]->proc_id != acpi_id) {
2067 continue;
2068 }
2069 cpu_map[i]->obj = NULL;
2070 /* Free item if no more reference to it. */
2071 if (cpu_map[i]->cpu_id == -1) {
2072 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2073 cpu_map[i] = NULL;
2074 cpu_map_count--;
2075 if (i != cpu_map_count) {
2076 cpu_map[i] = cpu_map[cpu_map_count];
2077 cpu_map[cpu_map_count] = NULL;
2078 }
2079 }
2080 rc = AE_OK;
2081 break;
2082 }
2083 mutex_exit(&cpu_map_lock);
2084
2085 return (rc);
2086 }
2087
2088 ACPI_STATUS
2089 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2090 {
2091 int i;
2092 ACPI_STATUS rc = AE_OK;
2093 struct cpu_map_item *item = NULL;
2094
2095 ASSERT(cpuid != -1);
2096 if (cpuid == -1) {
2097 return (AE_ERROR);
2098 }
2099
2100 mutex_enter(&cpu_map_lock);
2101 cpu_map_called = 1;
2102 for (i = 0; i < cpu_map_count; i++) {
2103 if (cpu_map[i]->cpu_id == cpuid) {
2104 rc = AE_ALREADY_EXISTS;
2105 break;
2106 } else if (cpu_map[i]->proc_id == acpi_id) {
2107 ASSERT(item == NULL);
2108 item = cpu_map[i];
2109 }
2110 }
2111 if (rc == AE_OK) {
2112 if (item != NULL) {
2113 if (item->cpu_id == -1) {
2114 item->cpu_id = cpuid;
2115 } else {
2116 rc = AE_ALREADY_EXISTS;
2117 }
2118 } else if (cpu_map_count >= INT_MAX / 2) {
2119 rc = AE_NO_MEMORY;
2120 } else {
2121 acpica_grow_cpu_map();
2122 ASSERT(cpu_map != NULL);
2123 ASSERT(cpu_map_count < cpu_map_count_max);
2124 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2125 item->cpu_id = cpuid;
2126 item->proc_id = acpi_id;
2127 item->apic_id = UINT32_MAX;
2128 item->obj = NULL;
2129 cpu_map[cpu_map_count] = item;
2130 cpu_map_count++;
2131 }
2132 }
2133 mutex_exit(&cpu_map_lock);
2134
2135 return (rc);
2136 }
2137
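/*
 * Undo acpica_map_cpu(): forget the kernel cpu id recorded for cpuid.
 * The map entry is freed only once the ACPI object reference is also gone;
 * otherwise it is kept for acpica_remove_processor_from_map() to clean up.
 */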
2138 ACPI_STATUS
2139 acpica_unmap_cpu(processorid_t cpuid)
2140 {
2141 int i;
2142 ACPI_STATUS rc = AE_NOT_EXIST;
2143
2144 ASSERT(cpuid != -1);
2145 if (cpuid == -1) {
2146 return (rc);
2147 }
2148
2149 mutex_enter(&cpu_map_lock);
2150 for (i = 0; i < cpu_map_count; i++) {
2151 if (cpu_map[i]->cpu_id != cpuid) {
2152 continue;
2153 }
2154 cpu_map[i]->cpu_id = -1;
2155 		/* Free item if there are no more references to it. */
2156 if (cpu_map[i]->obj == NULL) {
2157 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2158 cpu_map[i] = NULL;
2159 cpu_map_count--;
2160 if (i != cpu_map_count) {
2161 cpu_map[i] = cpu_map[cpu_map_count];
2162 cpu_map[cpu_map_count] = NULL;
2163 }
2164 }
2165 rc = AE_OK;
2166 break;
2167 }
2168 mutex_exit(&cpu_map_lock);
2169
2170 return (rc);
2171 }
2172
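/*
 * Lookup helpers over the CPU map, all performed under cpu_map_lock:
 * translate between a kernel cpu id, an ACPI processor id, a local APIC id
 * and the ACPI processor object.  Each returns AE_NOT_EXIST when no entry
 * with the relevant field filled in matches, or AE_OK with the result
 * stored through the out parameter.
 *
 * Illustrative (hypothetical) caller, e.g. from a PSM:
 *
 *	ACPI_HANDLE hdl;
 *
 *	if (acpica_get_cpu_object_by_cpuid(CPU->cpu_id, &hdl) == AE_OK) {
 *		... evaluate processor methods against hdl ...
 *	}
 */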
2173 ACPI_STATUS
2174 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2175 {
2176 int i;
2177 ACPI_STATUS rc = AE_NOT_EXIST;
2178
2179 ASSERT(cpuid != -1);
2180 if (cpuid == -1) {
2181 return (rc);
2182 }
2183
2184 mutex_enter(&cpu_map_lock);
2185 for (i = 0; i < cpu_map_count; i++) {
2186 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2187 *hdlp = cpu_map[i]->obj;
2188 rc = AE_OK;
2189 break;
2190 }
2191 }
2192 mutex_exit(&cpu_map_lock);
2193
2194 return (rc);
2195 }
2196
2197 ACPI_STATUS
2198 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2199 {
2200 int i;
2201 ACPI_STATUS rc = AE_NOT_EXIST;
2202
2203 mutex_enter(&cpu_map_lock);
2204 for (i = 0; i < cpu_map_count; i++) {
2205 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2206 *hdlp = cpu_map[i]->obj;
2207 rc = AE_OK;
2208 break;
2209 }
2210 }
2211 mutex_exit(&cpu_map_lock);
2212
2213 return (rc);
2214 }
2215
2216 ACPI_STATUS
2217 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2218 {
2219 int i;
2220 ACPI_STATUS rc = AE_NOT_EXIST;
2221
2222 ASSERT(apicid != UINT32_MAX);
2223 if (apicid == UINT32_MAX) {
2224 return (rc);
2225 }
2226
2227 mutex_enter(&cpu_map_lock);
2228 for (i = 0; i < cpu_map_count; i++) {
2229 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2230 *hdlp = cpu_map[i]->obj;
2231 rc = AE_OK;
2232 break;
2233 }
2234 }
2235 mutex_exit(&cpu_map_lock);
2236
2237 return (rc);
2238 }
2239
2240 ACPI_STATUS
2241 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2242 {
2243 int i;
2244 ACPI_STATUS rc = AE_NOT_EXIST;
2245
2246 ASSERT(cpuidp != NULL);
2247 if (hdl == NULL || cpuidp == NULL) {
2248 return (rc);
2249 }
2250
2251 *cpuidp = -1;
2252 mutex_enter(&cpu_map_lock);
2253 for (i = 0; i < cpu_map_count; i++) {
2254 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2255 *cpuidp = cpu_map[i]->cpu_id;
2256 rc = AE_OK;
2257 break;
2258 }
2259 }
2260 mutex_exit(&cpu_map_lock);
2261
2262 return (rc);
2263 }
2264
2265 ACPI_STATUS
2266 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2267 {
2268 int i;
2269 ACPI_STATUS rc = AE_NOT_EXIST;
2270
2271 ASSERT(rp != NULL);
2272 if (hdl == NULL || rp == NULL) {
2273 return (rc);
2274 }
2275
2276 *rp = UINT32_MAX;
2277 mutex_enter(&cpu_map_lock);
2278 for (i = 0; i < cpu_map_count; i++) {
2279 if (cpu_map[i]->obj == hdl &&
2280 cpu_map[i]->apic_id != UINT32_MAX) {
2281 *rp = cpu_map[i]->apic_id;
2282 rc = AE_OK;
2283 break;
2284 }
2285 }
2286 mutex_exit(&cpu_map_lock);
2287
2288 return (rc);
2289 }
2290
2291 ACPI_STATUS
2292 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2293 {
2294 int i;
2295 ACPI_STATUS rc = AE_NOT_EXIST;
2296
2297 ASSERT(rp != NULL);
2298 if (hdl == NULL || rp == NULL) {
2299 return (rc);
2300 }
2301
2302 *rp = UINT32_MAX;
2303 mutex_enter(&cpu_map_lock);
2304 for (i = 0; i < cpu_map_count; i++) {
2305 if (cpu_map[i]->obj == hdl) {
2306 *rp = cpu_map[i]->proc_id;
2307 rc = AE_OK;
2308 break;
2309 }
2310 }
2311 mutex_exit(&cpu_map_lock);
2312
2313 return (rc);
2314 }
2315
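/*
 * Accessors for the acpica_core_features and acpica_devcfg_features bit
 * masks: the set and clear routines update the masks atomically, and the
 * get routines return the intersection of the mask with the bits the
 * caller asked about (non-zero means the feature is enabled).
 */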
2316 void
2317 acpica_set_core_feature(uint64_t features)
2318 {
2319 atomic_or_64(&acpica_core_features, features);
2320 }
2321
2322 void
2323 acpica_clear_core_feature(uint64_t features)
2324 {
2325 atomic_and_64(&acpica_core_features, ~features);
2326 }
2327
2328 uint64_t
2329 acpica_get_core_feature(uint64_t features)
2330 {
2331 return (acpica_core_features & features);
2332 }
2333
2334 void
2335 acpica_set_devcfg_feature(uint64_t features)
2336 {
2337 atomic_or_64(&acpica_devcfg_features, features);
2338 }
2339
2340 void
2341 acpica_clear_devcfg_feature(uint64_t features)
2342 {
2343 atomic_and_64(&acpica_devcfg_features, ~features);
2344 }
2345
2346 uint64_t
2347 acpica_get_devcfg_feature(uint64_t features)
2348 {
2349 return (acpica_devcfg_features & features);
2350 }
2351
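/*
 * Hand back a pointer to ACPICA's in-kernel copy of the FADT.
 */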
2352 void
2353 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2354 {
2355 *gbl_FADT = &AcpiGbl_FADT;
2356 }
2357
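/*
 * Tell platform firmware that the OS will manage CPU power management:
 * write the FADT-advertised P-state and/or C-state enable commands to the
 * SMI command register, but only when the caller requests it and the FADT
 * supplies a non-zero command value.
 */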
2358 void
2359 acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
2360 {
2361 if (pstates && AcpiGbl_FADT.PstateControl != 0)
2362 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2363 AcpiGbl_FADT.PstateControl);
2364
2365 if (cstates && AcpiGbl_FADT.CstControl != 0)
2366 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2367 AcpiGbl_FADT.CstControl);
2368 }
2369
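/*
 * strtoul-style wrapper over ddi_strtoul() for ACPI consumers: parse str
 * in the given base and return the value as a uint32_t, or ACPI_UINT32_MAX
 * if parsing fails or the value does not fit in 32 bits.
 */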
2370 uint32_t
2371 acpi_strtoul(const char *str, char **ep, int base)
2372 {
2373 ulong_t v;
2374
2375 if (ddi_strtoul(str, ep, base, &v) != 0 || v > ACPI_UINT32_MAX) {
2376 return (ACPI_UINT32_MAX);
2377 }
2378
2379 return ((uint32_t)v);
2380 }
2381