/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2016 Joyent, Inc.
 */
/*
 * Copyright (c) 2009-2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * ACPI CA OSL for Solaris x86
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/psm.h>
#include <sys/pci_cfgspace.h>
#include <sys/apic.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/pci.h>
#include <sys/kobj.h>
#include <sys/taskq.h>
#include <sys/strlog.h>
#include <sys/x86_archext.h>
#include <sys/note.h>
#include <sys/promif.h>

#include <sys/acpi/accommon.h>
#include <sys/acpica.h>

#define	MAX_DAT_FILE_SIZE	(64*1024)

/* local functions */
static int CompressEisaID(char *np);

static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
static int acpica_query_bbn_problem(void);
static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
static void acpica_devinfo_handler(ACPI_HANDLE, void *);

/*
 * Event queue vars
 */
int acpica_eventq_init = 0;
ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];

/*
 * Priorities relative to minclsyspri that each taskq
 * runs at; OSL_NOTIFY_HANDLER needs to run at a higher
 * priority than OSL_GPE_HANDLER.  There's an implicit
 * assumption that no priority here results in exceeding
 * maxclsyspri.
 * Note: these initializations need to match the order of
 * ACPI_EXECUTE_TYPE.
 */
int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
	0,		/* OSL_GLOBAL_LOCK_HANDLER */
	2,		/* OSL_NOTIFY_HANDLER */
	0,		/* OSL_GPE_HANDLER */
	0,		/* OSL_DEBUGGER_THREAD */
	0,		/* OSL_EC_POLL_HANDLER */
	0		/* OSL_EC_BURST_HANDLER */
};

/*
 * Note, if you change this path, you need to update
 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
 */
static char *acpi_table_path = "/boot/acpi/tables/";

/* non-zero while scan_d2a_map() is working */
static int scanning_d2a_map = 0;
static int d2a_done = 0;

/*
 * features supported by ACPICA and ACPI device configuration.
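 * These flags are read and updated via acpica_get_core_feature(),
 * acpica_set_core_feature() and their devcfg counterparts defined
 * near the end of this file.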
 */
uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
static uint64_t acpica_devcfg_features = 0;

/* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
int acpica_use_safe_delay = 0;

/* CPU mapping data */
struct cpu_map_item {
	processorid_t	cpu_id;
	UINT32		proc_id;
	UINT32		apic_id;
	ACPI_HANDLE	obj;
};

kmutex_t cpu_map_lock;
static struct cpu_map_item **cpu_map = NULL;
static int cpu_map_count_max = 0;
static int cpu_map_count = 0;
static int cpu_map_built = 0;

/*
 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
 * This flag is used to check for uppc-only systems by detecting whether
 * acpica_map_cpu() has been called or not.
 */
static int cpu_map_called = 0;

static int acpi_has_broken_bbn = -1;

/* buffer for AcpiOsVprintf() */
#define	ACPI_OSL_PR_BUFLEN	1024
static char *acpi_osl_pr_buffer = NULL;
static int acpi_osl_pr_buflen;

#define	D2A_DEBUG

/*
 *
 */
static void
discard_event_queues()
{
	int	i;

	/*
	 * destroy event queues
	 */
	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
		if (osl_eventq[i])
			ddi_taskq_destroy(osl_eventq[i]);
	}
}


/*
 *
 */
static ACPI_STATUS
init_event_queues()
{
	char	namebuf[32];
	int	i, error = 0;

	/*
	 * Initialize event queues
	 */

	/* Always allocate only 1 thread per queue to force FIFO execution */
	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
		snprintf(namebuf, 32, "ACPI%d", i);
		osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
		    osl_eventq_pri_delta[i] + minclsyspri, 0);
		if (osl_eventq[i] == NULL)
			error++;
	}

	if (error != 0) {
		discard_event_queues();
#ifdef DEBUG
		cmn_err(CE_WARN, "!acpica: could not initialize event queues");
#endif
		return (AE_ERROR);
	}

	acpica_eventq_init = 1;
	return (AE_OK);
}

/*
 * One-time initialization of OSL layer
 */
ACPI_STATUS
AcpiOsInitialize(void)
{
	/*
	 * Allocate buffer for AcpiOsVprintf() here to avoid
	 * kmem_alloc()/kmem_free() at high PIL
	 */
	acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
	if (acpi_osl_pr_buffer != NULL)
		acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;

	return (AE_OK);
}

/*
 * One-time shut-down of OSL layer
 */
ACPI_STATUS
AcpiOsTerminate(void)
{

	if (acpi_osl_pr_buffer != NULL)
		kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);

	discard_event_queues();
	return (AE_OK);
}


ACPI_PHYSICAL_ADDRESS
AcpiOsGetRootPointer()
{
	ACPI_PHYSICAL_ADDRESS Address;

	/*
	 * For EFI firmware, the root pointer is defined in the EFI systab.
	 * The boot code processes the table and puts the physical address
	 * in the acpi-root-tab property.
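	 * If the property is absent (BIOS boot), we fall back to
	 * AcpiFindRootPointer(), which searches the BIOS memory areas
	 * for the RSDP.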
231 */ 232 Address = ddi_prop_get_int64(DDI_DEV_T_ANY, ddi_root_node(), 233 DDI_PROP_DONTPASS, "acpi-root-tab", NULL); 234 235 if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address))) 236 Address = NULL; 237 238 return (Address); 239 } 240 241 /*ARGSUSED*/ 242 ACPI_STATUS 243 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal, 244 ACPI_STRING *NewVal) 245 { 246 247 *NewVal = 0; 248 return (AE_OK); 249 } 250 251 static void 252 acpica_strncpy(char *dest, const char *src, int len) 253 { 254 255 /*LINTED*/ 256 while ((*dest++ = *src++) && (--len > 0)) 257 /* copy the string */; 258 *dest = '\0'; 259 } 260 261 ACPI_STATUS 262 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable, 263 ACPI_TABLE_HEADER **NewTable) 264 { 265 char signature[5]; 266 char oemid[7]; 267 char oemtableid[9]; 268 struct _buf *file; 269 char *buf1, *buf2; 270 int count; 271 char acpi_table_loc[128]; 272 273 acpica_strncpy(signature, ExistingTable->Signature, 4); 274 acpica_strncpy(oemid, ExistingTable->OemId, 6); 275 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8); 276 277 /* File name format is "signature_oemid_oemtableid.dat" */ 278 (void) strcpy(acpi_table_loc, acpi_table_path); 279 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */ 280 (void) strcat(acpi_table_loc, "_"); 281 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */ 282 (void) strcat(acpi_table_loc, "_"); 283 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */ 284 (void) strcat(acpi_table_loc, ".dat"); 285 286 file = kobj_open_file(acpi_table_loc); 287 if (file == (struct _buf *)-1) { 288 *NewTable = 0; 289 return (AE_OK); 290 } else { 291 buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP); 292 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0); 293 if (count >= MAX_DAT_FILE_SIZE) { 294 cmn_err(CE_WARN, "!acpica: table %s file size too big", 295 acpi_table_loc); 296 *NewTable = 0; 297 } else { 298 buf2 = (char *)kmem_alloc(count, KM_SLEEP); 299 (void) memcpy(buf2, buf1, count); 300 *NewTable = (ACPI_TABLE_HEADER *)buf2; 301 cmn_err(CE_NOTE, "!acpica: replacing table: %s", 302 acpi_table_loc); 303 } 304 } 305 kobj_close_file(file); 306 kmem_free(buf1, MAX_DAT_FILE_SIZE); 307 308 return (AE_OK); 309 } 310 311 ACPI_STATUS 312 AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable, 313 ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength) 314 { 315 return (AE_SUPPORT); 316 } 317 318 /* 319 * ACPI semaphore implementation 320 */ 321 typedef struct { 322 kmutex_t mutex; 323 kcondvar_t cv; 324 uint32_t available; 325 uint32_t initial; 326 uint32_t maximum; 327 } acpi_sema_t; 328 329 /* 330 * 331 */ 332 void 333 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count) 334 { 335 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL); 336 cv_init(&sp->cv, NULL, CV_DRIVER, NULL); 337 /* no need to enter mutex here at creation */ 338 sp->available = count; 339 sp->initial = count; 340 sp->maximum = max; 341 } 342 343 /* 344 * 345 */ 346 void 347 acpi_sema_destroy(acpi_sema_t *sp) 348 { 349 350 cv_destroy(&sp->cv); 351 mutex_destroy(&sp->mutex); 352 } 353 354 /* 355 * 356 */ 357 ACPI_STATUS 358 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time) 359 { 360 ACPI_STATUS rv = AE_OK; 361 clock_t deadline; 362 363 mutex_enter(&sp->mutex); 364 365 if (sp->available >= count) { 366 /* 367 * Enough units available, no blocking 368 */ 369 sp->available -= count; 370 mutex_exit(&sp->mutex); 371 return (rv); 372 } else if (wait_time == 0) { 373 /* 374 * Not enough units 
available and timeout 375 * specifies no blocking 376 */ 377 rv = AE_TIME; 378 mutex_exit(&sp->mutex); 379 return (rv); 380 } 381 382 /* 383 * Not enough units available and timeout specifies waiting 384 */ 385 if (wait_time != ACPI_WAIT_FOREVER) 386 deadline = ddi_get_lbolt() + 387 (clock_t)drv_usectohz(wait_time * 1000); 388 389 do { 390 if (wait_time == ACPI_WAIT_FOREVER) 391 cv_wait(&sp->cv, &sp->mutex); 392 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) { 393 rv = AE_TIME; 394 break; 395 } 396 } while (sp->available < count); 397 398 /* if we dropped out of the wait with AE_OK, we got the units */ 399 if (rv == AE_OK) 400 sp->available -= count; 401 402 mutex_exit(&sp->mutex); 403 return (rv); 404 } 405 406 /* 407 * 408 */ 409 void 410 acpi_sema_v(acpi_sema_t *sp, unsigned count) 411 { 412 mutex_enter(&sp->mutex); 413 sp->available += count; 414 cv_broadcast(&sp->cv); 415 mutex_exit(&sp->mutex); 416 } 417 418 419 ACPI_STATUS 420 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits, 421 ACPI_HANDLE *OutHandle) 422 { 423 acpi_sema_t *sp; 424 425 if ((OutHandle == NULL) || (InitialUnits > MaxUnits)) 426 return (AE_BAD_PARAMETER); 427 428 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP); 429 acpi_sema_init(sp, MaxUnits, InitialUnits); 430 *OutHandle = (ACPI_HANDLE)sp; 431 return (AE_OK); 432 } 433 434 435 ACPI_STATUS 436 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle) 437 { 438 439 if (Handle == NULL) 440 return (AE_BAD_PARAMETER); 441 442 acpi_sema_destroy((acpi_sema_t *)Handle); 443 kmem_free((void *)Handle, sizeof (acpi_sema_t)); 444 return (AE_OK); 445 } 446 447 ACPI_STATUS 448 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout) 449 { 450 451 if ((Handle == NULL) || (Units < 1)) 452 return (AE_BAD_PARAMETER); 453 454 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout)); 455 } 456 457 ACPI_STATUS 458 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units) 459 { 460 461 if ((Handle == NULL) || (Units < 1)) 462 return (AE_BAD_PARAMETER); 463 464 acpi_sema_v((acpi_sema_t *)Handle, Units); 465 return (AE_OK); 466 } 467 468 ACPI_STATUS 469 AcpiOsCreateLock(ACPI_HANDLE *OutHandle) 470 { 471 kmutex_t *mp; 472 473 if (OutHandle == NULL) 474 return (AE_BAD_PARAMETER); 475 476 mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP); 477 mutex_init(mp, NULL, MUTEX_DRIVER, NULL); 478 *OutHandle = (ACPI_HANDLE)mp; 479 return (AE_OK); 480 } 481 482 void 483 AcpiOsDeleteLock(ACPI_HANDLE Handle) 484 { 485 486 if (Handle == NULL) 487 return; 488 489 mutex_destroy((kmutex_t *)Handle); 490 kmem_free((void *)Handle, sizeof (kmutex_t)); 491 } 492 493 ACPI_CPU_FLAGS 494 AcpiOsAcquireLock(ACPI_HANDLE Handle) 495 { 496 497 498 if (Handle == NULL) 499 return (AE_BAD_PARAMETER); 500 501 if (curthread == CPU->cpu_idle_thread) { 502 while (!mutex_tryenter((kmutex_t *)Handle)) 503 /* spin */; 504 } else 505 mutex_enter((kmutex_t *)Handle); 506 return (AE_OK); 507 } 508 509 void 510 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags) 511 { 512 _NOTE(ARGUNUSED(Flags)) 513 514 mutex_exit((kmutex_t *)Handle); 515 } 516 517 518 void * 519 AcpiOsAllocate(ACPI_SIZE Size) 520 { 521 ACPI_SIZE *tmp_ptr; 522 523 Size += sizeof (Size); 524 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP); 525 *tmp_ptr++ = Size; 526 return (tmp_ptr); 527 } 528 529 void 530 AcpiOsFree(void *Memory) 531 { 532 ACPI_SIZE size, *tmp_ptr; 533 534 tmp_ptr = (ACPI_SIZE *)Memory; 535 tmp_ptr -= 1; 536 size = *tmp_ptr; 537 kmem_free(tmp_ptr, size); 538 } 539 540 static int napics_found; /* 
number of ioapic addresses in array */
static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
static void *dummy_ioapicadr;

void
acpica_find_ioapics(void)
{
	int			madt_seen, madt_size;
	ACPI_SUBTABLE_HEADER	*ap;
	ACPI_MADT_IO_APIC	*mia;

	if (acpi_mapic_dtp != NULL)
		return;	/* already parsed table */
	if (AcpiGetTable(ACPI_SIG_MADT, 1,
	    (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
		return;

	napics_found = 0;

	/*
	 * Search the MADT for ioapics
	 */
	ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
	madt_size = acpi_mapic_dtp->Header.Length;
	madt_seen = sizeof (*acpi_mapic_dtp);

	while (madt_seen < madt_size) {

		switch (ap->Type) {
		case ACPI_MADT_TYPE_IO_APIC:
			mia = (ACPI_MADT_IO_APIC *) ap;
			if (napics_found < MAX_IO_APIC) {
				ioapic_paddr[napics_found++] =
				    (ACPI_PHYSICAL_ADDRESS)
				    (mia->Address & PAGEMASK);
			}
			break;

		default:
			break;
		}

		/* advance to next entry */
		madt_seen += ap->Length;
		ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
	}
	if (dummy_ioapicadr == NULL)
		dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
}


void *
AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
{
	int	i;

	/*
	 * If the ioapic address table is populated, check whether the
	 * caller is trying to access an ioapic; if so, return a pointer
	 * to a dummy ioapic instead.
	 */
	for (i = 0; i < napics_found; i++) {
		if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
			return (dummy_ioapicadr);
	}
	/* FUTUREWORK: test PhysicalAddress for > 32 bits */
	return (psm_map_new((paddr_t)PhysicalAddress,
	    (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
}

void
AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
{
	/*
	 * Check if trying to unmap dummy ioapic address.
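	 * The dummy page was allocated with kmem_zalloc() rather than
	 * mapped with psm_map_new(), so there is nothing to unmap.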
615 */ 616 if (LogicalAddress == dummy_ioapicadr) 617 return; 618 619 psm_unmap((caddr_t)LogicalAddress, (size_t)Size); 620 } 621 622 /*ARGSUSED*/ 623 ACPI_STATUS 624 AcpiOsGetPhysicalAddress(void *LogicalAddress, 625 ACPI_PHYSICAL_ADDRESS *PhysicalAddress) 626 { 627 628 /* UNIMPLEMENTED: not invoked by ACPI CA code */ 629 return (AE_NOT_IMPLEMENTED); 630 } 631 632 633 ACPI_OSD_HANDLER acpi_isr; 634 void *acpi_isr_context; 635 636 uint_t 637 acpi_wrapper_isr(char *arg) 638 { 639 _NOTE(ARGUNUSED(arg)) 640 641 int status; 642 643 status = (*acpi_isr)(acpi_isr_context); 644 645 if (status == ACPI_INTERRUPT_HANDLED) { 646 return (DDI_INTR_CLAIMED); 647 } else { 648 return (DDI_INTR_UNCLAIMED); 649 } 650 } 651 652 static int acpi_intr_hooked = 0; 653 654 ACPI_STATUS 655 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber, 656 ACPI_OSD_HANDLER ServiceRoutine, 657 void *Context) 658 { 659 _NOTE(ARGUNUSED(InterruptNumber)) 660 661 int retval; 662 int sci_vect; 663 iflag_t sci_flags; 664 665 acpi_isr = ServiceRoutine; 666 acpi_isr_context = Context; 667 668 /* 669 * Get SCI (adjusted for PIC/APIC mode if necessary) 670 */ 671 if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) { 672 return (AE_ERROR); 673 } 674 675 #ifdef DEBUG 676 cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect); 677 #endif 678 679 retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr, 680 "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL); 681 if (retval) { 682 acpi_intr_hooked = 1; 683 return (AE_OK); 684 } else 685 return (AE_BAD_PARAMETER); 686 } 687 688 ACPI_STATUS 689 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber, 690 ACPI_OSD_HANDLER ServiceRoutine) 691 { 692 _NOTE(ARGUNUSED(ServiceRoutine)) 693 694 #ifdef DEBUG 695 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber); 696 #endif 697 if (acpi_intr_hooked) { 698 rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr, 699 InterruptNumber); 700 acpi_intr_hooked = 0; 701 } 702 return (AE_OK); 703 } 704 705 706 ACPI_THREAD_ID 707 AcpiOsGetThreadId(void) 708 { 709 /* 710 * ACPI CA doesn't care what actual value is returned as long 711 * as it is non-zero and unique to each existing thread. 712 * ACPI CA assumes that thread ID is castable to a pointer, 713 * so we use the current thread pointer. 714 */ 715 return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread)); 716 } 717 718 /* 719 * 720 */ 721 ACPI_STATUS 722 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function, 723 void *Context) 724 { 725 726 if (!acpica_eventq_init) { 727 /* 728 * Create taskqs for event handling 729 */ 730 if (init_event_queues() != AE_OK) 731 return (AE_ERROR); 732 } 733 734 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context, 735 DDI_NOSLEEP) == DDI_FAILURE) { 736 #ifdef DEBUG 737 cmn_err(CE_WARN, "!acpica: unable to dispatch event"); 738 #endif 739 return (AE_ERROR); 740 } 741 return (AE_OK); 742 743 } 744 745 746 void 747 AcpiOsWaitEventsComplete(void) 748 { 749 int i; 750 751 /* 752 * Wait for event queues to be empty. 753 */ 754 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) { 755 if (osl_eventq[i] != NULL) { 756 ddi_taskq_wait(osl_eventq[i]); 757 } 758 } 759 } 760 761 void 762 AcpiOsSleep(ACPI_INTEGER Milliseconds) 763 { 764 /* 765 * During kernel startup, before the first tick interrupt 766 * has taken place, we can't call delay; very late in 767 * kernel shutdown or suspend/resume, clock interrupts 768 * are blocked, so delay doesn't work then either. 
769 * So we busy wait if lbolt == 0 (kernel startup) 770 * or if acpica_use_safe_delay has been set to a 771 * non-zero value. 772 */ 773 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay) 774 drv_usecwait(Milliseconds * 1000); 775 else 776 delay(drv_usectohz(Milliseconds * 1000)); 777 } 778 779 void 780 AcpiOsStall(UINT32 Microseconds) 781 { 782 drv_usecwait(Microseconds); 783 } 784 785 786 /* 787 * Implementation of "Windows 2001" compatible I/O permission map 788 * 789 */ 790 #define OSL_IO_NONE (0) 791 #define OSL_IO_READ (1<<0) 792 #define OSL_IO_WRITE (1<<1) 793 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE) 794 #define OSL_IO_TERM (1<<2) 795 #define OSL_IO_DEFAULT OSL_IO_RW 796 797 static struct io_perm { 798 ACPI_IO_ADDRESS low; 799 ACPI_IO_ADDRESS high; 800 uint8_t perm; 801 } osl_io_perm[] = { 802 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW} 803 }; 804 805 806 /* 807 * 808 */ 809 static struct io_perm * 810 osl_io_find_perm(ACPI_IO_ADDRESS addr) 811 { 812 struct io_perm *p; 813 814 p = osl_io_perm; 815 while (p != NULL) { 816 if ((p->low <= addr) && (addr <= p->high)) 817 break; 818 p = (p->perm & OSL_IO_TERM) ? NULL : p+1; 819 } 820 821 return (p); 822 } 823 824 /* 825 * 826 */ 827 ACPI_STATUS 828 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width) 829 { 830 struct io_perm *p; 831 832 /* verify permission */ 833 p = osl_io_find_perm(Address); 834 if (p && (p->perm & OSL_IO_READ) == 0) { 835 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted", 836 (long)Address, Width); 837 *Value = 0xffffffff; 838 return (AE_ERROR); 839 } 840 841 switch (Width) { 842 case 8: 843 *Value = inb(Address); 844 break; 845 case 16: 846 *Value = inw(Address); 847 break; 848 case 32: 849 *Value = inl(Address); 850 break; 851 default: 852 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed", 853 (long)Address, Width); 854 return (AE_BAD_PARAMETER); 855 } 856 return (AE_OK); 857 } 858 859 ACPI_STATUS 860 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width) 861 { 862 struct io_perm *p; 863 864 /* verify permission */ 865 p = osl_io_find_perm(Address); 866 if (p && (p->perm & OSL_IO_WRITE) == 0) { 867 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted", 868 (long)Address, Width); 869 return (AE_ERROR); 870 } 871 872 switch (Width) { 873 case 8: 874 outb(Address, Value); 875 break; 876 case 16: 877 outw(Address, Value); 878 break; 879 case 32: 880 outl(Address, Value); 881 break; 882 default: 883 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed", 884 (long)Address, Width); 885 return (AE_BAD_PARAMETER); 886 } 887 return (AE_OK); 888 } 889 890 891 /* 892 * 893 */ 894 895 #define OSL_RW(ptr, val, type, rw) \ 896 { if (rw) *((type *)(ptr)) = *((type *) val); \ 897 else *((type *) val) = *((type *)(ptr)); } 898 899 900 static void 901 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value, 902 UINT32 Width, int write) 903 { 904 size_t maplen = Width / 8; 905 caddr_t ptr; 906 907 ptr = psm_map_new((paddr_t)Address, maplen, 908 PSM_PROT_WRITE | PSM_PROT_READ); 909 910 switch (maplen) { 911 case 1: 912 OSL_RW(ptr, Value, uint8_t, write); 913 break; 914 case 2: 915 OSL_RW(ptr, Value, uint16_t, write); 916 break; 917 case 4: 918 OSL_RW(ptr, Value, uint32_t, write); 919 break; 920 case 8: 921 OSL_RW(ptr, Value, uint64_t, write); 922 break; 923 default: 924 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d", 925 Width); 926 break; 927 } 928 929 psm_unmap(ptr, maplen); 930 } 931 932 ACPI_STATUS 933 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address, 934 UINT64 *Value, UINT32 Width) 
935 { 936 osl_rw_memory(Address, Value, Width, 0); 937 return (AE_OK); 938 } 939 940 ACPI_STATUS 941 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address, 942 UINT64 Value, UINT32 Width) 943 { 944 osl_rw_memory(Address, &Value, Width, 1); 945 return (AE_OK); 946 } 947 948 949 ACPI_STATUS 950 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg, 951 UINT64 *Value, UINT32 Width) 952 { 953 954 switch (Width) { 955 case 8: 956 *Value = (UINT64)(*pci_getb_func) 957 (PciId->Bus, PciId->Device, PciId->Function, Reg); 958 break; 959 case 16: 960 *Value = (UINT64)(*pci_getw_func) 961 (PciId->Bus, PciId->Device, PciId->Function, Reg); 962 break; 963 case 32: 964 *Value = (UINT64)(*pci_getl_func) 965 (PciId->Bus, PciId->Device, PciId->Function, Reg); 966 break; 967 case 64: 968 default: 969 cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed", 970 Reg, Width); 971 return (AE_BAD_PARAMETER); 972 } 973 return (AE_OK); 974 } 975 976 /* 977 * 978 */ 979 int acpica_write_pci_config_ok = 1; 980 981 ACPI_STATUS 982 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg, 983 UINT64 Value, UINT32 Width) 984 { 985 986 if (!acpica_write_pci_config_ok) { 987 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x" 988 " %lx %d not permitted", PciId->Bus, PciId->Device, 989 PciId->Function, Reg, (long)Value, Width); 990 return (AE_OK); 991 } 992 993 switch (Width) { 994 case 8: 995 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function, 996 Reg, (uint8_t)Value); 997 break; 998 case 16: 999 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function, 1000 Reg, (uint16_t)Value); 1001 break; 1002 case 32: 1003 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function, 1004 Reg, (uint32_t)Value); 1005 break; 1006 case 64: 1007 default: 1008 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed", 1009 Reg, Width); 1010 return (AE_BAD_PARAMETER); 1011 } 1012 return (AE_OK); 1013 } 1014 1015 /* 1016 * Called with ACPI_HANDLEs for both a PCI Config Space 1017 * OpRegion and (what ACPI CA thinks is) the PCI device 1018 * to which this ConfigSpace OpRegion belongs. 1019 * 1020 * ACPI CA uses _BBN and _ADR objects to determine the default 1021 * values for bus, segment, device and function; anything ACPI CA 1022 * can't figure out from the ACPI tables will be 0. One very 1023 * old 32-bit x86 system is known to have broken _BBN; this is 1024 * not addressed here. 1025 * 1026 * Some BIOSes implement _BBN() by reading PCI config space 1027 * on bus #0 - which means that we'll recurse when we attempt 1028 * to create the devinfo-to-ACPI map. If Derive is called during 1029 * scan_d2a_map, we don't translate the bus # and return. 1030 * 1031 * We get the parent of the OpRegion, which must be a PCI 1032 * node, fetch the associated devinfo node and snag the 1033 * b/d/f from it. 1034 */ 1035 void 1036 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle, 1037 ACPI_PCI_ID **PciId) 1038 { 1039 ACPI_HANDLE handle; 1040 dev_info_t *dip; 1041 int bus, device, func, devfn; 1042 1043 /* 1044 * See above - avoid recursing during scanning_d2a_map. 
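	 * In that case the default bus/device/function values already
	 * chosen by ACPI CA are left untouched.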
1045 */ 1046 if (scanning_d2a_map) 1047 return; 1048 1049 /* 1050 * Get the OpRegion's parent 1051 */ 1052 if (AcpiGetParent(chandle, &handle) != AE_OK) 1053 return; 1054 1055 /* 1056 * If we've mapped the ACPI node to the devinfo 1057 * tree, use the devinfo reg property 1058 */ 1059 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) && 1060 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) { 1061 (*PciId)->Bus = bus; 1062 (*PciId)->Device = device; 1063 (*PciId)->Function = func; 1064 } 1065 } 1066 1067 1068 /*ARGSUSED*/ 1069 BOOLEAN 1070 AcpiOsReadable(void *Pointer, ACPI_SIZE Length) 1071 { 1072 1073 /* Always says yes; all mapped memory assumed readable */ 1074 return (1); 1075 } 1076 1077 /*ARGSUSED*/ 1078 BOOLEAN 1079 AcpiOsWritable(void *Pointer, ACPI_SIZE Length) 1080 { 1081 1082 /* Always says yes; all mapped memory assumed writable */ 1083 return (1); 1084 } 1085 1086 UINT64 1087 AcpiOsGetTimer(void) 1088 { 1089 /* gethrtime() returns 1nS resolution; convert to 100nS granules */ 1090 return ((gethrtime() + 50) / 100); 1091 } 1092 1093 static struct AcpiOSIFeature_s { 1094 uint64_t control_flag; 1095 const char *feature_name; 1096 } AcpiOSIFeatures[] = { 1097 { ACPI_FEATURE_OSI_MODULE, "Module Device" }, 1098 { 0, "Processor Device" } 1099 }; 1100 1101 /*ARGSUSED*/ 1102 ACPI_STATUS 1103 AcpiOsValidateInterface(char *feature) 1104 { 1105 int i; 1106 1107 ASSERT(feature != NULL); 1108 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]); 1109 i++) { 1110 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) { 1111 continue; 1112 } 1113 /* Check whether required core features are available. */ 1114 if (AcpiOSIFeatures[i].control_flag != 0 && 1115 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) != 1116 AcpiOSIFeatures[i].control_flag) { 1117 break; 1118 } 1119 /* Feature supported. */ 1120 return (AE_OK); 1121 } 1122 1123 return (AE_SUPPORT); 1124 } 1125 1126 /*ARGSUSED*/ 1127 ACPI_STATUS 1128 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr, 1129 ACPI_SIZE length) 1130 { 1131 return (AE_OK); 1132 } 1133 1134 ACPI_STATUS 1135 AcpiOsSignal(UINT32 Function, void *Info) 1136 { 1137 _NOTE(ARGUNUSED(Function, Info)) 1138 1139 /* FUTUREWORK: debugger support */ 1140 1141 cmn_err(CE_NOTE, "!OsSignal unimplemented"); 1142 return (AE_OK); 1143 } 1144 1145 void ACPI_INTERNAL_VAR_XFACE 1146 AcpiOsPrintf(const char *Format, ...) 1147 { 1148 va_list ap; 1149 1150 va_start(ap, Format); 1151 AcpiOsVprintf(Format, ap); 1152 va_end(ap); 1153 } 1154 1155 /* 1156 * When != 0, sends output to console 1157 * Patchable with kmdb or /etc/system. 
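 * 0 (the default) logs via strlog() only, 1 uses printf() and 2 uses
 * prom_printf(); see acpica_pr_buf() below.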
1158 */ 1159 int acpica_console_out = 0; 1160 1161 #define ACPICA_OUTBUF_LEN 160 1162 char acpica_outbuf[ACPICA_OUTBUF_LEN]; 1163 int acpica_outbuf_offset; 1164 1165 /* 1166 * 1167 */ 1168 static void 1169 acpica_pr_buf(char *buf) 1170 { 1171 char c, *bufp, *outp; 1172 int out_remaining; 1173 1174 /* 1175 * copy the supplied buffer into the output buffer 1176 * when we hit a '\n' or overflow the output buffer, 1177 * output and reset the output buffer 1178 */ 1179 bufp = buf; 1180 outp = acpica_outbuf + acpica_outbuf_offset; 1181 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1; 1182 while (c = *bufp++) { 1183 *outp++ = c; 1184 if (c == '\n' || --out_remaining == 0) { 1185 *outp = '\0'; 1186 switch (acpica_console_out) { 1187 case 1: 1188 printf(acpica_outbuf); 1189 break; 1190 case 2: 1191 prom_printf(acpica_outbuf); 1192 break; 1193 case 0: 1194 default: 1195 (void) strlog(0, 0, 0, 1196 SL_CONSOLE | SL_NOTE | SL_LOGONLY, 1197 acpica_outbuf); 1198 break; 1199 } 1200 acpica_outbuf_offset = 0; 1201 outp = acpica_outbuf; 1202 out_remaining = ACPICA_OUTBUF_LEN - 1; 1203 } 1204 } 1205 1206 acpica_outbuf_offset = outp - acpica_outbuf; 1207 } 1208 1209 void 1210 AcpiOsVprintf(const char *Format, va_list Args) 1211 { 1212 1213 /* 1214 * If AcpiOsInitialize() failed to allocate a string buffer, 1215 * resort to vprintf(). 1216 */ 1217 if (acpi_osl_pr_buffer == NULL) { 1218 vprintf(Format, Args); 1219 return; 1220 } 1221 1222 /* 1223 * It is possible that a very long debug output statement will 1224 * be truncated; this is silently ignored. 1225 */ 1226 (void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args); 1227 acpica_pr_buf(acpi_osl_pr_buffer); 1228 } 1229 1230 void 1231 AcpiOsRedirectOutput(void *Destination) 1232 { 1233 _NOTE(ARGUNUSED(Destination)) 1234 1235 /* FUTUREWORK: debugger support */ 1236 1237 #ifdef DEBUG 1238 cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called"); 1239 #endif 1240 } 1241 1242 1243 UINT32 1244 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead) 1245 { 1246 _NOTE(ARGUNUSED(Buffer)) 1247 _NOTE(ARGUNUSED(len)) 1248 _NOTE(ARGUNUSED(BytesRead)) 1249 1250 /* FUTUREWORK: debugger support */ 1251 1252 return (0); 1253 } 1254 1255 /* 1256 * Device tree binding 1257 */ 1258 static ACPI_STATUS 1259 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp) 1260 { 1261 _NOTE(ARGUNUSED(lvl)); 1262 1263 int sta, hid, bbn; 1264 int busno = (intptr_t)ctxp; 1265 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp; 1266 1267 /* Check whether device exists. */ 1268 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) && 1269 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) { 1270 /* 1271 * Skip object if device doesn't exist. 1272 * According to ACPI Spec, 1273 * 1) setting either bit 0 or bit 3 means that device exists. 1274 * 2) Absence of _STA method means all status bits set. 1275 */ 1276 return (AE_CTRL_DEPTH); 1277 } 1278 1279 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) || 1280 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) { 1281 /* Non PCI/PCIe host bridge. 
*/ 1282 return (AE_OK); 1283 } 1284 1285 if (acpi_has_broken_bbn) { 1286 ACPI_BUFFER rb; 1287 rb.Pointer = NULL; 1288 rb.Length = ACPI_ALLOCATE_BUFFER; 1289 1290 /* Decree _BBN == n from PCI<n> */ 1291 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) { 1292 return (AE_CTRL_TERMINATE); 1293 } 1294 bbn = ((char *)rb.Pointer)[3] - '0'; 1295 AcpiOsFree(rb.Pointer); 1296 if (bbn == busno || busno == 0) { 1297 *hdlp = hdl; 1298 return (AE_CTRL_TERMINATE); 1299 } 1300 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) { 1301 if (bbn == busno) { 1302 *hdlp = hdl; 1303 return (AE_CTRL_TERMINATE); 1304 } 1305 } else if (busno == 0) { 1306 *hdlp = hdl; 1307 return (AE_CTRL_TERMINATE); 1308 } 1309 1310 return (AE_CTRL_DEPTH); 1311 } 1312 1313 static int 1314 acpica_find_pcibus(int busno, ACPI_HANDLE *rh) 1315 { 1316 ACPI_HANDLE sbobj, busobj; 1317 1318 /* initialize static flag by querying ACPI namespace for bug */ 1319 if (acpi_has_broken_bbn == -1) 1320 acpi_has_broken_bbn = acpica_query_bbn_problem(); 1321 1322 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) { 1323 busobj = NULL; 1324 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX, 1325 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno, 1326 (void **)&busobj); 1327 if (busobj != NULL) { 1328 *rh = busobj; 1329 return (AE_OK); 1330 } 1331 } 1332 1333 return (AE_ERROR); 1334 } 1335 1336 static ACPI_STATUS 1337 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp) 1338 { 1339 _NOTE(ARGUNUSED(lvl)); 1340 _NOTE(ARGUNUSED(rvpp)); 1341 1342 int sta, hid, bbn; 1343 int *cntp = (int *)ctxp; 1344 1345 /* Check whether device exists. */ 1346 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) && 1347 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) { 1348 /* 1349 * Skip object if device doesn't exist. 1350 * According to ACPI Spec, 1351 * 1) setting either bit 0 or bit 3 means that device exists. 1352 * 2) Absence of _STA method means all status bits set. 1353 */ 1354 return (AE_CTRL_DEPTH); 1355 } 1356 1357 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) || 1358 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) { 1359 /* Non PCI/PCIe host bridge. */ 1360 return (AE_OK); 1361 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) && 1362 bbn == 0 && ++(*cntp) > 1) { 1363 /* 1364 * If we find more than one bus with a 0 _BBN 1365 * we have the problem that BigBear's BIOS shows 1366 */ 1367 return (AE_CTRL_TERMINATE); 1368 } else { 1369 /* 1370 * Skip children of PCI/PCIe host bridge. 1371 */ 1372 return (AE_CTRL_DEPTH); 1373 } 1374 } 1375 1376 /* 1377 * Look for ACPI problem where _BBN is zero for multiple PCI buses 1378 * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus() 1379 * below if it exists. 1380 */ 1381 static int 1382 acpica_query_bbn_problem(void) 1383 { 1384 ACPI_HANDLE sbobj; 1385 int zerobbncnt; 1386 void *rv; 1387 1388 zerobbncnt = 0; 1389 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) { 1390 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX, 1391 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv); 1392 } 1393 1394 return (zerobbncnt > 1 ? 1 : 0); 1395 } 1396 1397 static const char hextab[] = "0123456789ABCDEF"; 1398 1399 static int 1400 hexdig(int c) 1401 { 1402 /* 1403 * Get hex digit: 1404 * 1405 * Returns the 4-bit hex digit named by the input character. Returns 1406 * zero if the input character is not valid hex! 1407 */ 1408 1409 int x = ((c < 'a') || (c > 'z')) ? 
c : (c - ' '); 1410 int j = sizeof (hextab); 1411 1412 while (--j && (x != hextab[j])) { 1413 } 1414 return (j); 1415 } 1416 1417 static int 1418 CompressEisaID(char *np) 1419 { 1420 /* 1421 * Compress an EISA device name: 1422 * 1423 * This routine converts a 7-byte ASCII device name into the 4-byte 1424 * compressed form used by EISA (50 bytes of ROM to save 1 byte of 1425 * NV-RAM!) 1426 */ 1427 1428 union { char octets[4]; int retval; } myu; 1429 1430 myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03); 1431 myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F); 1432 myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]); 1433 myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]); 1434 1435 return (myu.retval); 1436 } 1437 1438 ACPI_STATUS 1439 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint) 1440 { 1441 ACPI_STATUS status; 1442 ACPI_BUFFER rb; 1443 ACPI_OBJECT ro; 1444 1445 rb.Pointer = &ro; 1446 rb.Length = sizeof (ro); 1447 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb, 1448 ACPI_TYPE_INTEGER)) == AE_OK) 1449 *rint = ro.Integer.Value; 1450 1451 return (status); 1452 } 1453 1454 static int 1455 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint) 1456 { 1457 ACPI_BUFFER rb; 1458 ACPI_OBJECT *rv; 1459 1460 rb.Pointer = NULL; 1461 rb.Length = ACPI_ALLOCATE_BUFFER; 1462 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK && 1463 rb.Length != 0) { 1464 rv = rb.Pointer; 1465 if (rv->Type == ACPI_TYPE_INTEGER) { 1466 *rint = rv->Integer.Value; 1467 AcpiOsFree(rv); 1468 return (AE_OK); 1469 } else if (rv->Type == ACPI_TYPE_STRING) { 1470 char *stringData; 1471 1472 /* Convert the string into an EISA ID */ 1473 if (rv->String.Pointer == NULL) { 1474 AcpiOsFree(rv); 1475 return (AE_ERROR); 1476 } 1477 1478 stringData = rv->String.Pointer; 1479 1480 /* 1481 * If the string is an EisaID, it must be 7 1482 * characters; if it's an ACPI ID, it will be 8 1483 * (and we don't care about ACPI ids here). 1484 */ 1485 if (strlen(stringData) != 7) { 1486 AcpiOsFree(rv); 1487 return (AE_ERROR); 1488 } 1489 1490 *rint = CompressEisaID(stringData); 1491 AcpiOsFree(rv); 1492 return (AE_OK); 1493 } else 1494 AcpiOsFree(rv); 1495 } 1496 return (AE_ERROR); 1497 } 1498 1499 /* 1500 * Create linkage between devinfo nodes and ACPI nodes 1501 */ 1502 ACPI_STATUS 1503 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj) 1504 { 1505 ACPI_STATUS status; 1506 ACPI_BUFFER rb; 1507 1508 /* 1509 * Tag the devinfo node with the ACPI name 1510 */ 1511 rb.Pointer = NULL; 1512 rb.Length = ACPI_ALLOCATE_BUFFER; 1513 status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb); 1514 if (ACPI_FAILURE(status)) { 1515 cmn_err(CE_WARN, "acpica: could not get ACPI path!"); 1516 } else { 1517 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 1518 "acpi-namespace", (char *)rb.Pointer); 1519 AcpiOsFree(rb.Pointer); 1520 1521 /* 1522 * Tag the ACPI node with the dip 1523 */ 1524 status = acpica_set_devinfo(acpiobj, dip); 1525 ASSERT(ACPI_SUCCESS(status)); 1526 } 1527 1528 return (status); 1529 } 1530 1531 /* 1532 * Destroy linkage between devinfo nodes and ACPI nodes 1533 */ 1534 ACPI_STATUS 1535 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj) 1536 { 1537 (void) acpica_unset_devinfo(acpiobj); 1538 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace"); 1539 1540 return (AE_OK); 1541 } 1542 1543 /* 1544 * Return the ACPI device node matching the CPU dev_info node. 
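 * The lookup is by cpu id in cpu_map; on uppc-only systems, where
 * acpica_map_cpu() is never called, we fall back to matching the
 * current CPU's APIC id.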
1545 */ 1546 ACPI_STATUS 1547 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh) 1548 { 1549 int i; 1550 1551 /* 1552 * if cpu_map itself is NULL, we're a uppc system and 1553 * acpica_build_processor_map() hasn't been called yet. 1554 * So call it here 1555 */ 1556 if (cpu_map == NULL) { 1557 (void) acpica_build_processor_map(); 1558 if (cpu_map == NULL) 1559 return (AE_ERROR); 1560 } 1561 1562 if (cpu_id < 0) { 1563 return (AE_ERROR); 1564 } 1565 1566 /* 1567 * search object with cpuid in cpu_map 1568 */ 1569 mutex_enter(&cpu_map_lock); 1570 for (i = 0; i < cpu_map_count; i++) { 1571 if (cpu_map[i]->cpu_id == cpu_id) { 1572 break; 1573 } 1574 } 1575 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) { 1576 *rh = cpu_map[i]->obj; 1577 mutex_exit(&cpu_map_lock); 1578 return (AE_OK); 1579 } 1580 1581 /* Handle special case for uppc-only systems. */ 1582 if (cpu_map_called == 0) { 1583 uint32_t apicid = cpuid_get_apicid(CPU); 1584 if (apicid != UINT32_MAX) { 1585 for (i = 0; i < cpu_map_count; i++) { 1586 if (cpu_map[i]->apic_id == apicid) { 1587 break; 1588 } 1589 } 1590 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) { 1591 *rh = cpu_map[i]->obj; 1592 mutex_exit(&cpu_map_lock); 1593 return (AE_OK); 1594 } 1595 } 1596 } 1597 mutex_exit(&cpu_map_lock); 1598 1599 return (AE_ERROR); 1600 } 1601 1602 /* 1603 * Determine if this object is a processor 1604 */ 1605 static ACPI_STATUS 1606 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv) 1607 { 1608 ACPI_STATUS status; 1609 ACPI_OBJECT_TYPE objtype; 1610 unsigned long acpi_id; 1611 ACPI_BUFFER rb; 1612 ACPI_DEVICE_INFO *di; 1613 1614 if (AcpiGetType(obj, &objtype) != AE_OK) 1615 return (AE_OK); 1616 1617 if (objtype == ACPI_TYPE_PROCESSOR) { 1618 /* process a Processor */ 1619 rb.Pointer = NULL; 1620 rb.Length = ACPI_ALLOCATE_BUFFER; 1621 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb, 1622 ACPI_TYPE_PROCESSOR); 1623 if (status != AE_OK) { 1624 cmn_err(CE_WARN, "!acpica: error probing Processor"); 1625 return (status); 1626 } 1627 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId; 1628 AcpiOsFree(rb.Pointer); 1629 } else if (objtype == ACPI_TYPE_DEVICE) { 1630 /* process a processor Device */ 1631 status = AcpiGetObjectInfo(obj, &di); 1632 if (status != AE_OK) { 1633 cmn_err(CE_WARN, 1634 "!acpica: error probing Processor Device\n"); 1635 return (status); 1636 } 1637 1638 if (!(di->Valid & ACPI_VALID_UID) || 1639 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) { 1640 ACPI_FREE(di); 1641 cmn_err(CE_WARN, 1642 "!acpica: error probing Processor Device _UID\n"); 1643 return (AE_ERROR); 1644 } 1645 ACPI_FREE(di); 1646 } 1647 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX); 1648 1649 return (AE_OK); 1650 } 1651 1652 void 1653 scan_d2a_map(void) 1654 { 1655 dev_info_t *dip, *cdip; 1656 ACPI_HANDLE acpiobj; 1657 char *device_type_prop; 1658 int bus; 1659 static int map_error = 0; 1660 1661 if (map_error || (d2a_done != 0)) 1662 return; 1663 1664 scanning_d2a_map = 1; 1665 1666 /* 1667 * Find all child-of-root PCI buses, and find their corresponding 1668 * ACPI child-of-root PCI nodes. For each one, add to the 1669 * d2a table. 
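	 * scanning_d2a_map stays non-zero for the duration so that
	 * AcpiOsDerivePciId() does not recurse back into the devinfo
	 * lookup while the map is being built.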
1670 */ 1671 1672 for (dip = ddi_get_child(ddi_root_node()); 1673 dip != NULL; 1674 dip = ddi_get_next_sibling(dip)) { 1675 1676 /* prune non-PCI nodes */ 1677 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 1678 DDI_PROP_DONTPASS, 1679 "device_type", &device_type_prop) != DDI_PROP_SUCCESS) 1680 continue; 1681 1682 if ((strcmp("pci", device_type_prop) != 0) && 1683 (strcmp("pciex", device_type_prop) != 0)) { 1684 ddi_prop_free(device_type_prop); 1685 continue; 1686 } 1687 1688 ddi_prop_free(device_type_prop); 1689 1690 /* 1691 * To get bus number of dip, get first child and get its 1692 * bus number. If NULL, just continue, because we don't 1693 * care about bus nodes with no children anyway. 1694 */ 1695 if ((cdip = ddi_get_child(dip)) == NULL) 1696 continue; 1697 1698 if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) { 1699 #ifdef D2ADEBUG 1700 cmn_err(CE_WARN, "Can't get bus number of PCI child?"); 1701 #endif 1702 map_error = 1; 1703 scanning_d2a_map = 0; 1704 d2a_done = 1; 1705 return; 1706 } 1707 1708 if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) { 1709 #ifdef D2ADEBUG 1710 cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus); 1711 #endif 1712 map_error = 1; 1713 continue; 1714 } 1715 1716 acpica_tag_devinfo(dip, acpiobj); 1717 1718 /* call recursively to enumerate subtrees */ 1719 scan_d2a_subtree(dip, acpiobj, bus); 1720 } 1721 1722 scanning_d2a_map = 0; 1723 d2a_done = 1; 1724 } 1725 1726 /* 1727 * For all acpi child devices of acpiobj, find their matching 1728 * dip under "dip" argument. (matching means "matches dev/fn"). 1729 * bus is assumed to already be a match from caller, and is 1730 * used here only to record in the d2a entry. Recurse if necessary. 1731 */ 1732 static void 1733 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus) 1734 { 1735 int acpi_devfn, hid; 1736 ACPI_HANDLE acld; 1737 dev_info_t *dcld; 1738 int dcld_b, dcld_d, dcld_f; 1739 int dev, func; 1740 char *device_type_prop; 1741 1742 acld = NULL; 1743 while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld) 1744 == AE_OK) { 1745 /* get the dev/func we're looking for in the devinfo tree */ 1746 if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK) 1747 continue; 1748 dev = (acpi_devfn >> 16) & 0xFFFF; 1749 func = acpi_devfn & 0xFFFF; 1750 1751 /* look through all the immediate children of dip */ 1752 for (dcld = ddi_get_child(dip); dcld != NULL; 1753 dcld = ddi_get_next_sibling(dcld)) { 1754 if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0) 1755 continue; 1756 1757 /* dev must match; function must match or wildcard */ 1758 if (dcld_d != dev || 1759 (func != 0xFFFF && func != dcld_f)) 1760 continue; 1761 bus = dcld_b; 1762 1763 /* found a match, record it */ 1764 acpica_tag_devinfo(dcld, acld); 1765 1766 /* if we find a bridge, recurse from here */ 1767 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld, 1768 DDI_PROP_DONTPASS, "device_type", 1769 &device_type_prop) == DDI_PROP_SUCCESS) { 1770 if ((strcmp("pci", device_type_prop) == 0) || 1771 (strcmp("pciex", device_type_prop) == 0)) 1772 scan_d2a_subtree(dcld, acld, bus); 1773 ddi_prop_free(device_type_prop); 1774 } 1775 1776 /* done finding a match, so break now */ 1777 break; 1778 } 1779 } 1780 } 1781 1782 /* 1783 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node). 
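 * The values are decoded from the first entry of the dip's "reg"
 * property; -1 is returned if the property is missing or malformed.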
1784 */ 1785 int 1786 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func) 1787 { 1788 pci_regspec_t *pci_rp; 1789 int len; 1790 1791 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1792 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS) 1793 return (-1); 1794 1795 if (len < (sizeof (pci_regspec_t) / sizeof (int))) { 1796 ddi_prop_free(pci_rp); 1797 return (-1); 1798 } 1799 if (bus != NULL) 1800 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi); 1801 if (device != NULL) 1802 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi); 1803 if (func != NULL) 1804 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi); 1805 ddi_prop_free(pci_rp); 1806 return (0); 1807 } 1808 1809 /* 1810 * Return the ACPI device node matching this dev_info node, if it 1811 * exists in the ACPI tree. 1812 */ 1813 ACPI_STATUS 1814 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh) 1815 { 1816 ACPI_STATUS status; 1817 char *acpiname; 1818 1819 #ifdef DEBUG 1820 if (d2a_done == 0) 1821 cmn_err(CE_WARN, "!acpica_get_handle:" 1822 " no ACPI mapping for %s", ddi_node_name(dip)); 1823 #endif 1824 1825 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1826 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) { 1827 return (AE_ERROR); 1828 } 1829 1830 status = AcpiGetHandle(NULL, acpiname, rh); 1831 ddi_prop_free((void *)acpiname); 1832 return (status); 1833 } 1834 1835 1836 1837 /* 1838 * Manage OS data attachment to ACPI nodes 1839 */ 1840 1841 /* 1842 * Return the (dev_info_t *) associated with the ACPI node. 1843 */ 1844 ACPI_STATUS 1845 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp) 1846 { 1847 ACPI_STATUS status; 1848 void *ptr; 1849 1850 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr); 1851 if (status == AE_OK) 1852 *dipp = (dev_info_t *)ptr; 1853 1854 return (status); 1855 } 1856 1857 /* 1858 * Set the dev_info_t associated with the ACPI node. 1859 */ 1860 static ACPI_STATUS 1861 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip) 1862 { 1863 ACPI_STATUS status; 1864 1865 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip); 1866 return (status); 1867 } 1868 1869 /* 1870 * Unset the dev_info_t associated with the ACPI node. 1871 */ 1872 static ACPI_STATUS 1873 acpica_unset_devinfo(ACPI_HANDLE obj) 1874 { 1875 return (AcpiDetachData(obj, acpica_devinfo_handler)); 1876 } 1877 1878 /* 1879 * 1880 */ 1881 void 1882 acpica_devinfo_handler(ACPI_HANDLE obj, void *data) 1883 { 1884 /* no-op */ 1885 } 1886 1887 ACPI_STATUS 1888 acpica_build_processor_map(void) 1889 { 1890 ACPI_STATUS status; 1891 void *rv; 1892 1893 /* 1894 * shouldn't be called more than once anyway 1895 */ 1896 if (cpu_map_built) 1897 return (AE_OK); 1898 1899 /* 1900 * ACPI device configuration driver has built mapping information 1901 * among processor id and object handle, no need to probe again. 1902 */ 1903 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) { 1904 cpu_map_built = 1; 1905 return (AE_OK); 1906 } 1907 1908 /* 1909 * Look for Processor objects 1910 */ 1911 status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR, 1912 ACPI_ROOT_OBJECT, 1913 4, 1914 acpica_probe_processor, 1915 NULL, 1916 NULL, 1917 &rv); 1918 ASSERT(status == AE_OK); 1919 1920 /* 1921 * Look for processor Device objects 1922 */ 1923 status = AcpiGetDevices("ACPI0007", 1924 acpica_probe_processor, 1925 NULL, 1926 &rv); 1927 ASSERT(status == AE_OK); 1928 cpu_map_built = 1; 1929 1930 return (status); 1931 } 1932 1933 /* 1934 * Grow cpu map table on demand. 
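 * The table grows in max_ncpus increments; callers are expected to
 * hold cpu_map_lock.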
 */
static void
acpica_grow_cpu_map(void)
{
	if (cpu_map_count == cpu_map_count_max) {
		size_t sz;
		struct cpu_map_item **new_map;

		ASSERT(cpu_map_count_max < INT_MAX / 2);
		cpu_map_count_max += max_ncpus;
		new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
		    KM_SLEEP);
		if (cpu_map_count != 0) {
			ASSERT(cpu_map != NULL);
			sz = sizeof (cpu_map[0]) * cpu_map_count;
			kcopy(cpu_map, new_map, sz);
			kmem_free(cpu_map, sz);
		}
		cpu_map = new_map;
	}
}

/*
 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
 * ACPI handle).  The mapping table will be set up in two steps:
 * 1) acpica_add_processor_to_map() builds the mapping among APIC id, ACPI
 *    processor id and ACPI object handle.
 * 2) acpica_map_cpu() builds the mapping between cpu id and ACPI processor id.
 * On systems which have ACPI device configuration for CPUs enabled,
 * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
 * otherwise acpica_map_cpu() will be called before
 * acpica_add_processor_to_map().
 */
ACPI_STATUS
acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(obj != NULL);
	if (obj == NULL) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);

	/*
	 * Special case for uppc:
	 * If we're a uppc system and ACPI device configuration for CPU has
	 * been disabled, there won't be a CPU map yet because uppc psm
	 * doesn't call acpica_map_cpu().  So create one and use the
	 * passed-in processor as CPU 0.
	 * Assumption: the first CPU returned by
	 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
	 * Unfortunately there appears to be no good way to ASSERT this.
	 */
	if (cpu_map == NULL &&
	    !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		acpica_grow_cpu_map();
		ASSERT(cpu_map != NULL);
		item = kmem_zalloc(sizeof (*item), KM_SLEEP);
		item->cpu_id = 0;
		item->proc_id = acpi_id;
		item->apic_id = apic_id;
		item->obj = obj;
		cpu_map[0] = item;
		cpu_map_count = 1;
		mutex_exit(&cpu_map_lock);
		return (AE_OK);
	}

	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->obj == obj) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}

	if (rc == AE_OK) {
		if (item != NULL) {
			/*
			 * ACPI alias objects may cause more than one object
			 * with the same ACPI processor id; only remember
			 * the first object encountered.
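			 * A second object with the same processor id is
			 * rejected with AE_ALREADY_EXISTS.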
2023 */ 2024 if (item->obj == NULL) { 2025 item->obj = obj; 2026 item->apic_id = apic_id; 2027 } else { 2028 rc = AE_ALREADY_EXISTS; 2029 } 2030 } else if (cpu_map_count >= INT_MAX / 2) { 2031 rc = AE_NO_MEMORY; 2032 } else { 2033 acpica_grow_cpu_map(); 2034 ASSERT(cpu_map != NULL); 2035 ASSERT(cpu_map_count < cpu_map_count_max); 2036 item = kmem_zalloc(sizeof (*item), KM_SLEEP); 2037 item->cpu_id = -1; 2038 item->proc_id = acpi_id; 2039 item->apic_id = apic_id; 2040 item->obj = obj; 2041 cpu_map[cpu_map_count] = item; 2042 cpu_map_count++; 2043 } 2044 } 2045 2046 mutex_exit(&cpu_map_lock); 2047 2048 return (rc); 2049 } 2050 2051 ACPI_STATUS 2052 acpica_remove_processor_from_map(UINT32 acpi_id) 2053 { 2054 int i; 2055 ACPI_STATUS rc = AE_NOT_EXIST; 2056 2057 mutex_enter(&cpu_map_lock); 2058 for (i = 0; i < cpu_map_count; i++) { 2059 if (cpu_map[i]->proc_id != acpi_id) { 2060 continue; 2061 } 2062 cpu_map[i]->obj = NULL; 2063 /* Free item if no more reference to it. */ 2064 if (cpu_map[i]->cpu_id == -1) { 2065 kmem_free(cpu_map[i], sizeof (struct cpu_map_item)); 2066 cpu_map[i] = NULL; 2067 cpu_map_count--; 2068 if (i != cpu_map_count) { 2069 cpu_map[i] = cpu_map[cpu_map_count]; 2070 cpu_map[cpu_map_count] = NULL; 2071 } 2072 } 2073 rc = AE_OK; 2074 break; 2075 } 2076 mutex_exit(&cpu_map_lock); 2077 2078 return (rc); 2079 } 2080 2081 ACPI_STATUS 2082 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id) 2083 { 2084 int i; 2085 ACPI_STATUS rc = AE_OK; 2086 struct cpu_map_item *item = NULL; 2087 2088 ASSERT(cpuid != -1); 2089 if (cpuid == -1) { 2090 return (AE_ERROR); 2091 } 2092 2093 mutex_enter(&cpu_map_lock); 2094 cpu_map_called = 1; 2095 for (i = 0; i < cpu_map_count; i++) { 2096 if (cpu_map[i]->cpu_id == cpuid) { 2097 rc = AE_ALREADY_EXISTS; 2098 break; 2099 } else if (cpu_map[i]->proc_id == acpi_id) { 2100 ASSERT(item == NULL); 2101 item = cpu_map[i]; 2102 } 2103 } 2104 if (rc == AE_OK) { 2105 if (item != NULL) { 2106 if (item->cpu_id == -1) { 2107 item->cpu_id = cpuid; 2108 } else { 2109 rc = AE_ALREADY_EXISTS; 2110 } 2111 } else if (cpu_map_count >= INT_MAX / 2) { 2112 rc = AE_NO_MEMORY; 2113 } else { 2114 acpica_grow_cpu_map(); 2115 ASSERT(cpu_map != NULL); 2116 ASSERT(cpu_map_count < cpu_map_count_max); 2117 item = kmem_zalloc(sizeof (*item), KM_SLEEP); 2118 item->cpu_id = cpuid; 2119 item->proc_id = acpi_id; 2120 item->apic_id = UINT32_MAX; 2121 item->obj = NULL; 2122 cpu_map[cpu_map_count] = item; 2123 cpu_map_count++; 2124 } 2125 } 2126 mutex_exit(&cpu_map_lock); 2127 2128 return (rc); 2129 } 2130 2131 ACPI_STATUS 2132 acpica_unmap_cpu(processorid_t cpuid) 2133 { 2134 int i; 2135 ACPI_STATUS rc = AE_NOT_EXIST; 2136 2137 ASSERT(cpuid != -1); 2138 if (cpuid == -1) { 2139 return (rc); 2140 } 2141 2142 mutex_enter(&cpu_map_lock); 2143 for (i = 0; i < cpu_map_count; i++) { 2144 if (cpu_map[i]->cpu_id != cpuid) { 2145 continue; 2146 } 2147 cpu_map[i]->cpu_id = -1; 2148 /* Free item if no more reference. 
*/ 2149 if (cpu_map[i]->obj == NULL) { 2150 kmem_free(cpu_map[i], sizeof (struct cpu_map_item)); 2151 cpu_map[i] = NULL; 2152 cpu_map_count--; 2153 if (i != cpu_map_count) { 2154 cpu_map[i] = cpu_map[cpu_map_count]; 2155 cpu_map[cpu_map_count] = NULL; 2156 } 2157 } 2158 rc = AE_OK; 2159 break; 2160 } 2161 mutex_exit(&cpu_map_lock); 2162 2163 return (rc); 2164 } 2165 2166 ACPI_STATUS 2167 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp) 2168 { 2169 int i; 2170 ACPI_STATUS rc = AE_NOT_EXIST; 2171 2172 ASSERT(cpuid != -1); 2173 if (cpuid == -1) { 2174 return (rc); 2175 } 2176 2177 mutex_enter(&cpu_map_lock); 2178 for (i = 0; i < cpu_map_count; i++) { 2179 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) { 2180 *hdlp = cpu_map[i]->obj; 2181 rc = AE_OK; 2182 break; 2183 } 2184 } 2185 mutex_exit(&cpu_map_lock); 2186 2187 return (rc); 2188 } 2189 2190 ACPI_STATUS 2191 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp) 2192 { 2193 int i; 2194 ACPI_STATUS rc = AE_NOT_EXIST; 2195 2196 mutex_enter(&cpu_map_lock); 2197 for (i = 0; i < cpu_map_count; i++) { 2198 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) { 2199 *hdlp = cpu_map[i]->obj; 2200 rc = AE_OK; 2201 break; 2202 } 2203 } 2204 mutex_exit(&cpu_map_lock); 2205 2206 return (rc); 2207 } 2208 2209 ACPI_STATUS 2210 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp) 2211 { 2212 int i; 2213 ACPI_STATUS rc = AE_NOT_EXIST; 2214 2215 ASSERT(apicid != UINT32_MAX); 2216 if (apicid == UINT32_MAX) { 2217 return (rc); 2218 } 2219 2220 mutex_enter(&cpu_map_lock); 2221 for (i = 0; i < cpu_map_count; i++) { 2222 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) { 2223 *hdlp = cpu_map[i]->obj; 2224 rc = AE_OK; 2225 break; 2226 } 2227 } 2228 mutex_exit(&cpu_map_lock); 2229 2230 return (rc); 2231 } 2232 2233 ACPI_STATUS 2234 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp) 2235 { 2236 int i; 2237 ACPI_STATUS rc = AE_NOT_EXIST; 2238 2239 ASSERT(cpuidp != NULL); 2240 if (hdl == NULL || cpuidp == NULL) { 2241 return (rc); 2242 } 2243 2244 *cpuidp = -1; 2245 mutex_enter(&cpu_map_lock); 2246 for (i = 0; i < cpu_map_count; i++) { 2247 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) { 2248 *cpuidp = cpu_map[i]->cpu_id; 2249 rc = AE_OK; 2250 break; 2251 } 2252 } 2253 mutex_exit(&cpu_map_lock); 2254 2255 return (rc); 2256 } 2257 2258 ACPI_STATUS 2259 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp) 2260 { 2261 int i; 2262 ACPI_STATUS rc = AE_NOT_EXIST; 2263 2264 ASSERT(rp != NULL); 2265 if (hdl == NULL || rp == NULL) { 2266 return (rc); 2267 } 2268 2269 *rp = UINT32_MAX; 2270 mutex_enter(&cpu_map_lock); 2271 for (i = 0; i < cpu_map_count; i++) { 2272 if (cpu_map[i]->obj == hdl && 2273 cpu_map[i]->apic_id != UINT32_MAX) { 2274 *rp = cpu_map[i]->apic_id; 2275 rc = AE_OK; 2276 break; 2277 } 2278 } 2279 mutex_exit(&cpu_map_lock); 2280 2281 return (rc); 2282 } 2283 2284 ACPI_STATUS 2285 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp) 2286 { 2287 int i; 2288 ACPI_STATUS rc = AE_NOT_EXIST; 2289 2290 ASSERT(rp != NULL); 2291 if (hdl == NULL || rp == NULL) { 2292 return (rc); 2293 } 2294 2295 *rp = UINT32_MAX; 2296 mutex_enter(&cpu_map_lock); 2297 for (i = 0; i < cpu_map_count; i++) { 2298 if (cpu_map[i]->obj == hdl) { 2299 *rp = cpu_map[i]->proc_id; 2300 rc = AE_OK; 2301 break; 2302 } 2303 } 2304 mutex_exit(&cpu_map_lock); 2305 2306 return (rc); 2307 } 2308 2309 void 2310 acpica_set_core_feature(uint64_t features) 2311 { 2312 
atomic_or_64(&acpica_core_features, features); 2313 } 2314 2315 void 2316 acpica_clear_core_feature(uint64_t features) 2317 { 2318 atomic_and_64(&acpica_core_features, ~features); 2319 } 2320 2321 uint64_t 2322 acpica_get_core_feature(uint64_t features) 2323 { 2324 return (acpica_core_features & features); 2325 } 2326 2327 void 2328 acpica_set_devcfg_feature(uint64_t features) 2329 { 2330 atomic_or_64(&acpica_devcfg_features, features); 2331 } 2332 2333 void 2334 acpica_clear_devcfg_feature(uint64_t features) 2335 { 2336 atomic_and_64(&acpica_devcfg_features, ~features); 2337 } 2338 2339 uint64_t 2340 acpica_get_devcfg_feature(uint64_t features) 2341 { 2342 return (acpica_devcfg_features & features); 2343 } 2344 2345 void 2346 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT) 2347 { 2348 *gbl_FADT = &AcpiGbl_FADT; 2349 } 2350 2351 void 2352 acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates) 2353 { 2354 if (pstates && AcpiGbl_FADT.PstateControl != 0) 2355 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK, 2356 AcpiGbl_FADT.PstateControl); 2357 2358 if (cstates && AcpiGbl_FADT.CstControl != 0) 2359 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK, 2360 AcpiGbl_FADT.CstControl); 2361 } 2362 2363 uint32_t 2364 acpi_strtoul(const char *str, char **ep, int base) 2365 { 2366 ulong_t v; 2367 2368 if (ddi_strtoul(str, ep, base, &v) != 0 || v > ACPI_UINT32_MAX) { 2369 return (ACPI_UINT32_MAX); 2370 } 2371 2372 return ((uint32_t)v); 2373 } 2374