1 /*- 2 * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org> 3 * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org> 4 * Copyright (c) 2000, 2001 Michael Smith 5 * Copyright (c) 2000 BSDi 6 * All rights reserved. 7 * Copyright (c) 2025 The FreeBSD Foundation 8 * 9 * Portions of this software were developed by Aymeric Wibo 10 * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 #include <sys/cdefs.h> 35 #include "opt_acpi.h" 36 37 #include <sys/param.h> 38 #include <sys/eventhandler.h> 39 #include <sys/kernel.h> 40 #include <sys/proc.h> 41 #include <sys/fcntl.h> 42 #include <sys/malloc.h> 43 #include <sys/module.h> 44 #include <sys/bus.h> 45 #include <sys/conf.h> 46 #include <sys/ioccom.h> 47 #include <sys/reboot.h> 48 #include <sys/sysctl.h> 49 #include <sys/ctype.h> 50 #include <sys/linker.h> 51 #include <sys/mount.h> 52 #include <sys/power.h> 53 #include <sys/sbuf.h> 54 #include <sys/sched.h> 55 #include <sys/smp.h> 56 #include <sys/timetc.h> 57 #include <sys/uuid.h> 58 59 #if defined(__i386__) || defined(__amd64__) 60 #include <machine/clock.h> 61 #include <machine/pci_cfgreg.h> 62 #include <x86/cputypes.h> 63 #include <x86/x86_var.h> 64 #endif 65 #include <machine/resource.h> 66 #include <machine/bus.h> 67 #include <sys/rman.h> 68 #include <isa/isavar.h> 69 #include <isa/pnpvar.h> 70 71 #include <contrib/dev/acpica/include/acpi.h> 72 #include <contrib/dev/acpica/include/accommon.h> 73 #include <contrib/dev/acpica/include/acnamesp.h> 74 75 #include <dev/acpica/acpivar.h> 76 #include <dev/acpica/acpiio.h> 77 78 #include <dev/pci/pcivar.h> 79 80 #include <vm/vm_param.h> 81 82 static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); 83 84 /* Hooks for the ACPI CA debugging infrastructure */ 85 #define _COMPONENT ACPI_BUS 86 ACPI_MODULE_NAME("ACPI") 87 88 static d_open_t acpiopen; 89 static d_close_t acpiclose; 90 static d_ioctl_t acpiioctl; 91 92 static struct cdevsw acpi_cdevsw = { 93 .d_version = D_VERSION, 94 .d_open = acpiopen, 95 .d_close = acpiclose, 96 .d_ioctl = acpiioctl, 97 .d_name = "acpi", 98 }; 99 100 struct acpi_interface { 101 ACPI_STRING *data; 102 int num; 103 }; 104 105 static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL 
}; 106 107 /* Global mutex for locking access to the ACPI subsystem. */ 108 struct mtx acpi_mutex; 109 struct callout acpi_sleep_timer; 110 111 /* Bitmap of device quirks. */ 112 int acpi_quirks; 113 114 /* Supported sleep states. */ 115 static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; 116 117 static void acpi_lookup(void *arg, const char *name, device_t *dev); 118 static int acpi_modevent(struct module *mod, int event, void *junk); 119 120 static device_probe_t acpi_probe; 121 static device_attach_t acpi_attach; 122 static device_suspend_t acpi_suspend; 123 static device_resume_t acpi_resume; 124 static device_shutdown_t acpi_shutdown; 125 126 static bus_add_child_t acpi_add_child; 127 static bus_print_child_t acpi_print_child; 128 static bus_probe_nomatch_t acpi_probe_nomatch; 129 static bus_driver_added_t acpi_driver_added; 130 static bus_child_deleted_t acpi_child_deleted; 131 static bus_read_ivar_t acpi_read_ivar; 132 static bus_write_ivar_t acpi_write_ivar; 133 static bus_get_resource_list_t acpi_get_rlist; 134 static bus_get_rman_t acpi_get_rman; 135 static bus_set_resource_t acpi_set_resource; 136 static bus_alloc_resource_t acpi_alloc_resource; 137 static bus_adjust_resource_t acpi_adjust_resource; 138 static bus_release_resource_t acpi_release_resource; 139 static bus_delete_resource_t acpi_delete_resource; 140 static bus_activate_resource_t acpi_activate_resource; 141 static bus_deactivate_resource_t acpi_deactivate_resource; 142 static bus_map_resource_t acpi_map_resource; 143 static bus_unmap_resource_t acpi_unmap_resource; 144 static bus_child_pnpinfo_t acpi_child_pnpinfo_method; 145 static bus_child_location_t acpi_child_location_method; 146 static bus_hint_device_unit_t acpi_hint_device_unit; 147 static bus_get_property_t acpi_bus_get_prop; 148 static bus_get_device_path_t acpi_get_device_path; 149 static bus_get_domain_t acpi_get_domain_method; 150 151 static acpi_id_probe_t acpi_device_id_probe; 152 static acpi_evaluate_object_t acpi_device_eval_obj; 153 static acpi_get_property_t acpi_device_get_prop; 154 static acpi_scan_children_t acpi_device_scan_children; 155 156 static isa_pnp_probe_t acpi_isa_pnp_probe; 157 158 static void acpi_reserve_resources(device_t dev); 159 static int acpi_sysres_alloc(device_t dev); 160 static uint32_t acpi_isa_get_logicalid(device_t dev); 161 static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); 162 static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, 163 void *context, void **retval); 164 static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad); 165 static void acpi_platform_osc(device_t dev); 166 static void acpi_probe_children(device_t bus); 167 static void acpi_probe_order(ACPI_HANDLE handle, int *order); 168 static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, 169 void *context, void **status); 170 static void acpi_sleep_enable(void *arg); 171 static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc); 172 static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); 173 static void acpi_shutdown_final(void *arg, int howto); 174 static void acpi_enable_fixed_events(struct acpi_softc *sc); 175 static void acpi_resync_clock(struct acpi_softc *sc); 176 static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); 177 static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); 178 static int acpi_wake_prep_walk(int sstate); 179 static int acpi_wake_sysctl_walk(device_t dev); 180 static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); 181 static void 
acpi_system_eventhandler_sleep(void *arg, int state); 182 static void acpi_system_eventhandler_wakeup(void *arg, int state); 183 static int acpi_sname2sstate(const char *sname); 184 static const char *acpi_sstate2sname(int sstate); 185 static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); 186 static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); 187 static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); 188 static int acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype); 189 static int acpi_pm_func(u_long cmd, void *arg, enum power_stype stype); 190 static void acpi_enable_pcie(void); 191 static void acpi_reset_interfaces(device_t dev); 192 193 static device_method_t acpi_methods[] = { 194 /* Device interface */ 195 DEVMETHOD(device_probe, acpi_probe), 196 DEVMETHOD(device_attach, acpi_attach), 197 DEVMETHOD(device_shutdown, acpi_shutdown), 198 DEVMETHOD(device_detach, bus_generic_detach), 199 DEVMETHOD(device_suspend, acpi_suspend), 200 DEVMETHOD(device_resume, acpi_resume), 201 202 /* Bus interface */ 203 DEVMETHOD(bus_add_child, acpi_add_child), 204 DEVMETHOD(bus_print_child, acpi_print_child), 205 DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), 206 DEVMETHOD(bus_driver_added, acpi_driver_added), 207 DEVMETHOD(bus_child_deleted, acpi_child_deleted), 208 DEVMETHOD(bus_read_ivar, acpi_read_ivar), 209 DEVMETHOD(bus_write_ivar, acpi_write_ivar), 210 DEVMETHOD(bus_get_resource_list, acpi_get_rlist), 211 DEVMETHOD(bus_get_rman, acpi_get_rman), 212 DEVMETHOD(bus_set_resource, acpi_set_resource), 213 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), 214 DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), 215 DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), 216 DEVMETHOD(bus_release_resource, acpi_release_resource), 217 DEVMETHOD(bus_delete_resource, acpi_delete_resource), 218 DEVMETHOD(bus_activate_resource, acpi_activate_resource), 219 DEVMETHOD(bus_deactivate_resource, acpi_deactivate_resource), 220 DEVMETHOD(bus_map_resource, acpi_map_resource), 221 DEVMETHOD(bus_unmap_resource, acpi_unmap_resource), 222 DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), 223 DEVMETHOD(bus_child_location, acpi_child_location_method), 224 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 225 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 226 DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), 227 DEVMETHOD(bus_get_cpus, acpi_get_cpus), 228 DEVMETHOD(bus_get_domain, acpi_get_domain_method), 229 DEVMETHOD(bus_get_property, acpi_bus_get_prop), 230 DEVMETHOD(bus_get_device_path, acpi_get_device_path), 231 232 /* ACPI bus */ 233 DEVMETHOD(acpi_id_probe, acpi_device_id_probe), 234 DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), 235 DEVMETHOD(acpi_get_property, acpi_device_get_prop), 236 DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), 237 DEVMETHOD(acpi_scan_children, acpi_device_scan_children), 238 239 /* ISA emulation */ 240 DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), 241 242 DEVMETHOD_END 243 }; 244 245 static driver_t acpi_driver = { 246 "acpi", 247 acpi_methods, 248 sizeof(struct acpi_softc), 249 }; 250 251 EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0, 252 BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); 253 MODULE_VERSION(acpi, 1); 254 255 ACPI_SERIAL_DECL(acpi, "ACPI root bus"); 256 257 /* Local pools for managing system resources for ACPI child devices. */ 258 static struct rman acpi_rman_io, acpi_rman_mem; 259 260 #define ACPI_MINIMUM_AWAKETIME 5 261 262 /* Holds the description of the acpi0 device. 
*/ 263 static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; 264 265 SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 266 "ACPI debugging"); 267 static char acpi_ca_version[12]; 268 SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, 269 acpi_ca_version, 0, "Version of Intel ACPI-CA"); 270 271 /* 272 * Allow overriding _OSI methods. 273 */ 274 static char acpi_install_interface[256]; 275 TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, 276 sizeof(acpi_install_interface)); 277 static char acpi_remove_interface[256]; 278 TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, 279 sizeof(acpi_remove_interface)); 280 281 /* Allow users to dump Debug objects without ACPI debugger. */ 282 static int acpi_debug_objects; 283 TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); 284 SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, 285 CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, 286 acpi_debug_objects_sysctl, "I", 287 "Enable Debug objects"); 288 289 /* Allow the interpreter to ignore common mistakes in BIOS. */ 290 static int acpi_interpreter_slack = 1; 291 TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); 292 SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, 293 &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); 294 295 /* Ignore register widths set by FADT and use default widths instead. */ 296 static int acpi_ignore_reg_width = 1; 297 TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); 298 SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, 299 &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); 300 301 /* Allow users to override quirks. */ 302 TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); 303 304 int acpi_susp_bounce; 305 SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, 306 &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); 307 308 #if defined(__amd64__) || defined(__i386__) 309 int acpi_override_isa_irq_polarity; 310 #endif 311 312 /* 313 * ACPI standard UUID for Device Specific Data Package 314 * "Device Properties UUID for _DSD" Rev. 2.0 315 */ 316 static const struct uuid acpi_dsd_uuid = { 317 0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, 318 { 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 } 319 }; 320 321 /* 322 * ACPI can only be loaded as a module by the loader; activating it after 323 * system bootstrap time is not useful, and can be fatal to the system. 324 * It also cannot be unloaded, since the entire system bus hierarchy hangs 325 * off it. 326 */ 327 static int 328 acpi_modevent(struct module *mod, int event, void *junk) 329 { 330 switch (event) { 331 case MOD_LOAD: 332 if (!cold) { 333 printf("The ACPI driver cannot be loaded after boot.\n"); 334 return (EPERM); 335 } 336 break; 337 case MOD_UNLOAD: 338 if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) 339 return (EBUSY); 340 break; 341 default: 342 break; 343 } 344 return (0); 345 } 346 347 /* 348 * Perform early initialization. 349 */ 350 ACPI_STATUS 351 acpi_Startup(void) 352 { 353 static int started = 0; 354 ACPI_STATUS status; 355 int val; 356 357 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 358 359 /* Only run the startup code once. The MADT driver also calls this. */ 360 if (started) 361 return_VALUE (AE_OK); 362 started = 1; 363 364 /* 365 * Initialize the ACPICA subsystem. 
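     * (AcpiInitializeSubsystem() only sets up ACPICA's OS services layer and
     * internal state; no ACPI tables are consumed until AcpiInitializeTables()
     * is called below.)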
366 */ 367 if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { 368 printf("ACPI: Could not initialize Subsystem: %s\n", 369 AcpiFormatException(status)); 370 return_VALUE (status); 371 } 372 373 /* 374 * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing 375 * if more tables exist. 376 */ 377 if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { 378 printf("ACPI: Table initialisation failed: %s\n", 379 AcpiFormatException(status)); 380 return_VALUE (status); 381 } 382 383 /* Set up any quirks we have for this system. */ 384 if (acpi_quirks == ACPI_Q_OK) 385 acpi_table_quirks(&acpi_quirks); 386 387 /* If the user manually set the disabled hint to 0, force-enable ACPI. */ 388 if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) 389 acpi_quirks &= ~ACPI_Q_BROKEN; 390 if (acpi_quirks & ACPI_Q_BROKEN) { 391 printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); 392 status = AE_SUPPORT; 393 } 394 395 return_VALUE (status); 396 } 397 398 /* 399 * Detect ACPI and perform early initialisation. 400 */ 401 int 402 acpi_identify(void) 403 { 404 ACPI_TABLE_RSDP *rsdp; 405 ACPI_TABLE_HEADER *rsdt; 406 ACPI_PHYSICAL_ADDRESS paddr; 407 struct sbuf sb; 408 409 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 410 411 if (!cold) 412 return (ENXIO); 413 414 /* Check that we haven't been disabled with a hint. */ 415 if (resource_disabled("acpi", 0)) 416 return (ENXIO); 417 418 /* Check for other PM systems. */ 419 if (power_pm_get_type() != POWER_PM_TYPE_NONE && 420 power_pm_get_type() != POWER_PM_TYPE_ACPI) { 421 printf("ACPI identify failed, other PM system enabled.\n"); 422 return (ENXIO); 423 } 424 425 /* Initialize root tables. */ 426 if (ACPI_FAILURE(acpi_Startup())) { 427 printf("ACPI: Try disabling either ACPI or apic support.\n"); 428 return (ENXIO); 429 } 430 431 if ((paddr = AcpiOsGetRootPointer()) == 0 || 432 (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) 433 return (ENXIO); 434 if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) 435 paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; 436 else 437 paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; 438 AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); 439 440 if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) 441 return (ENXIO); 442 sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); 443 sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); 444 sbuf_trim(&sb); 445 sbuf_putc(&sb, ' '); 446 sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); 447 sbuf_trim(&sb); 448 sbuf_finish(&sb); 449 sbuf_delete(&sb); 450 AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); 451 452 snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); 453 454 return (0); 455 } 456 457 /* 458 * Fetch some descriptive data from ACPI to put in our attach message. 459 */ 460 static int 461 acpi_probe(device_t dev) 462 { 463 464 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 465 466 device_set_desc(dev, acpi_desc); 467 468 return_VALUE (BUS_PROBE_NOWILDCARD); 469 } 470 471 static int 472 acpi_attach(device_t dev) 473 { 474 struct acpi_softc *sc; 475 ACPI_STATUS status; 476 int error, state; 477 UINT32 flags; 478 UINT8 TypeA, TypeB; 479 char *env; 480 481 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 482 483 sc = device_get_softc(dev); 484 sc->acpi_dev = dev; 485 callout_init(&sc->susp_force_to, 1); 486 487 error = ENXIO; 488 489 /* Initialize resource manager. 
 */
    acpi_rman_io.rm_type = RMAN_ARRAY;
    acpi_rman_io.rm_start = 0;
    acpi_rman_io.rm_end = 0xffff;
    acpi_rman_io.rm_descr = "ACPI I/O ports";
    if (rman_init(&acpi_rman_io) != 0)
        panic("acpi rman_init IO ports failed");
    acpi_rman_mem.rm_type = RMAN_ARRAY;
    acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
    if (rman_init(&acpi_rman_mem) != 0)
        panic("acpi rman_init memory failed");

    resource_list_init(&sc->sysres_rl);

    /* Initialise the ACPI mutex */
    mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);

    /*
     * Set the globals from our tunables. This is needed because ACPI-CA
     * uses UINT8 for some values and we have no tunable_byte.
     */
    AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
    AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
    AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE;

#ifndef ACPI_DEBUG
    /*
     * Disable all debugging layers and levels.
     */
    AcpiDbgLayer = 0;
    AcpiDbgLevel = 0;
#endif

    /* Override OS interfaces if the user requested. */
    acpi_reset_interfaces(dev);

    /* Load ACPI name space. */
    status = AcpiLoadTables();
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "Could not load Namespace: %s\n",
            AcpiFormatException(status));
        goto out;
    }

    /* Handle MCFG table if present. */
    acpi_enable_pcie();

    /*
     * Note that some systems (specifically, those with namespace evaluation
     * issues that require the avoidance of parts of the namespace) must
     * avoid running _INI and _STA on everything, as well as dodging the final
     * object init pass.
     *
     * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT.
     *
     * XXX We should arrange for the object init pass after we have attached
     * all our child devices, but on many systems it works here.
     */
    flags = 0;
    if (testenv("debug.acpi.avoid"))
        flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;

    /* Bring the hardware and basic handlers online. */
    if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
        device_printf(dev, "Could not enable ACPI: %s\n",
            AcpiFormatException(status));
        goto out;
    }

    /*
     * Call the ECDT probe function to provide EC functionality before
     * the namespace has been evaluated.
     *
     * XXX This happens before the sysresource devices have been probed and
     * attached so its resources come from nexus0. In practice, this isn't
     * a problem but should be addressed eventually.
     */
    acpi_ec_ecdt_probe(dev);

    /* Bring device objects and regions online. */
    if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
        device_printf(dev, "Could not initialize ACPI objects: %s\n",
            AcpiFormatException(status));
        goto out;
    }

    /*
     * Setup our sysctl tree.
     *
     * XXX: This doesn't check to make sure that none of these fail.
     */
    sysctl_ctx_init(&sc->acpi_sysctl_ctx);
    sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev),
        CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "supported_sleep_state",
        CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
        0, 0, acpi_supported_sleep_state_sysctl, "A",
        "List supported ACPI sleep states.");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "power_button_state",
        CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
        &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
        "Power button ACPI sleep state.");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "sleep_button_state",
        CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
        &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
        "Sleep button ACPI sleep state.");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "lid_switch_state",
        CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
        &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
        "Lid ACPI sleep state. Set to S3 if you want to suspend your laptop when you close the lid.");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "standby_state",
        CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
        &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "suspend_state",
        CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
        &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
        "sleep delay in seconds");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
        &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "handle_reboot", CTLFLAG_RW,
        &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");

#if defined(__amd64__) || defined(__i386__)
    /*
     * Enable workaround for incorrect ISA IRQ polarity by default on
     * systems with Intel CPUs.
     */
    if (cpu_vendor_id == CPU_VENDOR_INTEL)
        acpi_override_isa_irq_polarity = 1;
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "override_isa_irq_polarity", CTLFLAG_RDTUN,
        &acpi_override_isa_irq_polarity, 0,
        "Force active-hi polarity for edge-triggered ISA IRQs");
#endif

    /*
     * Default to 1 second before sleeping to give some machines time to
     * stabilize.
642 */ 643 sc->acpi_sleep_delay = 1; 644 if (bootverbose) 645 sc->acpi_verbose = 1; 646 if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { 647 if (strcmp(env, "0") != 0) 648 sc->acpi_verbose = 1; 649 freeenv(env); 650 } 651 652 /* Only enable reboot by default if the FADT says it is available. */ 653 if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) 654 sc->acpi_handle_reboot = 1; 655 656 #if !ACPI_REDUCED_HARDWARE 657 /* Only enable S4BIOS by default if the FACS says it is available. */ 658 if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) 659 sc->acpi_s4bios = 1; 660 #endif 661 662 /* Probe all supported sleep states. */ 663 acpi_sleep_states[ACPI_STATE_S0] = TRUE; 664 for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) 665 if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, 666 __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && 667 ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) 668 acpi_sleep_states[state] = TRUE; 669 670 /* 671 * Dispatch the default sleep state to devices. The lid switch is set 672 * to UNKNOWN by default to avoid surprising users. 673 */ 674 sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? 675 ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; 676 sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; 677 sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? 678 ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; 679 sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? 680 ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; 681 682 /* Pick the first valid sleep state for the sleep button default. */ 683 sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; 684 for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) 685 if (acpi_sleep_states[state]) { 686 sc->acpi_sleep_button_sx = state; 687 break; 688 } 689 690 acpi_enable_fixed_events(sc); 691 692 /* 693 * Scan the namespace and attach/initialise children. 694 */ 695 696 /* Register our shutdown handler. */ 697 EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, 698 SHUTDOWN_PRI_LAST + 150); 699 700 /* 701 * Register our acpi event handlers. 702 * XXX should be configurable eg. via userland policy manager. 703 */ 704 EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, 705 sc, ACPI_EVENT_PRI_LAST); 706 EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, 707 sc, ACPI_EVENT_PRI_LAST); 708 709 /* Flag our initial states. */ 710 sc->acpi_enabled = TRUE; 711 sc->acpi_sstate = ACPI_STATE_S0; 712 sc->acpi_sleep_disabled = TRUE; 713 714 /* Create the control device */ 715 sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, 716 "acpi"); 717 sc->acpi_dev_t->si_drv1 = sc; 718 719 if ((error = acpi_machdep_init(dev))) 720 goto out; 721 722 /* Register ACPI again to pass the correct argument of pm_func. */ 723 power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); 724 725 acpi_platform_osc(dev); 726 727 if (!acpi_disabled("bus")) { 728 EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); 729 acpi_probe_children(dev); 730 } 731 732 /* Update all GPEs and enable runtime GPEs. */ 733 status = AcpiUpdateAllGpes(); 734 if (ACPI_FAILURE(status)) 735 device_printf(dev, "Could not update all GPEs: %s\n", 736 AcpiFormatException(status)); 737 738 /* Allow sleep request after a while. 
*/ 739 callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); 740 callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, 741 acpi_sleep_enable, sc); 742 743 error = 0; 744 745 out: 746 return_VALUE (error); 747 } 748 749 static int 750 acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype) 751 { 752 switch (stype) { 753 case POWER_STYPE_AWAKE: 754 return (ACPI_STATE_S0); 755 case POWER_STYPE_STANDBY: 756 return (sc->acpi_standby_sx); 757 case POWER_STYPE_SUSPEND_TO_MEM: 758 return (ACPI_STATE_S3); 759 case POWER_STYPE_HIBERNATE: 760 return (ACPI_STATE_S4); 761 case POWER_STYPE_POWEROFF: 762 return (ACPI_STATE_S5); 763 case POWER_STYPE_SUSPEND_TO_IDLE: 764 case POWER_STYPE_COUNT: 765 case POWER_STYPE_UNKNOWN: 766 return (ACPI_STATE_UNKNOWN); 767 } 768 return (ACPI_STATE_UNKNOWN); 769 } 770 771 static void 772 acpi_set_power_children(device_t dev, int state) 773 { 774 device_t child; 775 device_t *devlist; 776 int dstate, i, numdevs; 777 778 if (device_get_children(dev, &devlist, &numdevs) != 0) 779 return; 780 781 /* 782 * Retrieve and set D-state for the sleep state if _SxD is present. 783 * Skip children who aren't attached since they are handled separately. 784 */ 785 for (i = 0; i < numdevs; i++) { 786 child = devlist[i]; 787 dstate = state; 788 if (device_is_attached(child) && 789 acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) 790 acpi_set_powerstate(child, dstate); 791 } 792 free(devlist, M_TEMP); 793 } 794 795 static int 796 acpi_suspend(device_t dev) 797 { 798 int error; 799 800 bus_topo_assert(); 801 802 error = bus_generic_suspend(dev); 803 if (error == 0) 804 acpi_set_power_children(dev, ACPI_STATE_D3); 805 806 return (error); 807 } 808 809 static int 810 acpi_resume(device_t dev) 811 { 812 813 bus_topo_assert(); 814 815 acpi_set_power_children(dev, ACPI_STATE_D0); 816 817 return (bus_generic_resume(dev)); 818 } 819 820 static int 821 acpi_shutdown(device_t dev) 822 { 823 824 bus_topo_assert(); 825 826 /* Allow children to shutdown first. */ 827 bus_generic_shutdown(dev); 828 829 /* 830 * Enable any GPEs that are able to power-on the system (i.e., RTC). 831 * Also, disable any that are not valid for this state (most). 
832 */ 833 acpi_wake_prep_walk(ACPI_STATE_S5); 834 835 return (0); 836 } 837 838 /* 839 * Handle a new device being added 840 */ 841 static device_t 842 acpi_add_child(device_t bus, u_int order, const char *name, int unit) 843 { 844 struct acpi_device *ad; 845 device_t child; 846 847 if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) 848 return (NULL); 849 850 ad->ad_domain = ACPI_DEV_DOMAIN_UNKNOWN; 851 resource_list_init(&ad->ad_rl); 852 853 child = device_add_child_ordered(bus, order, name, unit); 854 if (child != NULL) 855 device_set_ivars(child, ad); 856 else 857 free(ad, M_ACPIDEV); 858 return (child); 859 } 860 861 static int 862 acpi_print_child(device_t bus, device_t child) 863 { 864 struct acpi_device *adev = device_get_ivars(child); 865 struct resource_list *rl = &adev->ad_rl; 866 int retval = 0; 867 868 retval += bus_print_child_header(bus, child); 869 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); 870 retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); 871 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); 872 retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); 873 if (device_get_flags(child)) 874 retval += printf(" flags %#x", device_get_flags(child)); 875 retval += bus_print_child_domain(bus, child); 876 retval += bus_print_child_footer(bus, child); 877 878 return (retval); 879 } 880 881 /* 882 * If this device is an ACPI child but no one claimed it, attempt 883 * to power it off. We'll power it back up when a driver is added. 884 * 885 * XXX Disabled for now since many necessary devices (like fdc and 886 * ATA) don't claim the devices we created for them but still expect 887 * them to be powered up. 888 */ 889 static void 890 acpi_probe_nomatch(device_t bus, device_t child) 891 { 892 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER 893 acpi_set_powerstate(child, ACPI_STATE_D3); 894 #endif 895 } 896 897 /* 898 * If a new driver has a chance to probe a child, first power it up. 899 * 900 * XXX Disabled for now (see acpi_probe_nomatch for details). 901 */ 902 static void 903 acpi_driver_added(device_t dev, driver_t *driver) 904 { 905 device_t child, *devlist; 906 int i, numdevs; 907 908 DEVICE_IDENTIFY(driver, dev); 909 if (device_get_children(dev, &devlist, &numdevs)) 910 return; 911 for (i = 0; i < numdevs; i++) { 912 child = devlist[i]; 913 if (device_get_state(child) == DS_NOTPRESENT) { 914 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER 915 acpi_set_powerstate(child, ACPI_STATE_D0); 916 if (device_probe_and_attach(child) != 0) 917 acpi_set_powerstate(child, ACPI_STATE_D3); 918 #else 919 device_probe_and_attach(child); 920 #endif 921 } 922 } 923 free(devlist, M_TEMP); 924 } 925 926 /* Location hint for devctl(8) */ 927 static int 928 acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) 929 { 930 struct acpi_device *dinfo = device_get_ivars(child); 931 int pxm; 932 933 if (dinfo->ad_handle) { 934 sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); 935 if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { 936 sbuf_printf(sb, " _PXM=%d", pxm); 937 } 938 } 939 return (0); 940 } 941 942 /* PnP information for devctl(8) */ 943 int 944 acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) 945 { 946 ACPI_DEVICE_INFO *adinfo; 947 948 if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { 949 sbuf_printf(sb, "unknown"); 950 return (0); 951 } 952 953 sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", 954 (adinfo->Valid & ACPI_VALID_HID) ? 
        adinfo->HardwareId.String : "none",
        (adinfo->Valid & ACPI_VALID_UID) ?
        strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL,
        ((adinfo->Valid & ACPI_VALID_CID) &&
         adinfo->CompatibleIdList.Count > 0) ?
        adinfo->CompatibleIdList.Ids[0].String : "none");
    AcpiOsFree(adinfo);

    return (0);
}

static int
acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb)
{
    struct acpi_device *dinfo = device_get_ivars(child);

    return (acpi_pnpinfo(dinfo->ad_handle, sb));
}

/*
 * Note: the check for the ACPI locator may be redundant. However, this
 * routine is suitable both for busses whose only locator is ACPI and as a
 * building block for busses that have multiple locators to cope with.
 */
int
acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb)
{
    if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) {
        ACPI_HANDLE *handle = acpi_get_handle(child);

        if (handle != NULL)
            sbuf_printf(sb, "%s", acpi_name(handle));
        return (0);
    }

    return (bus_generic_get_device_path(bus, child, locator, sb));
}

static int
acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb)
{
    struct acpi_device *dinfo = device_get_ivars(child);

    if (strcmp(locator, BUS_LOCATOR_ACPI) == 0)
        return (acpi_get_acpi_device_path(bus, child, locator, sb));

    if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) {
        ACPI_DEVICE_INFO *adinfo;
        if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) &&
            dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) {
            const char *hid = adinfo->HardwareId.String;
            u_long uid = (adinfo->Valid & ACPI_VALID_UID) ?
                strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL;
            u_long hidval;

            /*
             * In UEFI Standard Version 2.6, Section 9.6.1.6 Text
             * Device Node Reference, there's an insanely long
             * Table 98. This implements the relevant bits from
             * that table. Newer versions do not appear to require
             * anything new. The EDK2 firmware presents both PciRoot
             * and PcieRoot as PciRoot. Follow the EDK2 standard.
             */
            if (strncmp("PNP", hid, 3) != 0)
                goto nomatch;
            hidval = strtoul(hid + 3, NULL, 16);
            switch (hidval) {
            case 0x0301:
                sbuf_printf(sb, "Keyboard(0x%lx)", uid);
                break;
            case 0x0401:
                sbuf_printf(sb, "ParallelPort(0x%lx)", uid);
                break;
            case 0x0501:
                sbuf_printf(sb, "Serial(0x%lx)", uid);
                break;
            case 0x0604:
                sbuf_printf(sb, "Floppy(0x%lx)", uid);
                break;
            case 0x0a03:
            case 0x0a08:
                sbuf_printf(sb, "PciRoot(0x%lx)", uid);
                break;
            default: /* Everything else gets a generic encode */
            nomatch:
                sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid);
                break;
            }
        }
        /* Not handled: AcpiAdr... unsure how to know it's one */
    }

    /* For the rest, punt to the default handler */
    return (bus_generic_get_device_path(bus, child, locator, sb));
}

/*
 * Handle device deletion.
1053 */ 1054 static void 1055 acpi_child_deleted(device_t dev, device_t child) 1056 { 1057 struct acpi_device *dinfo = device_get_ivars(child); 1058 1059 if (acpi_get_device(dinfo->ad_handle) == child) 1060 AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); 1061 } 1062 1063 /* 1064 * Handle per-device ivars 1065 */ 1066 static int 1067 acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 1068 { 1069 struct acpi_device *ad; 1070 1071 if ((ad = device_get_ivars(child)) == NULL) { 1072 device_printf(child, "device has no ivars\n"); 1073 return (ENOENT); 1074 } 1075 1076 /* ACPI and ISA compatibility ivars */ 1077 switch(index) { 1078 case ACPI_IVAR_HANDLE: 1079 *(ACPI_HANDLE *)result = ad->ad_handle; 1080 break; 1081 case ACPI_IVAR_PRIVATE: 1082 *(void **)result = ad->ad_private; 1083 break; 1084 case ACPI_IVAR_FLAGS: 1085 *(int *)result = ad->ad_flags; 1086 break; 1087 case ACPI_IVAR_DOMAIN: 1088 *(int *)result = ad->ad_domain; 1089 break; 1090 case ISA_IVAR_VENDORID: 1091 case ISA_IVAR_SERIAL: 1092 case ISA_IVAR_COMPATID: 1093 *(int *)result = -1; 1094 break; 1095 case ISA_IVAR_LOGICALID: 1096 *(int *)result = acpi_isa_get_logicalid(child); 1097 break; 1098 case PCI_IVAR_CLASS: 1099 *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; 1100 break; 1101 case PCI_IVAR_SUBCLASS: 1102 *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; 1103 break; 1104 case PCI_IVAR_PROGIF: 1105 *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; 1106 break; 1107 default: 1108 return (ENOENT); 1109 } 1110 1111 return (0); 1112 } 1113 1114 static int 1115 acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 1116 { 1117 struct acpi_device *ad; 1118 1119 if ((ad = device_get_ivars(child)) == NULL) { 1120 device_printf(child, "device has no ivars\n"); 1121 return (ENOENT); 1122 } 1123 1124 switch(index) { 1125 case ACPI_IVAR_HANDLE: 1126 ad->ad_handle = (ACPI_HANDLE)value; 1127 break; 1128 case ACPI_IVAR_PRIVATE: 1129 ad->ad_private = (void *)value; 1130 break; 1131 case ACPI_IVAR_FLAGS: 1132 ad->ad_flags = (int)value; 1133 break; 1134 case ACPI_IVAR_DOMAIN: 1135 ad->ad_domain = (int)value; 1136 break; 1137 default: 1138 panic("bad ivar write request (%d)", index); 1139 return (ENOENT); 1140 } 1141 1142 return (0); 1143 } 1144 1145 /* 1146 * Handle child resource allocation/removal 1147 */ 1148 static struct resource_list * 1149 acpi_get_rlist(device_t dev, device_t child) 1150 { 1151 struct acpi_device *ad; 1152 1153 ad = device_get_ivars(child); 1154 return (&ad->ad_rl); 1155 } 1156 1157 static int 1158 acpi_match_resource_hint(device_t dev, int type, long value) 1159 { 1160 struct acpi_device *ad = device_get_ivars(dev); 1161 struct resource_list *rl = &ad->ad_rl; 1162 struct resource_list_entry *rle; 1163 1164 STAILQ_FOREACH(rle, rl, link) { 1165 if (rle->type != type) 1166 continue; 1167 if (rle->start <= value && rle->end >= value) 1168 return (1); 1169 } 1170 return (0); 1171 } 1172 1173 /* 1174 * Does this device match because the resources match? 1175 */ 1176 static bool 1177 acpi_hint_device_matches_resources(device_t child, const char *name, 1178 int unit) 1179 { 1180 long value; 1181 bool matches; 1182 1183 /* 1184 * Check for matching resources. We must have at least one match. 1185 * Since I/O and memory resources cannot be shared, if we get a 1186 * match on either of those, ignore any mismatches in IRQs or DRQs. 1187 * 1188 * XXX: We may want to revisit this to be more lenient and wire 1189 * as long as it gets one match. 
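     *
     * As an illustration only (these hints are not taken from this file), a
     * loader.conf entry such as:
     *
     *   hint.uart.2.at="acpi0"
     *   hint.uart.2.port="0x3E8"
     *   hint.uart.2.irq="5"
     *
     * wires a child whose _CRS contains the I/O range 0x3E8-0x3EF as uart2
     * on the port match alone, even if ACPI assigned a different IRQ.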
     */
    matches = false;
    if (resource_long_value(name, unit, "port", &value) == 0) {
        /*
         * Floppy drive controllers are notorious for having a
         * wide variety of resources not all of which include the
         * first port that is specified by the hint (typically
         * 0x3f0) (see the comment above fdc_isa_alloc_resources()
         * in fdc_isa.c). However, they do all seem to include
         * port + 2 (e.g. 0x3f2) so for a floppy device, look for
         * 'value + 2' in the port resources instead of the hint
         * value.
         */
        if (strcmp(name, "fdc") == 0)
            value += 2;
        if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
            matches = true;
        else
            return (false);
    }
    if (resource_long_value(name, unit, "maddr", &value) == 0) {
        if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
            matches = true;
        else
            return (false);
    }

    /*
     * If either the I/O address or the memory address matched, then
     * assume this device matches and that any mismatch in other resources
     * will be resolved by silently ignoring those other resources. Otherwise
     * all further resources must match.
     */
    if (matches) {
        return (true);
    }
    if (resource_long_value(name, unit, "irq", &value) == 0) {
        if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
            matches = true;
        else
            return (false);
    }
    if (resource_long_value(name, unit, "drq", &value) == 0) {
        if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
            matches = true;
        else
            return (false);
    }
    return (matches);
}

/*
 * Wire device unit numbers based on resource matches in hints.
 */
static void
acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
    int *unitp)
{
    device_location_cache_t *cache;
    const char *s;
    int line, unit;
    bool matches;

    /*
     * Iterate over all the hints for the devices with the specified
     * name to see if one's resources are a subset of this device's.
     */
    line = 0;
    cache = dev_wired_cache_init();
    while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) {
        /* Must have an "at" for acpi or isa. */
        resource_string_value(name, unit, "at", &s);
        matches = false;
        if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
            strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0)
            matches = acpi_hint_device_matches_resources(child, name, unit);
        else
            matches = dev_wired_cache_match(cache, child, s);

        if (matches) {
            /* We have a winner! */
            *unitp = unit;
            break;
        }
    }
    dev_wired_cache_fini(cache);
}

/*
 * Fetch the NUMA domain for a device by mapping the value returned by
 * _PXM to a NUMA domain. If the device does not have a _PXM method,
 * -2 is returned. If any other error occurs, -1 is returned.
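 *
 * acpi_get_cpus() below treats any negative return the same way and simply
 * falls back to bus_generic_get_cpus(), so the -1/-2 distinction only
 * matters to callers that want to tell "no _PXM" apart from a failed
 * proximity-domain lookup.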
1283 */ 1284 int 1285 acpi_pxm_parse(device_t dev) 1286 { 1287 #ifdef NUMA 1288 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1289 ACPI_HANDLE handle; 1290 ACPI_STATUS status; 1291 int pxm; 1292 1293 handle = acpi_get_handle(dev); 1294 if (handle == NULL) 1295 return (-2); 1296 status = acpi_GetInteger(handle, "_PXM", &pxm); 1297 if (ACPI_SUCCESS(status)) 1298 return (acpi_map_pxm_to_vm_domainid(pxm)); 1299 if (status == AE_NOT_FOUND) 1300 return (-2); 1301 #endif 1302 #endif 1303 return (-1); 1304 } 1305 1306 int 1307 acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, 1308 cpuset_t *cpuset) 1309 { 1310 int d, error; 1311 1312 d = acpi_pxm_parse(child); 1313 if (d < 0) 1314 return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); 1315 1316 switch (op) { 1317 case LOCAL_CPUS: 1318 if (setsize != sizeof(cpuset_t)) 1319 return (EINVAL); 1320 *cpuset = cpuset_domain[d]; 1321 return (0); 1322 case INTR_CPUS: 1323 error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); 1324 if (error != 0) 1325 return (error); 1326 if (setsize != sizeof(cpuset_t)) 1327 return (EINVAL); 1328 CPU_AND(cpuset, cpuset, &cpuset_domain[d]); 1329 return (0); 1330 default: 1331 return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); 1332 } 1333 } 1334 1335 static int 1336 acpi_get_domain_method(device_t dev, device_t child, int *domain) 1337 { 1338 int error; 1339 1340 error = acpi_read_ivar(dev, child, ACPI_IVAR_DOMAIN, 1341 (uintptr_t *)domain); 1342 if (error == 0 && *domain != ACPI_DEV_DOMAIN_UNKNOWN) 1343 return (0); 1344 return (ENOENT); 1345 } 1346 1347 static struct rman * 1348 acpi_get_rman(device_t bus, int type, u_int flags) 1349 { 1350 /* Only memory and IO resources are managed. */ 1351 switch (type) { 1352 case SYS_RES_IOPORT: 1353 return (&acpi_rman_io); 1354 case SYS_RES_MEMORY: 1355 return (&acpi_rman_mem); 1356 default: 1357 return (NULL); 1358 } 1359 } 1360 1361 /* 1362 * Pre-allocate/manage all memory and IO resources. Since rman can't handle 1363 * duplicates, we merge any in the sysresource attach routine. 1364 */ 1365 static int 1366 acpi_sysres_alloc(device_t dev) 1367 { 1368 struct acpi_softc *sc = device_get_softc(dev); 1369 struct resource *res; 1370 struct resource_list_entry *rle; 1371 struct rman *rm; 1372 device_t *children; 1373 int child_count, i; 1374 1375 /* 1376 * Probe/attach any sysresource devices. This would be unnecessary if we 1377 * had multi-pass probe/attach. 1378 */ 1379 if (device_get_children(dev, &children, &child_count) != 0) 1380 return (ENXIO); 1381 for (i = 0; i < child_count; i++) { 1382 if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) 1383 device_probe_and_attach(children[i]); 1384 } 1385 free(children, M_TEMP); 1386 1387 STAILQ_FOREACH(rle, &sc->sysres_rl, link) { 1388 if (rle->res != NULL) { 1389 device_printf(dev, "duplicate resource for %jx\n", rle->start); 1390 continue; 1391 } 1392 1393 /* Only memory and IO resources are valid here. */ 1394 rm = acpi_get_rman(dev, rle->type, 0); 1395 if (rm == NULL) 1396 continue; 1397 1398 /* Pre-allocate resource and add to our rman pool. 
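         * The ranges added here back the acpi_rman_io/acpi_rman_mem pools
         * returned by acpi_get_rman(), so later allocations by children that
         * fall inside these windows can be sub-allocated locally by
         * acpi_alloc_resource() instead of being passed up to the parent bus.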
         */
        res = bus_alloc_resource(dev, rle->type,
            &rle->rid, rle->start, rle->start + rle->count - 1, rle->count,
            RF_ACTIVE | RF_UNMAPPED);
        if (res != NULL) {
            rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
            rle->res = res;
        } else if (bootverbose)
            device_printf(dev, "reservation of %jx, %jx (%d) failed\n",
                rle->start, rle->count, rle->type);
    }
    return (0);
}

/*
 * Reserve declared resources for active devices found during the
 * namespace scan once the boot-time attach of devices has completed.
 *
 * Ideally reserving firmware-assigned resources would work in a
 * depth-first traversal of the device namespace, but this is
 * complicated. In particular, not all resources are enumerated by
 * ACPI (e.g. PCI bridges and devices enumerate their resources via
 * other means). Some systems also enumerate devices via ACPI behind
 * PCI bridges but without a matching PCI device_t enumerated via
 * PCI bus scanning; the device_t's end up as direct children of
 * acpi0. Doing this scan late is not ideal, but works for now.
 */
static void
acpi_reserve_resources(device_t dev)
{
    struct resource_list_entry *rle;
    struct resource_list *rl;
    struct acpi_device *ad;
    device_t *children;
    int child_count, i;

    if (device_get_children(dev, &children, &child_count) != 0)
        return;
    for (i = 0; i < child_count; i++) {
        ad = device_get_ivars(children[i]);
        rl = &ad->ad_rl;

        /* Don't reserve system resources. */
        if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0)
            continue;

        STAILQ_FOREACH(rle, rl, link) {
            /*
             * Don't reserve IRQ resources. There are many sticky things
             * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET
             * when using legacy routing).
             */
            if (rle->type == SYS_RES_IRQ)
                continue;

            /*
             * Don't reserve the resource if it is already allocated.
             * The acpi_ec(4) driver can allocate its resources early
             * if ECDT is present.
             */
            if (rle->res != NULL)
                continue;

            /*
             * Try to reserve the resource from our parent. If this
             * fails because the resource is a system resource, just
             * let it be. The resource range is already reserved so
             * that other devices will not use it. If the driver
             * needs to allocate the resource, then
             * acpi_alloc_resource() will sub-alloc from the system
             * resource.
             */
            resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid,
                rle->start, rle->end, rle->count, 0);
        }
    }
    free(children, M_TEMP);
}

static int
acpi_set_resource(device_t dev, device_t child, int type, int rid,
    rman_res_t start, rman_res_t count)
{
    struct acpi_device *ad = device_get_ivars(child);
    struct resource_list *rl = &ad->ad_rl;
    rman_res_t end;

#ifdef INTRNG
    /* map with default for now */
    if (type == SYS_RES_IRQ)
        start = (rman_res_t)acpi_map_intr(child, (u_int)start,
            acpi_get_handle(child));
#endif

    /* If the resource is already allocated, fail. */
    if (resource_list_busy(rl, type, rid))
        return (EBUSY);

    /* If the resource is already reserved, release it. */
    if (resource_list_reserved(rl, type, rid))
        resource_list_unreserve(rl, dev, child, type, rid);

    /* Add the resource.
*/ 1501 end = (start + count - 1); 1502 resource_list_add(rl, type, rid, start, end, count); 1503 return (0); 1504 } 1505 1506 static struct resource * 1507 acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, 1508 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 1509 { 1510 #ifndef INTRNG 1511 ACPI_RESOURCE ares; 1512 #endif 1513 struct acpi_device *ad; 1514 struct resource_list_entry *rle; 1515 struct resource_list *rl; 1516 struct resource *res; 1517 int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); 1518 1519 /* 1520 * First attempt at allocating the resource. For direct children, 1521 * use resource_list_alloc() to handle reserved resources. For 1522 * other devices, pass the request up to our parent. 1523 */ 1524 if (bus == device_get_parent(child)) { 1525 ad = device_get_ivars(child); 1526 rl = &ad->ad_rl; 1527 1528 /* 1529 * Simulate the behavior of the ISA bus for direct children 1530 * devices. That is, if a non-default range is specified for 1531 * a resource that doesn't exist, use bus_set_resource() to 1532 * add the resource before allocating it. Note that these 1533 * resources will not be reserved. 1534 */ 1535 if (!isdefault && resource_list_find(rl, type, *rid) == NULL) 1536 resource_list_add(rl, type, *rid, start, end, count); 1537 res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, 1538 flags); 1539 #ifndef INTRNG 1540 if (res != NULL && type == SYS_RES_IRQ) { 1541 /* 1542 * Since bus_config_intr() takes immediate effect, we cannot 1543 * configure the interrupt associated with a device when we 1544 * parse the resources but have to defer it until a driver 1545 * actually allocates the interrupt via bus_alloc_resource(). 1546 * 1547 * XXX: Should we handle the lookup failing? 1548 */ 1549 if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) 1550 acpi_config_intr(child, &ares); 1551 } 1552 #endif 1553 1554 /* 1555 * If this is an allocation of the "default" range for a given 1556 * RID, fetch the exact bounds for this resource from the 1557 * resource list entry to try to allocate the range from the 1558 * system resource regions. 1559 */ 1560 if (res == NULL && isdefault) { 1561 rle = resource_list_find(rl, type, *rid); 1562 if (rle != NULL) { 1563 start = rle->start; 1564 end = rle->end; 1565 count = rle->count; 1566 } 1567 } 1568 } else 1569 res = bus_generic_alloc_resource(bus, child, type, rid, 1570 start, end, count, flags); 1571 1572 /* 1573 * If the first attempt failed and this is an allocation of a 1574 * specific range, try to satisfy the request via a suballocation 1575 * from our system resource regions. 
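     *
     * For instance (an illustrative case, not one taken from this file), a
     * driver requesting I/O port 0x62 that lies inside a PNP0C02 range
     * reserved by acpi_sysres_alloc() above is satisfied out of acpi_rman_io
     * here rather than by the parent bus.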
1576 */ 1577 if (res == NULL && start + count - 1 == end) 1578 res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, 1579 count, flags); 1580 return (res); 1581 } 1582 1583 static bool 1584 acpi_is_resource_managed(device_t bus, struct resource *r) 1585 { 1586 struct rman *rm; 1587 1588 rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r)); 1589 if (rm == NULL) 1590 return (false); 1591 return (rman_is_region_manager(r, rm)); 1592 } 1593 1594 static struct resource * 1595 acpi_managed_resource(device_t bus, struct resource *r) 1596 { 1597 struct acpi_softc *sc = device_get_softc(bus); 1598 struct resource_list_entry *rle; 1599 1600 KASSERT(acpi_is_resource_managed(bus, r), 1601 ("resource %p is not suballocated", r)); 1602 1603 STAILQ_FOREACH(rle, &sc->sysres_rl, link) { 1604 if (rle->type != rman_get_type(r) || rle->res == NULL) 1605 continue; 1606 if (rman_get_start(r) >= rman_get_start(rle->res) && 1607 rman_get_end(r) <= rman_get_end(rle->res)) 1608 return (rle->res); 1609 } 1610 return (NULL); 1611 } 1612 1613 static int 1614 acpi_adjust_resource(device_t bus, device_t child, struct resource *r, 1615 rman_res_t start, rman_res_t end) 1616 { 1617 1618 if (acpi_is_resource_managed(bus, r)) 1619 return (rman_adjust_resource(r, start, end)); 1620 return (bus_generic_adjust_resource(bus, child, r, start, end)); 1621 } 1622 1623 static int 1624 acpi_release_resource(device_t bus, device_t child, struct resource *r) 1625 { 1626 /* 1627 * If this resource belongs to one of our internal managers, 1628 * deactivate it and release it to the local pool. 1629 */ 1630 if (acpi_is_resource_managed(bus, r)) 1631 return (bus_generic_rman_release_resource(bus, child, r)); 1632 1633 return (bus_generic_rl_release_resource(bus, child, r)); 1634 } 1635 1636 static void 1637 acpi_delete_resource(device_t bus, device_t child, int type, int rid) 1638 { 1639 struct resource_list *rl; 1640 1641 rl = acpi_get_rlist(bus, child); 1642 if (resource_list_busy(rl, type, rid)) { 1643 device_printf(bus, "delete_resource: Resource still owned by child" 1644 " (type=%d, rid=%d)\n", type, rid); 1645 return; 1646 } 1647 if (resource_list_reserved(rl, type, rid)) 1648 resource_list_unreserve(rl, bus, child, type, rid); 1649 resource_list_delete(rl, type, rid); 1650 } 1651 1652 static int 1653 acpi_activate_resource(device_t bus, device_t child, struct resource *r) 1654 { 1655 if (acpi_is_resource_managed(bus, r)) 1656 return (bus_generic_rman_activate_resource(bus, child, r)); 1657 return (bus_generic_activate_resource(bus, child, r)); 1658 } 1659 1660 static int 1661 acpi_deactivate_resource(device_t bus, device_t child, struct resource *r) 1662 { 1663 if (acpi_is_resource_managed(bus, r)) 1664 return (bus_generic_rman_deactivate_resource(bus, child, r)); 1665 return (bus_generic_deactivate_resource(bus, child, r)); 1666 } 1667 1668 static int 1669 acpi_map_resource(device_t bus, device_t child, struct resource *r, 1670 struct resource_map_request *argsp, struct resource_map *map) 1671 { 1672 struct resource_map_request args; 1673 struct resource *sysres; 1674 rman_res_t length, start; 1675 int error; 1676 1677 if (!acpi_is_resource_managed(bus, r)) 1678 return (bus_generic_map_resource(bus, child, r, argsp, map)); 1679 1680 /* Resources must be active to be mapped. 
*/ 1681 if (!(rman_get_flags(r) & RF_ACTIVE)) 1682 return (ENXIO); 1683 1684 resource_init_map_request(&args); 1685 error = resource_validate_map_request(r, argsp, &args, &start, &length); 1686 if (error) 1687 return (error); 1688 1689 sysres = acpi_managed_resource(bus, r); 1690 if (sysres == NULL) 1691 return (ENOENT); 1692 1693 args.offset = start - rman_get_start(sysres); 1694 args.length = length; 1695 return (bus_map_resource(bus, sysres, &args, map)); 1696 } 1697 1698 static int 1699 acpi_unmap_resource(device_t bus, device_t child, struct resource *r, 1700 struct resource_map *map) 1701 { 1702 struct resource *sysres; 1703 1704 if (!acpi_is_resource_managed(bus, r)) 1705 return (bus_generic_unmap_resource(bus, child, r, map)); 1706 1707 sysres = acpi_managed_resource(bus, r); 1708 if (sysres == NULL) 1709 return (ENOENT); 1710 return (bus_unmap_resource(bus, sysres, map)); 1711 } 1712 1713 /* Allocate an IO port or memory resource, given its GAS. */ 1714 int 1715 acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, 1716 struct resource **res, u_int flags) 1717 { 1718 int error, res_type; 1719 1720 error = ENOMEM; 1721 if (type == NULL || rid == NULL || gas == NULL || res == NULL) 1722 return (EINVAL); 1723 1724 /* We only support memory and IO spaces. */ 1725 switch (gas->SpaceId) { 1726 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 1727 res_type = SYS_RES_MEMORY; 1728 break; 1729 case ACPI_ADR_SPACE_SYSTEM_IO: 1730 res_type = SYS_RES_IOPORT; 1731 break; 1732 default: 1733 return (EOPNOTSUPP); 1734 } 1735 1736 /* 1737 * If the register width is less than 8, assume the BIOS author means 1738 * it is a bit field and just allocate a byte. 1739 */ 1740 if (gas->BitWidth && gas->BitWidth < 8) 1741 gas->BitWidth = 8; 1742 1743 /* Validate the address after we're sure we support the space. */ 1744 if (gas->Address == 0 || gas->BitWidth == 0) 1745 return (EINVAL); 1746 1747 bus_set_resource(dev, res_type, *rid, gas->Address, 1748 gas->BitWidth / 8); 1749 *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); 1750 if (*res != NULL) { 1751 *type = res_type; 1752 error = 0; 1753 } else 1754 bus_delete_resource(dev, res_type, *rid); 1755 1756 return (error); 1757 } 1758 1759 /* Probe _HID and _CID for compatible ISA PNP ids. */ 1760 static uint32_t 1761 acpi_isa_get_logicalid(device_t dev) 1762 { 1763 ACPI_DEVICE_INFO *devinfo; 1764 ACPI_HANDLE h; 1765 uint32_t pnpid; 1766 1767 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1768 1769 /* Fetch and validate the HID. */ 1770 if ((h = acpi_get_handle(dev)) == NULL || 1771 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) 1772 return_VALUE (0); 1773 1774 pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && 1775 devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? 
1776 PNP_EISAID(devinfo->HardwareId.String) : 0; 1777 AcpiOsFree(devinfo); 1778 1779 return_VALUE (pnpid); 1780 } 1781 1782 static int 1783 acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) 1784 { 1785 ACPI_DEVICE_INFO *devinfo; 1786 ACPI_PNP_DEVICE_ID *ids; 1787 ACPI_HANDLE h; 1788 uint32_t *pnpid; 1789 int i, valid; 1790 1791 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1792 1793 pnpid = cids; 1794 1795 /* Fetch and validate the CID */ 1796 if ((h = acpi_get_handle(dev)) == NULL || 1797 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) 1798 return_VALUE (0); 1799 1800 if ((devinfo->Valid & ACPI_VALID_CID) == 0) { 1801 AcpiOsFree(devinfo); 1802 return_VALUE (0); 1803 } 1804 1805 if (devinfo->CompatibleIdList.Count < count) 1806 count = devinfo->CompatibleIdList.Count; 1807 ids = devinfo->CompatibleIdList.Ids; 1808 for (i = 0, valid = 0; i < count; i++) 1809 if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && 1810 strncmp(ids[i].String, "PNP", 3) == 0) { 1811 *pnpid++ = PNP_EISAID(ids[i].String); 1812 valid++; 1813 } 1814 AcpiOsFree(devinfo); 1815 1816 return_VALUE (valid); 1817 } 1818 1819 static int 1820 acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) 1821 { 1822 ACPI_HANDLE h; 1823 ACPI_OBJECT_TYPE t; 1824 int rv; 1825 int i; 1826 1827 h = acpi_get_handle(dev); 1828 if (ids == NULL || h == NULL) 1829 return (ENXIO); 1830 t = acpi_get_type(dev); 1831 if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) 1832 return (ENXIO); 1833 1834 /* Try to match one of the array of IDs with a HID or CID. */ 1835 for (i = 0; ids[i] != NULL; i++) { 1836 rv = acpi_MatchHid(h, ids[i]); 1837 if (rv == ACPI_MATCHHID_NOMATCH) 1838 continue; 1839 1840 if (match != NULL) { 1841 *match = ids[i]; 1842 } 1843 return ((rv == ACPI_MATCHHID_HID)? 
1844 BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); 1845 } 1846 return (ENXIO); 1847 } 1848 1849 static ACPI_STATUS 1850 acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, 1851 ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) 1852 { 1853 ACPI_HANDLE h; 1854 1855 if (dev == NULL) 1856 h = ACPI_ROOT_OBJECT; 1857 else if ((h = acpi_get_handle(dev)) == NULL) 1858 return (AE_BAD_PARAMETER); 1859 return (AcpiEvaluateObject(h, pathname, parameters, ret)); 1860 } 1861 1862 static ACPI_STATUS 1863 acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, 1864 const ACPI_OBJECT **value) 1865 { 1866 const ACPI_OBJECT *pkg, *name, *val; 1867 struct acpi_device *ad; 1868 ACPI_STATUS status; 1869 int i; 1870 1871 ad = device_get_ivars(dev); 1872 1873 if (ad == NULL || propname == NULL) 1874 return (AE_BAD_PARAMETER); 1875 if (ad->dsd_pkg == NULL) { 1876 if (ad->dsd.Pointer == NULL) { 1877 status = acpi_find_dsd(ad); 1878 if (ACPI_FAILURE(status)) 1879 return (status); 1880 } else { 1881 return (AE_NOT_FOUND); 1882 } 1883 } 1884 1885 for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { 1886 pkg = &ad->dsd_pkg->Package.Elements[i]; 1887 if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) 1888 continue; 1889 1890 name = &pkg->Package.Elements[0]; 1891 val = &pkg->Package.Elements[1]; 1892 if (name->Type != ACPI_TYPE_STRING) 1893 continue; 1894 if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { 1895 if (value != NULL) 1896 *value = val; 1897 1898 return (AE_OK); 1899 } 1900 } 1901 1902 return (AE_NOT_FOUND); 1903 } 1904 1905 static ACPI_STATUS 1906 acpi_find_dsd(struct acpi_device *ad) 1907 { 1908 const ACPI_OBJECT *dsd, *guid, *pkg; 1909 ACPI_STATUS status; 1910 1911 ad->dsd.Length = ACPI_ALLOCATE_BUFFER; 1912 ad->dsd.Pointer = NULL; 1913 ad->dsd_pkg = NULL; 1914 1915 status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd); 1916 if (ACPI_FAILURE(status)) 1917 return (status); 1918 1919 dsd = ad->dsd.Pointer; 1920 guid = &dsd->Package.Elements[0]; 1921 pkg = &dsd->Package.Elements[1]; 1922 1923 if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || 1924 guid->Buffer.Length != sizeof(acpi_dsd_uuid)) 1925 return (AE_NOT_FOUND); 1926 if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, 1927 sizeof(acpi_dsd_uuid)) == 0) { 1928 1929 ad->dsd_pkg = pkg; 1930 return (AE_OK); 1931 } 1932 1933 return (AE_NOT_FOUND); 1934 } 1935 1936 static ssize_t 1937 acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size) 1938 { 1939 ACPI_OBJECT *pobj; 1940 ACPI_HANDLE h; 1941 1942 if (hobj->Type != ACPI_TYPE_PACKAGE) 1943 goto err; 1944 if (hobj->Package.Count != 1) 1945 goto err; 1946 1947 pobj = &hobj->Package.Elements[0]; 1948 if (pobj == NULL) 1949 goto err; 1950 if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE) 1951 goto err; 1952 1953 h = acpi_GetReference(NULL, pobj); 1954 if (h == NULL) 1955 goto err; 1956 1957 if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) 1958 *(ACPI_HANDLE *)propvalue = h; 1959 return (sizeof(ACPI_HANDLE)); 1960 1961 err: 1962 return (-1); 1963 } 1964 1965 static ssize_t 1966 acpi_bus_get_prop(device_t bus, device_t child, const char *propname, 1967 void *propvalue, size_t size, device_property_type_t type) 1968 { 1969 ACPI_STATUS status; 1970 const ACPI_OBJECT *obj; 1971 1972 status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), 1973 &obj); 1974 if (ACPI_FAILURE(status)) 1975 return (-1); 1976 1977 switch (type) { 1978 case DEVICE_PROP_ANY: 1979 case DEVICE_PROP_BUFFER: 1980 
case DEVICE_PROP_UINT32: 1981 case DEVICE_PROP_UINT64: 1982 break; 1983 case DEVICE_PROP_HANDLE: 1984 return (acpi_bus_get_prop_handle(obj, propvalue, size)); 1985 default: 1986 return (-1); 1987 } 1988 1989 switch (obj->Type) { 1990 case ACPI_TYPE_INTEGER: 1991 if (type == DEVICE_PROP_UINT32) { 1992 if (propvalue != NULL && size >= sizeof(uint32_t)) 1993 *((uint32_t *)propvalue) = obj->Integer.Value; 1994 return (sizeof(uint32_t)); 1995 } 1996 if (propvalue != NULL && size >= sizeof(uint64_t)) 1997 *((uint64_t *) propvalue) = obj->Integer.Value; 1998 return (sizeof(uint64_t)); 1999 2000 case ACPI_TYPE_STRING: 2001 if (type != DEVICE_PROP_ANY && 2002 type != DEVICE_PROP_BUFFER) 2003 return (-1); 2004 2005 if (propvalue != NULL && size > 0) 2006 memcpy(propvalue, obj->String.Pointer, 2007 MIN(size, obj->String.Length)); 2008 return (obj->String.Length); 2009 2010 case ACPI_TYPE_BUFFER: 2011 if (propvalue != NULL && size > 0) 2012 memcpy(propvalue, obj->Buffer.Pointer, 2013 MIN(size, obj->Buffer.Length)); 2014 return (obj->Buffer.Length); 2015 2016 case ACPI_TYPE_PACKAGE: 2017 if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) { 2018 *((ACPI_OBJECT **) propvalue) = 2019 __DECONST(ACPI_OBJECT *, obj); 2020 } 2021 return (sizeof(ACPI_OBJECT *)); 2022 2023 case ACPI_TYPE_LOCAL_REFERENCE: 2024 if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) { 2025 ACPI_HANDLE h; 2026 2027 h = acpi_GetReference(NULL, 2028 __DECONST(ACPI_OBJECT *, obj)); 2029 memcpy(propvalue, h, sizeof(ACPI_HANDLE)); 2030 } 2031 return (sizeof(ACPI_HANDLE)); 2032 default: 2033 return (0); 2034 } 2035 } 2036 2037 int 2038 acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) 2039 { 2040 struct acpi_softc *sc; 2041 ACPI_HANDLE handle; 2042 ACPI_STATUS status; 2043 char sxd[8]; 2044 2045 handle = acpi_get_handle(dev); 2046 2047 /* 2048 * XXX If we find these devices, don't try to power them down. 2049 * The serial and IRDA ports on my T23 hang the system when 2050 * set to D3 and it appears that such legacy devices may 2051 * need special handling in their drivers. 2052 */ 2053 if (dstate == NULL || handle == NULL || 2054 acpi_MatchHid(handle, "PNP0500") || 2055 acpi_MatchHid(handle, "PNP0501") || 2056 acpi_MatchHid(handle, "PNP0502") || 2057 acpi_MatchHid(handle, "PNP0510") || 2058 acpi_MatchHid(handle, "PNP0511")) 2059 return (ENXIO); 2060 2061 /* 2062 * Override next state with the value from _SxD, if present. 2063 * Note illegal _S0D is evaluated because some systems expect this. 2064 */ 2065 sc = device_get_softc(bus); 2066 snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); 2067 status = acpi_GetInteger(handle, sxd, dstate); 2068 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 2069 device_printf(dev, "failed to get %s on %s: %s\n", sxd, 2070 acpi_name(handle), AcpiFormatException(status)); 2071 return (ENXIO); 2072 } 2073 2074 return (0); 2075 } 2076 2077 /* Callback arg for our implementation of walking the namespace. */ 2078 struct acpi_device_scan_ctx { 2079 acpi_scan_cb_t user_fn; 2080 void *arg; 2081 ACPI_HANDLE parent; 2082 }; 2083 2084 static ACPI_STATUS 2085 acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) 2086 { 2087 struct acpi_device_scan_ctx *ctx; 2088 device_t dev, old_dev; 2089 ACPI_STATUS status; 2090 ACPI_OBJECT_TYPE type; 2091 2092 /* 2093 * Skip this device if we think we'll have trouble with it or it is 2094 * the parent where the scan began. 
2095 */ 2096 ctx = (struct acpi_device_scan_ctx *)arg; 2097 if (acpi_avoid(h) || h == ctx->parent) 2098 return (AE_OK); 2099 2100 /* If this is not a valid device type (e.g., a method), skip it. */ 2101 if (ACPI_FAILURE(AcpiGetType(h, &type))) 2102 return (AE_OK); 2103 if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && 2104 type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) 2105 return (AE_OK); 2106 2107 /* 2108 * Call the user function with the current device. If it is unchanged 2109 * afterwards, return. Otherwise, we update the handle to the new dev. 2110 */ 2111 old_dev = acpi_get_device(h); 2112 dev = old_dev; 2113 status = ctx->user_fn(h, &dev, level, ctx->arg); 2114 if (ACPI_FAILURE(status) || old_dev == dev) 2115 return (status); 2116 2117 /* Remove the old child and its connection to the handle. */ 2118 if (old_dev != NULL) 2119 device_delete_child(device_get_parent(old_dev), old_dev); 2120 2121 /* Recreate the handle association if the user created a device. */ 2122 if (dev != NULL) 2123 AcpiAttachData(h, acpi_fake_objhandler, dev); 2124 2125 return (AE_OK); 2126 } 2127 2128 static ACPI_STATUS 2129 acpi_device_scan_children(device_t bus, device_t dev, int max_depth, 2130 acpi_scan_cb_t user_fn, void *arg) 2131 { 2132 ACPI_HANDLE h; 2133 struct acpi_device_scan_ctx ctx; 2134 2135 if (acpi_disabled("children")) 2136 return (AE_OK); 2137 2138 if (dev == NULL) 2139 h = ACPI_ROOT_OBJECT; 2140 else if ((h = acpi_get_handle(dev)) == NULL) 2141 return (AE_BAD_PARAMETER); 2142 ctx.user_fn = user_fn; 2143 ctx.arg = arg; 2144 ctx.parent = h; 2145 return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, 2146 acpi_device_scan_cb, NULL, &ctx, NULL)); 2147 } 2148 2149 /* 2150 * Even though ACPI devices are not PCI, we use the PCI approach for setting 2151 * device power states since it's close enough to ACPI. 2152 */ 2153 int 2154 acpi_set_powerstate(device_t child, int state) 2155 { 2156 ACPI_HANDLE h; 2157 ACPI_STATUS status; 2158 2159 h = acpi_get_handle(child); 2160 if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) 2161 return (EINVAL); 2162 if (h == NULL) 2163 return (0); 2164 2165 /* Ignore errors if the power methods aren't present. */ 2166 status = acpi_pwr_switch_consumer(h, state); 2167 if (ACPI_SUCCESS(status)) { 2168 if (bootverbose) 2169 device_printf(child, "set ACPI power state %s on %s\n", 2170 acpi_d_state_to_str(state), acpi_name(h)); 2171 } else if (status != AE_NOT_FOUND) 2172 device_printf(child, 2173 "failed to set ACPI power state %s on %s: %s\n", 2174 acpi_d_state_to_str(state), acpi_name(h), 2175 AcpiFormatException(status)); 2176 2177 return (0); 2178 } 2179 2180 static int 2181 acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) 2182 { 2183 int result, cid_count, i; 2184 uint32_t lid, cids[8]; 2185 2186 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2187 2188 /* 2189 * ISA-style drivers attached to ACPI may persist and 2190 * probe manually if we return ENOENT. We never want 2191 * that to happen, so don't ever return it. 
2192 */ 2193 result = ENXIO; 2194 2195 /* Scan the supplied IDs for a match */ 2196 lid = acpi_isa_get_logicalid(child); 2197 cid_count = acpi_isa_get_compatid(child, cids, 8); 2198 while (ids && ids->ip_id) { 2199 if (lid == ids->ip_id) { 2200 result = 0; 2201 goto out; 2202 } 2203 for (i = 0; i < cid_count; i++) { 2204 if (cids[i] == ids->ip_id) { 2205 result = 0; 2206 goto out; 2207 } 2208 } 2209 ids++; 2210 } 2211 2212 out: 2213 if (result == 0 && ids->ip_desc) 2214 device_set_desc(child, ids->ip_desc); 2215 2216 return_VALUE (result); 2217 } 2218 2219 /* 2220 * Look for a MCFG table. If it is present, use the settings for 2221 * domain (segment) 0 to setup PCI config space access via the memory 2222 * map. 2223 * 2224 * On non-x86 architectures (arm64 for now), this will be done from the 2225 * PCI host bridge driver. 2226 */ 2227 static void 2228 acpi_enable_pcie(void) 2229 { 2230 #if defined(__i386__) || defined(__amd64__) 2231 ACPI_TABLE_HEADER *hdr; 2232 ACPI_MCFG_ALLOCATION *alloc, *end; 2233 ACPI_STATUS status; 2234 2235 status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); 2236 if (ACPI_FAILURE(status)) 2237 return; 2238 2239 end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); 2240 alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); 2241 while (alloc < end) { 2242 pcie_cfgregopen(alloc->Address, alloc->PciSegment, 2243 alloc->StartBusNumber, alloc->EndBusNumber); 2244 alloc++; 2245 } 2246 #endif 2247 } 2248 2249 static void 2250 acpi_platform_osc(device_t dev) 2251 { 2252 ACPI_HANDLE sb_handle; 2253 ACPI_STATUS status; 2254 uint32_t cap_set[2]; 2255 2256 /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ 2257 static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 2258 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 2259 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 2260 }; 2261 2262 if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) 2263 return; 2264 2265 cap_set[1] = 0x10; /* APEI Support */ 2266 status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, 2267 nitems(cap_set), cap_set, cap_set, false); 2268 if (ACPI_FAILURE(status)) { 2269 if (status == AE_NOT_FOUND) 2270 return; 2271 device_printf(dev, "_OSC failed: %s\n", 2272 AcpiFormatException(status)); 2273 return; 2274 } 2275 } 2276 2277 /* 2278 * Scan all of the ACPI namespace and attach child devices. 2279 * 2280 * We should only expect to find devices in the \_PR, \_TZ, \_SI, and 2281 * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. 2282 * However, in violation of the spec, some systems place their PCI link 2283 * devices in \, so we have to walk the whole namespace. We check the 2284 * type of namespace nodes, so this should be ok. 2285 */ 2286 static void 2287 acpi_probe_children(device_t bus) 2288 { 2289 2290 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2291 2292 /* 2293 * Scan the namespace and insert placeholders for all the devices that 2294 * we find. We also probe/attach any early devices. 2295 * 2296 * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because 2297 * we want to create nodes for all devices, not just those that are 2298 * currently present. (This assumes that we don't want to create/remove 2299 * devices as they appear, which might be smarter.) 2300 */ 2301 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); 2302 AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, 2303 NULL, bus, NULL); 2304 2305 /* Pre-allocate resources for our rman from any sysresource devices. 
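These are the PNP0C01/PNP0C02 system resource holders that are given an early probe order below.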
*/ 2306 acpi_sysres_alloc(bus); 2307 2308 /* Create any static children by calling device identify methods. */ 2309 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); 2310 bus_identify_children(bus); 2311 2312 /* Probe/attach all children, created statically and from the namespace. */ 2313 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_attach_children\n")); 2314 bus_attach_children(bus); 2315 2316 /* 2317 * Reserve resources allocated to children but not yet allocated 2318 * by a driver. 2319 */ 2320 acpi_reserve_resources(bus); 2321 2322 /* Attach wake sysctls. */ 2323 acpi_wake_sysctl_walk(bus); 2324 2325 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); 2326 return_VOID; 2327 } 2328 2329 /* 2330 * Determine the probe order for a given device. 2331 */ 2332 static void 2333 acpi_probe_order(ACPI_HANDLE handle, int *order) 2334 { 2335 ACPI_OBJECT_TYPE type; 2336 2337 /* 2338 * 0. CPUs 2339 * 1. I/O port and memory system resource holders 2340 * 2. Clocks and timers (to handle early accesses) 2341 * 3. Embedded controllers (to handle early accesses) 2342 * 4. PCI Link Devices 2343 */ 2344 AcpiGetType(handle, &type); 2345 if (type == ACPI_TYPE_PROCESSOR) 2346 *order = 0; 2347 else if (acpi_MatchHid(handle, "PNP0C01") || 2348 acpi_MatchHid(handle, "PNP0C02")) 2349 *order = 1; 2350 else if (acpi_MatchHid(handle, "PNP0100") || 2351 acpi_MatchHid(handle, "PNP0103") || 2352 acpi_MatchHid(handle, "PNP0B00")) 2353 *order = 2; 2354 else if (acpi_MatchHid(handle, "PNP0C09")) 2355 *order = 3; 2356 else if (acpi_MatchHid(handle, "PNP0C0F")) 2357 *order = 4; 2358 } 2359 2360 /* 2361 * Evaluate a child device and determine whether we might attach a device to 2362 * it. 2363 */ 2364 static ACPI_STATUS 2365 acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) 2366 { 2367 ACPI_DEVICE_INFO *devinfo; 2368 struct acpi_device *ad; 2369 struct acpi_prw_data prw; 2370 ACPI_OBJECT_TYPE type; 2371 ACPI_HANDLE h; 2372 device_t bus, child; 2373 char *handle_str; 2374 int d, order; 2375 2376 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2377 2378 if (acpi_disabled("children")) 2379 return_ACPI_STATUS (AE_OK); 2380 2381 /* Skip this device if we think we'll have trouble with it. */ 2382 if (acpi_avoid(handle)) 2383 return_ACPI_STATUS (AE_OK); 2384 2385 bus = (device_t)context; 2386 if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { 2387 handle_str = acpi_name(handle); 2388 switch (type) { 2389 case ACPI_TYPE_DEVICE: 2390 /* 2391 * Since we scan from \, be sure to skip system scope objects. 2392 * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around 2393 * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run 2394 * during the initialization and \_TZ_ is to support Notify() on it. 2395 */ 2396 if (strcmp(handle_str, "\\_SB_") == 0 || 2397 strcmp(handle_str, "\\_TZ_") == 0) 2398 break; 2399 if (acpi_parse_prw(handle, &prw) == 0) 2400 AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); 2401 2402 /* 2403 * Ignore devices that do not have a _HID or _CID. They should 2404 * be discovered by other buses (e.g. the PCI bus driver). 2405 */ 2406 if (!acpi_has_hid(handle)) 2407 break; 2408 /* FALLTHROUGH */ 2409 case ACPI_TYPE_PROCESSOR: 2410 case ACPI_TYPE_THERMAL: 2411 case ACPI_TYPE_POWER: 2412 /* 2413 * Create a placeholder device for this node. Sort the 2414 * placeholder so that the probe/attach passes will run 2415 * breadth-first. Orders less than ACPI_DEV_BASE_ORDER 2416 * are reserved for special objects (i.e., system 2417 * resources). 
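* For example, a device with no special ID keeps the order computed below (level * 10 + ACPI_DEV_BASE_ORDER), while acpi_probe_order() pulls an embedded controller (PNP0C09) forward to order 3 so it attaches early.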
2418 */ 2419 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); 2420 order = level * 10 + ACPI_DEV_BASE_ORDER; 2421 acpi_probe_order(handle, &order); 2422 child = BUS_ADD_CHILD(bus, order, NULL, DEVICE_UNIT_ANY); 2423 if (child == NULL) 2424 break; 2425 2426 /* Associate the handle with the device_t and vice versa. */ 2427 acpi_set_handle(child, handle); 2428 AcpiAttachData(handle, acpi_fake_objhandler, child); 2429 2430 /* 2431 * Check that the device is present. If it's not present, 2432 * leave it disabled (so that we have a device_t attached to 2433 * the handle, but we don't probe it). 2434 * 2435 * XXX PCI link devices sometimes report "present" but not 2436 * "functional" (i.e. if disabled). Go ahead and probe them 2437 * anyway since we may enable them later. 2438 */ 2439 if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { 2440 /* Never disable PCI link devices. */ 2441 if (acpi_MatchHid(handle, "PNP0C0F")) 2442 break; 2443 2444 /* 2445 * RTC Device should be enabled for CMOS register space 2446 * unless FADT indicate it is not present. 2447 * (checked in RTC probe routine.) 2448 */ 2449 if (acpi_MatchHid(handle, "PNP0B00")) 2450 break; 2451 2452 /* 2453 * Docking stations should remain enabled since the system 2454 * may be undocked at boot. 2455 */ 2456 if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) 2457 break; 2458 2459 device_disable(child); 2460 break; 2461 } 2462 2463 /* 2464 * Get the device's resource settings and attach them. 2465 * Note that if the device has _PRS but no _CRS, we need 2466 * to decide when it's appropriate to try to configure the 2467 * device. Ignore the return value here; it's OK for the 2468 * device not to have any resources. 2469 */ 2470 acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); 2471 2472 ad = device_get_ivars(child); 2473 ad->ad_cls_class = 0xffffff; 2474 if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { 2475 if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && 2476 devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { 2477 ad->ad_cls_class = strtoul(devinfo->ClassCode.String, 2478 NULL, 16); 2479 } 2480 AcpiOsFree(devinfo); 2481 } 2482 2483 d = acpi_pxm_parse(child); 2484 if (d >= 0) 2485 ad->ad_domain = d; 2486 break; 2487 } 2488 } 2489 2490 return_ACPI_STATUS (AE_OK); 2491 } 2492 2493 /* 2494 * AcpiAttachData() requires an object handler but never uses it. This is a 2495 * placeholder object handler so we can store a device_t in an ACPI_HANDLE. 2496 */ 2497 void 2498 acpi_fake_objhandler(ACPI_HANDLE h, void *data) 2499 { 2500 } 2501 2502 static void 2503 acpi_shutdown_final(void *arg, int howto) 2504 { 2505 struct acpi_softc *sc = (struct acpi_softc *)arg; 2506 register_t intr; 2507 ACPI_STATUS status; 2508 2509 /* 2510 * XXX Shutdown code should only run on the BSP (cpuid 0). 2511 * Some chipsets do not power off the system correctly if called from 2512 * an AP. 
2513 */ 2514 if ((howto & RB_POWEROFF) != 0) { 2515 status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); 2516 if (ACPI_FAILURE(status)) { 2517 device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", 2518 AcpiFormatException(status)); 2519 return; 2520 } 2521 device_printf(sc->acpi_dev, "Powering system off\n"); 2522 intr = intr_disable(); 2523 status = AcpiEnterSleepState(ACPI_STATE_S5); 2524 if (ACPI_FAILURE(status)) { 2525 intr_restore(intr); 2526 device_printf(sc->acpi_dev, "power-off failed - %s\n", 2527 AcpiFormatException(status)); 2528 } else { 2529 DELAY(1000000); 2530 intr_restore(intr); 2531 device_printf(sc->acpi_dev, "power-off failed - timeout\n"); 2532 } 2533 } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { 2534 /* Reboot using the reset register. */ 2535 status = AcpiReset(); 2536 if (ACPI_SUCCESS(status)) { 2537 DELAY(1000000); 2538 device_printf(sc->acpi_dev, "reset failed - timeout\n"); 2539 } else if (status != AE_NOT_EXIST) 2540 device_printf(sc->acpi_dev, "reset failed - %s\n", 2541 AcpiFormatException(status)); 2542 } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { 2543 /* 2544 * Only disable ACPI if the user requested it. On some systems, writing 2545 * the disable value to SMI_CMD hangs the system. 2546 */ 2547 device_printf(sc->acpi_dev, "Shutting down\n"); 2548 AcpiTerminate(); 2549 } 2550 } 2551 2552 static void 2553 acpi_enable_fixed_events(struct acpi_softc *sc) 2554 { 2555 static int first_time = 1; 2556 2557 /* Enable and clear fixed events and install handlers. */ 2558 if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { 2559 AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); 2560 AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, 2561 acpi_event_power_button_sleep, sc); 2562 if (first_time) 2563 device_printf(sc->acpi_dev, "Power Button (fixed)\n"); 2564 } 2565 if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 2566 AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); 2567 AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, 2568 acpi_event_sleep_button_sleep, sc); 2569 if (first_time) 2570 device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); 2571 } 2572 2573 first_time = 0; 2574 } 2575 2576 /* 2577 * Returns true if the device is actually present and should 2578 * be attached to. This requires the present, enabled, UI-visible 2579 * and diagnostics-passed bits to be set. 2580 */ 2581 BOOLEAN 2582 acpi_DeviceIsPresent(device_t dev) 2583 { 2584 ACPI_HANDLE h; 2585 UINT32 s; 2586 ACPI_STATUS status; 2587 2588 h = acpi_get_handle(dev); 2589 if (h == NULL) 2590 return (FALSE); 2591 2592 #ifdef ACPI_EARLY_EPYC_WAR 2593 /* 2594 * Certain Threadripper boards always return 0 for FreeBSD because the 2595 * firmware only returns non-zero for the OS string "Windows 2015". 2596 * Force them to always be treated as present. 2597 * Beta versions were worse: they always returned 0. 2598 */ 2599 if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) 2600 return (TRUE); 2601 #endif 2602 2603 status = acpi_GetInteger(h, "_STA", &s); 2604 2605 /* 2606 * If no _STA method or if it failed, then assume that 2607 * the device is present. 2608 */ 2609 if (ACPI_FAILURE(status)) 2610 return (TRUE); 2611 2612 return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); 2613 } 2614 2615 /* 2616 * Returns true if the battery is actually present and inserted.
2617 */ 2618 BOOLEAN 2619 acpi_BatteryIsPresent(device_t dev) 2620 { 2621 ACPI_HANDLE h; 2622 UINT32 s; 2623 ACPI_STATUS status; 2624 2625 h = acpi_get_handle(dev); 2626 if (h == NULL) 2627 return (FALSE); 2628 status = acpi_GetInteger(h, "_STA", &s); 2629 2630 /* 2631 * If no _STA method or if it failed, then assume that 2632 * the device is present. 2633 */ 2634 if (ACPI_FAILURE(status)) 2635 return (TRUE); 2636 2637 return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); 2638 } 2639 2640 /* 2641 * Returns true if a device has at least one valid device ID. 2642 */ 2643 BOOLEAN 2644 acpi_has_hid(ACPI_HANDLE h) 2645 { 2646 ACPI_DEVICE_INFO *devinfo; 2647 BOOLEAN ret; 2648 2649 if (h == NULL || 2650 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) 2651 return (FALSE); 2652 2653 ret = FALSE; 2654 if ((devinfo->Valid & ACPI_VALID_HID) != 0) 2655 ret = TRUE; 2656 else if ((devinfo->Valid & ACPI_VALID_CID) != 0) 2657 if (devinfo->CompatibleIdList.Count > 0) 2658 ret = TRUE; 2659 2660 AcpiOsFree(devinfo); 2661 return (ret); 2662 } 2663 2664 /* 2665 * Match a HID string against a handle. Returns 2666 * ACPI_MATCHHID_HID if the _HID matches, 2667 * ACPI_MATCHHID_CID if a _CID matches but the _HID does not, and 2668 * ACPI_MATCHHID_NOMATCH (0) if neither matches. 2669 */ 2670 int 2671 acpi_MatchHid(ACPI_HANDLE h, const char *hid) 2672 { 2673 ACPI_DEVICE_INFO *devinfo; 2674 BOOLEAN ret; 2675 int i; 2676 2677 if (hid == NULL || h == NULL || 2678 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) 2679 return (ACPI_MATCHHID_NOMATCH); 2680 2681 ret = ACPI_MATCHHID_NOMATCH; 2682 if ((devinfo->Valid & ACPI_VALID_HID) != 0 && 2683 strcmp(hid, devinfo->HardwareId.String) == 0) 2684 ret = ACPI_MATCHHID_HID; 2685 else if ((devinfo->Valid & ACPI_VALID_CID) != 0) 2686 for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { 2687 if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { 2688 ret = ACPI_MATCHHID_CID; 2689 break; 2690 } 2691 } 2692 2693 AcpiOsFree(devinfo); 2694 return (ret); 2695 } 2696 2697 /* 2698 * Return the handle of a named object within our scope, i.e. that of (parent) 2699 * or one of its parents. 2700 */ 2701 ACPI_STATUS 2702 acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) 2703 { 2704 ACPI_HANDLE r; 2705 ACPI_STATUS status; 2706 2707 /* Walk back up the tree to the root */ 2708 for (;;) { 2709 status = AcpiGetHandle(parent, path, &r); 2710 if (ACPI_SUCCESS(status)) { 2711 *result = r; 2712 return (AE_OK); 2713 } 2714 /* XXX Return error here? */ 2715 if (status != AE_NOT_FOUND) 2716 return (AE_OK); 2717 if (ACPI_FAILURE(AcpiGetParent(parent, &r))) 2718 return (AE_NOT_FOUND); 2719 parent = r; 2720 } 2721 } 2722 2723 ACPI_STATUS 2724 acpi_GetProperty(device_t dev, ACPI_STRING propname, 2725 const ACPI_OBJECT **value) 2726 { 2727 device_t bus = device_get_parent(dev); 2728 2729 return (ACPI_GET_PROPERTY(bus, dev, propname, value)); 2730 } 2731 2732 /* 2733 * Allocate a buffer with a preset data size.
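* The header and the data area come from a single M_ACPIDEV allocation, so callers release both with one free() of the returned pointer.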
2734 */ 2735 ACPI_BUFFER * 2736 acpi_AllocBuffer(int size) 2737 { 2738 ACPI_BUFFER *buf; 2739 2740 if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) 2741 return (NULL); 2742 buf->Length = size; 2743 buf->Pointer = (void *)(buf + 1); 2744 return (buf); 2745 } 2746 2747 ACPI_STATUS 2748 acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) 2749 { 2750 ACPI_OBJECT arg1; 2751 ACPI_OBJECT_LIST args; 2752 2753 arg1.Type = ACPI_TYPE_INTEGER; 2754 arg1.Integer.Value = number; 2755 args.Count = 1; 2756 args.Pointer = &arg1; 2757 2758 return (AcpiEvaluateObject(handle, path, &args, NULL)); 2759 } 2760 2761 /* 2762 * Evaluate a path that should return an integer. 2763 */ 2764 ACPI_STATUS 2765 acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) 2766 { 2767 ACPI_STATUS status; 2768 ACPI_BUFFER buf; 2769 ACPI_OBJECT param; 2770 2771 if (handle == NULL) 2772 handle = ACPI_ROOT_OBJECT; 2773 2774 /* 2775 * Assume that what we've been pointed at is an Integer object, or 2776 * a method that will return an Integer. 2777 */ 2778 buf.Pointer = &param; 2779 buf.Length = sizeof(param); 2780 status = AcpiEvaluateObject(handle, path, NULL, &buf); 2781 if (ACPI_SUCCESS(status)) { 2782 if (param.Type == ACPI_TYPE_INTEGER) 2783 *number = param.Integer.Value; 2784 else 2785 status = AE_TYPE; 2786 } 2787 2788 /* 2789 * In some applications, a method that's expected to return an Integer 2790 * may instead return a Buffer (probably to simplify some internal 2791 * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, 2792 * convert it into an Integer as best we can. 2793 * 2794 * This is a hack. 2795 */ 2796 if (status == AE_BUFFER_OVERFLOW) { 2797 if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { 2798 status = AE_NO_MEMORY; 2799 } else { 2800 status = AcpiEvaluateObject(handle, path, NULL, &buf); 2801 if (ACPI_SUCCESS(status)) 2802 status = acpi_ConvertBufferToInteger(&buf, number); 2803 AcpiOsFree(buf.Pointer); 2804 } 2805 } 2806 return (status); 2807 } 2808 2809 ACPI_STATUS 2810 acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) 2811 { 2812 ACPI_OBJECT *p; 2813 UINT8 *val; 2814 int i; 2815 2816 p = (ACPI_OBJECT *)bufp->Pointer; 2817 if (p->Type == ACPI_TYPE_INTEGER) { 2818 *number = p->Integer.Value; 2819 return (AE_OK); 2820 } 2821 if (p->Type != ACPI_TYPE_BUFFER) 2822 return (AE_TYPE); 2823 if (p->Buffer.Length > sizeof(int)) 2824 return (AE_BAD_DATA); 2825 2826 *number = 0; 2827 val = p->Buffer.Pointer; 2828 for (i = 0; i < p->Buffer.Length; i++) 2829 *number += val[i] << (i * 8); 2830 return (AE_OK); 2831 } 2832 2833 /* 2834 * Iterate over the elements of a package object, calling the supplied 2835 * function for each element. 2836 * 2837 * XXX possible enhancement might be to abort traversal on error. 2838 */ 2839 ACPI_STATUS 2840 acpi_ForeachPackageObject(ACPI_OBJECT *pkg, 2841 void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) 2842 { 2843 ACPI_OBJECT *comp; 2844 int i; 2845 2846 if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) 2847 return (AE_BAD_PARAMETER); 2848 2849 /* Iterate over components */ 2850 i = 0; 2851 comp = pkg->Package.Elements; 2852 for (; i < pkg->Package.Count; i++, comp++) 2853 func(comp, arg); 2854 2855 return (AE_OK); 2856 } 2857 2858 /* 2859 * Find the (index)th resource object in a set.
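* A minimal usage sketch (the buffer name is hypothetical, not taken from this file): after evaluating _CRS into 'crsbuf', the third descriptor could be fetched with if (ACPI_SUCCESS(acpi_FindIndexedResource(&crsbuf, 2, &rp))) ... inspect rp->Type ...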
2860 */ 2861 ACPI_STATUS 2862 acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) 2863 { 2864 ACPI_RESOURCE *rp; 2865 int i; 2866 2867 rp = (ACPI_RESOURCE *)buf->Pointer; 2868 i = index; 2869 while (i-- > 0) { 2870 /* Range check */ 2871 if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) 2872 return (AE_BAD_PARAMETER); 2873 2874 /* Check for terminator */ 2875 if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) 2876 return (AE_NOT_FOUND); 2877 rp = ACPI_NEXT_RESOURCE(rp); 2878 } 2879 if (resp != NULL) 2880 *resp = rp; 2881 2882 return (AE_OK); 2883 } 2884 2885 /* 2886 * Append an ACPI_RESOURCE to an ACPI_BUFFER. 2887 * 2888 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER 2889 * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible 2890 * backing block. If the ACPI_RESOURCE is NULL, return an empty set of 2891 * resources. 2892 */ 2893 #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 2894 2895 ACPI_STATUS 2896 acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) 2897 { 2898 ACPI_RESOURCE *rp; 2899 void *newp; 2900 2901 /* Initialise the buffer if necessary. */ 2902 if (buf->Pointer == NULL) { 2903 buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; 2904 if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) 2905 return (AE_NO_MEMORY); 2906 rp = (ACPI_RESOURCE *)buf->Pointer; 2907 rp->Type = ACPI_RESOURCE_TYPE_END_TAG; 2908 rp->Length = ACPI_RS_SIZE_MIN; 2909 } 2910 if (res == NULL) 2911 return (AE_OK); 2912 2913 /* 2914 * Scan the current buffer looking for the terminator. 2915 * This will either find the terminator or hit the end 2916 * of the buffer and return an error. 2917 */ 2918 rp = (ACPI_RESOURCE *)buf->Pointer; 2919 for (;;) { 2920 /* Range check, don't go outside the buffer */ 2921 if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) 2922 return (AE_BAD_PARAMETER); 2923 if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) 2924 break; 2925 rp = ACPI_NEXT_RESOURCE(rp); 2926 } 2927 2928 /* 2929 * Check the size of the buffer and expand if required. 2930 * 2931 * Required size is: 2932 * size of existing resources before terminator + 2933 * size of new resource and header + 2934 * size of terminator. 2935 * 2936 * Note that this loop should really only run once, unless 2937 * for some reason we are stuffing a *really* huge resource. 2938 */ 2939 while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + 2940 res->Length + ACPI_RS_SIZE_NO_DATA + 2941 ACPI_RS_SIZE_MIN) >= buf->Length) { 2942 if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) 2943 return (AE_NO_MEMORY); 2944 bcopy(buf->Pointer, newp, buf->Length); 2945 rp = (ACPI_RESOURCE *)((u_int8_t *)newp + 2946 ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); 2947 AcpiOsFree(buf->Pointer); 2948 buf->Pointer = newp; 2949 buf->Length += buf->Length; 2950 } 2951 2952 /* Insert the new resource. */ 2953 bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); 2954 2955 /* And add the terminator. */ 2956 rp = ACPI_NEXT_RESOURCE(rp); 2957 rp->Type = ACPI_RESOURCE_TYPE_END_TAG; 2958 rp->Length = ACPI_RS_SIZE_MIN; 2959 2960 return (AE_OK); 2961 } 2962 2963 UINT64 2964 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) 2965 { 2966 /* 2967 * ACPI spec 9.1.1 defines this. 2968 * 2969 * "Arg2: Function Index Represents a specific function whose meaning is 2970 * specific to the UUID and Revision ID. Function indices should start 2971 * with 1. 
Function number zero is a query function (see the special 2972 * return code defined below)." 2973 */ 2974 ACPI_BUFFER buf; 2975 ACPI_OBJECT *obj; 2976 UINT64 ret = 0; 2977 int i; 2978 2979 if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { 2980 ACPI_INFO(("Failed to enumerate DSM functions\n")); 2981 return (0); 2982 } 2983 2984 obj = (ACPI_OBJECT *)buf.Pointer; 2985 KASSERT(obj, ("Object not allowed to be NULL\n")); 2986 2987 /* 2988 * From ACPI 6.2 spec 9.1.1: 2989 * If Function Index = 0, a Buffer containing a function index bitfield. 2990 * Otherwise, the return value and type depends on the UUID and revision 2991 * ID (see below). 2992 */ 2993 switch (obj->Type) { 2994 case ACPI_TYPE_BUFFER: 2995 for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) 2996 ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); 2997 break; 2998 case ACPI_TYPE_INTEGER: 2999 ACPI_BIOS_WARNING((AE_INFO, 3000 "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); 3001 ret = obj->Integer.Value; 3002 break; 3003 default: 3004 ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); 3005 }; 3006 3007 AcpiOsFree(obj); 3008 return ret; 3009 } 3010 3011 /* 3012 * DSM may return multiple types depending on the function. It is therefore 3013 * unsafe to use the typed evaluation. It is highly recommended that the caller 3014 * check the type of the returned object. 3015 */ 3016 ACPI_STATUS 3017 acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, 3018 UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) 3019 { 3020 return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, 3021 package, out_buf, ACPI_TYPE_ANY)); 3022 } 3023 3024 ACPI_STATUS 3025 acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, 3026 UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, 3027 ACPI_OBJECT_TYPE type) 3028 { 3029 ACPI_OBJECT arg[4]; 3030 ACPI_OBJECT_LIST arglist; 3031 ACPI_BUFFER buf; 3032 ACPI_STATUS status; 3033 3034 if (out_buf == NULL) 3035 return (AE_NO_MEMORY); 3036 3037 arg[0].Type = ACPI_TYPE_BUFFER; 3038 arg[0].Buffer.Length = ACPI_UUID_LENGTH; 3039 arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); 3040 arg[1].Type = ACPI_TYPE_INTEGER; 3041 arg[1].Integer.Value = revision; 3042 arg[2].Type = ACPI_TYPE_INTEGER; 3043 arg[2].Integer.Value = function; 3044 if (package) { 3045 arg[3] = *package; 3046 } else { 3047 arg[3].Type = ACPI_TYPE_PACKAGE; 3048 arg[3].Package.Count = 0; 3049 arg[3].Package.Elements = NULL; 3050 } 3051 3052 arglist.Pointer = arg; 3053 arglist.Count = 4; 3054 buf.Pointer = NULL; 3055 buf.Length = ACPI_ALLOCATE_BUFFER; 3056 status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); 3057 if (ACPI_FAILURE(status)) 3058 return (status); 3059 3060 KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); 3061 3062 *out_buf = buf; 3063 return (status); 3064 } 3065 3066 ACPI_STATUS 3067 acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, 3068 uint32_t *caps_in, uint32_t *caps_out, bool query) 3069 { 3070 ACPI_OBJECT arg[4], *ret; 3071 ACPI_OBJECT_LIST arglist; 3072 ACPI_BUFFER buf; 3073 ACPI_STATUS status; 3074 3075 arglist.Pointer = arg; 3076 arglist.Count = 4; 3077 arg[0].Type = ACPI_TYPE_BUFFER; 3078 arg[0].Buffer.Length = ACPI_UUID_LENGTH; 3079 arg[0].Buffer.Pointer = uuid; 3080 arg[1].Type = ACPI_TYPE_INTEGER; 3081 arg[1].Integer.Value = revision; 3082 arg[2].Type = ACPI_TYPE_INTEGER; 3083 arg[2].Integer.Value = count; 3084 arg[3].Type = ACPI_TYPE_BUFFER; 
3085 arg[3].Buffer.Length = count * sizeof(*caps_in); 3086 arg[3].Buffer.Pointer = (uint8_t *)caps_in; 3087 caps_in[0] = query ? 1 : 0; 3088 buf.Pointer = NULL; 3089 buf.Length = ACPI_ALLOCATE_BUFFER; 3090 status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, 3091 ACPI_TYPE_BUFFER); 3092 if (ACPI_FAILURE(status)) 3093 return (status); 3094 if (caps_out != NULL) { 3095 ret = buf.Pointer; 3096 if (ret->Buffer.Length != count * sizeof(*caps_out)) { 3097 AcpiOsFree(buf.Pointer); 3098 return (AE_BUFFER_OVERFLOW); 3099 } 3100 bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); 3101 } 3102 AcpiOsFree(buf.Pointer); 3103 return (status); 3104 } 3105 3106 /* 3107 * Set interrupt model. 3108 */ 3109 ACPI_STATUS 3110 acpi_SetIntrModel(int model) 3111 { 3112 3113 return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); 3114 } 3115 3116 /* 3117 * Walk subtables of a table and call a callback routine for each 3118 * subtable. The caller should provide the first subtable and a 3119 * pointer to the end of the table. This can be used to walk tables 3120 * such as MADT and SRAT that use subtable entries. 3121 */ 3122 void 3123 acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, 3124 void *arg) 3125 { 3126 ACPI_SUBTABLE_HEADER *entry; 3127 3128 for (entry = first; (void *)entry < end; ) { 3129 /* Avoid an infinite loop if we hit a bogus entry. */ 3130 if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) 3131 return; 3132 3133 handler(entry, arg); 3134 entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); 3135 } 3136 } 3137 3138 /* 3139 * DEPRECATED. This interface has serious deficiencies and will be 3140 * removed. 3141 * 3142 * Immediately enter the sleep state. In the old model, acpiconf(8) ran 3143 * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 3144 */ 3145 ACPI_STATUS 3146 acpi_SetSleepState(struct acpi_softc *sc, int state) 3147 { 3148 static int once; 3149 3150 if (!once) { 3151 device_printf(sc->acpi_dev, 3152 "warning: acpi_SetSleepState() deprecated, need to update your software\n"); 3153 once = 1; 3154 } 3155 return (acpi_EnterSleepState(sc, state)); 3156 } 3157 3158 #if defined(__amd64__) || defined(__i386__) 3159 static void 3160 acpi_sleep_force_task(void *context) 3161 { 3162 struct acpi_softc *sc = (struct acpi_softc *)context; 3163 3164 if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) 3165 device_printf(sc->acpi_dev, "force sleep state S%d failed\n", 3166 sc->acpi_next_sstate); 3167 } 3168 3169 static void 3170 acpi_sleep_force(void *arg) 3171 { 3172 struct acpi_softc *sc = (struct acpi_softc *)arg; 3173 3174 device_printf(sc->acpi_dev, 3175 "suspend request timed out, forcing sleep now\n"); 3176 /* 3177 * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). 3178 * Suspend from acpi_task thread instead. 3179 */ 3180 if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, 3181 acpi_sleep_force_task, sc))) 3182 device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); 3183 } 3184 #endif 3185 3186 /* 3187 * Request that the system enter the given suspend state. All /dev/apm 3188 * devices and devd(8) will be notified. Userland then has a chance to 3189 * save state and acknowledge the request. The system sleeps once all 3190 * acks are in. 
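* Acknowledgements arrive through acpi_AckSleepState(); if userland never answers, the susp_force_to callout armed below forces the sleep after roughly ten seconds.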
3191 */ 3192 int 3193 acpi_ReqSleepState(struct acpi_softc *sc, int state) 3194 { 3195 #if defined(__amd64__) || defined(__i386__) 3196 struct apm_clone_data *clone; 3197 ACPI_STATUS status; 3198 3199 if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) 3200 return (EINVAL); 3201 if (!acpi_sleep_states[state]) 3202 return (EOPNOTSUPP); 3203 3204 /* 3205 * If a reboot/shutdown/suspend request is already in progress or 3206 * suspend is blocked due to an upcoming shutdown, just return. 3207 */ 3208 if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { 3209 return (0); 3210 } 3211 3212 /* Wait until sleep is enabled. */ 3213 while (sc->acpi_sleep_disabled) { 3214 AcpiOsSleep(1000); 3215 } 3216 3217 ACPI_LOCK(acpi); 3218 3219 sc->acpi_next_sstate = state; 3220 3221 /* S5 (soft-off) should be entered directly with no waiting. */ 3222 if (state == ACPI_STATE_S5) { 3223 ACPI_UNLOCK(acpi); 3224 status = acpi_EnterSleepState(sc, state); 3225 return (ACPI_SUCCESS(status) ? 0 : ENXIO); 3226 } 3227 3228 /* Record the pending state and notify all apm devices. */ 3229 STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { 3230 clone->notify_status = APM_EV_NONE; 3231 if ((clone->flags & ACPI_EVF_DEVD) == 0) { 3232 selwakeuppri(&clone->sel_read, PZERO); 3233 KNOTE_LOCKED(&clone->sel_read.si_note, 0); 3234 } 3235 } 3236 3237 /* If devd(8) is not running, immediately enter the sleep state. */ 3238 if (!devctl_process_running()) { 3239 ACPI_UNLOCK(acpi); 3240 status = acpi_EnterSleepState(sc, state); 3241 return (ACPI_SUCCESS(status) ? 0 : ENXIO); 3242 } 3243 3244 /* 3245 * Set a timeout to fire if userland doesn't ack the suspend request 3246 * in time. This way we still eventually go to sleep if we were 3247 * overheating or running low on battery, even if userland is hung. 3248 * We cancel this timeout once all userland acks are in or the 3249 * suspend request is aborted. 3250 */ 3251 callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); 3252 ACPI_UNLOCK(acpi); 3253 3254 /* Now notify devd(8) also. */ 3255 acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); 3256 3257 return (0); 3258 #else 3259 /* This platform does not support acpi suspend/resume. */ 3260 return (EOPNOTSUPP); 3261 #endif 3262 } 3263 3264 /* 3265 * Acknowledge (or reject) a pending sleep state. The caller has 3266 * prepared for suspend and is now ready for it to proceed. If the 3267 * error argument is non-zero, it indicates suspend should be cancelled 3268 * and gives an errno value describing why. Once all votes are in, 3269 * we suspend the system. 3270 */ 3271 int 3272 acpi_AckSleepState(struct apm_clone_data *clone, int error) 3273 { 3274 #if defined(__amd64__) || defined(__i386__) 3275 struct acpi_softc *sc; 3276 int ret, sleeping; 3277 3278 /* If no pending sleep state, return an error. */ 3279 ACPI_LOCK(acpi); 3280 sc = clone->acpi_sc; 3281 if (sc->acpi_next_sstate == 0) { 3282 ACPI_UNLOCK(acpi); 3283 return (ENXIO); 3284 } 3285 3286 /* Caller wants to abort suspend process. */ 3287 if (error) { 3288 sc->acpi_next_sstate = 0; 3289 callout_stop(&sc->susp_force_to); 3290 device_printf(sc->acpi_dev, 3291 "listener on %s cancelled the pending suspend\n", 3292 devtoname(clone->cdev)); 3293 ACPI_UNLOCK(acpi); 3294 return (0); 3295 } 3296 3297 /* 3298 * Mark this device as acking the suspend request. Then, walk through 3299 * all devices, seeing if they agree yet. We only count devices that 3300 * are writable since read-only devices couldn't ack the request. 
3301 */ 3302 sleeping = TRUE; 3303 clone->notify_status = APM_EV_ACKED; 3304 STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { 3305 if ((clone->flags & ACPI_EVF_WRITE) != 0 && 3306 clone->notify_status != APM_EV_ACKED) { 3307 sleeping = FALSE; 3308 break; 3309 } 3310 } 3311 3312 /* If all devices have voted "yes", we will suspend now. */ 3313 if (sleeping) 3314 callout_stop(&sc->susp_force_to); 3315 ACPI_UNLOCK(acpi); 3316 ret = 0; 3317 if (sleeping) { 3318 if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) 3319 ret = ENODEV; 3320 } 3321 return (ret); 3322 #else 3323 /* This platform does not support acpi suspend/resume. */ 3324 return (EOPNOTSUPP); 3325 #endif 3326 } 3327 3328 static void 3329 acpi_sleep_enable(void *arg) 3330 { 3331 struct acpi_softc *sc = (struct acpi_softc *)arg; 3332 3333 ACPI_LOCK_ASSERT(acpi); 3334 3335 /* Reschedule if the system is not fully up and running. */ 3336 if (!AcpiGbl_SystemAwakeAndRunning) { 3337 callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); 3338 return; 3339 } 3340 3341 sc->acpi_sleep_disabled = FALSE; 3342 } 3343 3344 static ACPI_STATUS 3345 acpi_sleep_disable(struct acpi_softc *sc) 3346 { 3347 ACPI_STATUS status; 3348 3349 /* Fail if the system is not fully up and running. */ 3350 if (!AcpiGbl_SystemAwakeAndRunning) 3351 return (AE_ERROR); 3352 3353 ACPI_LOCK(acpi); 3354 status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; 3355 sc->acpi_sleep_disabled = TRUE; 3356 ACPI_UNLOCK(acpi); 3357 3358 return (status); 3359 } 3360 3361 enum acpi_sleep_state { 3362 ACPI_SS_NONE, 3363 ACPI_SS_GPE_SET, 3364 ACPI_SS_DEV_SUSPEND, 3365 ACPI_SS_SLP_PREP, 3366 ACPI_SS_SLEPT, 3367 }; 3368 3369 /* 3370 * Enter the desired system sleep state. 3371 * 3372 * Currently we support S1-S5 but S4 is only S4BIOS 3373 */ 3374 static ACPI_STATUS 3375 acpi_EnterSleepState(struct acpi_softc *sc, int state) 3376 { 3377 register_t intr; 3378 ACPI_STATUS status; 3379 ACPI_EVENT_STATUS power_button_status; 3380 enum acpi_sleep_state slp_state; 3381 int sleep_result; 3382 3383 ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); 3384 3385 if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) 3386 return_ACPI_STATUS (AE_BAD_PARAMETER); 3387 if (!acpi_sleep_states[state]) { 3388 device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", 3389 state); 3390 return (AE_SUPPORT); 3391 } 3392 3393 /* Re-entry once we're suspending is not allowed. */ 3394 status = acpi_sleep_disable(sc); 3395 if (ACPI_FAILURE(status)) { 3396 device_printf(sc->acpi_dev, 3397 "suspend request ignored (not ready yet)\n"); 3398 return (status); 3399 } 3400 3401 if (state == ACPI_STATE_S5) { 3402 /* 3403 * Shut down cleanly and power off. This will call us back through the 3404 * shutdown handlers. 3405 */ 3406 shutdown_nice(RB_POWEROFF); 3407 return_ACPI_STATUS (AE_OK); 3408 } 3409 3410 EVENTHANDLER_INVOKE(power_suspend_early); 3411 stop_all_proc(); 3412 suspend_all_fs(); 3413 EVENTHANDLER_INVOKE(power_suspend); 3414 3415 #ifdef EARLY_AP_STARTUP 3416 MPASS(mp_ncpus == 1 || smp_started); 3417 thread_lock(curthread); 3418 sched_bind(curthread, 0); 3419 thread_unlock(curthread); 3420 #else 3421 if (smp_started) { 3422 thread_lock(curthread); 3423 sched_bind(curthread, 0); 3424 thread_unlock(curthread); 3425 } 3426 #endif 3427 3428 /* 3429 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME 3430 */ 3431 bus_topo_lock(); 3432 3433 slp_state = ACPI_SS_NONE; 3434 3435 sc->acpi_sstate = state; 3436 3437 /* Enable any GPEs as appropriate and requested by the user. 
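acpi_wake_prep_walk() walks the namespace under \_SB_ and runs acpi_wake_sleep_prep() on each wake-capable device.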
*/ 3438 acpi_wake_prep_walk(state); 3439 slp_state = ACPI_SS_GPE_SET; 3440 3441 /* 3442 * Inform all devices that we are going to sleep. If at least one 3443 * device fails, DEVICE_SUSPEND() automatically resumes the tree. 3444 * 3445 * XXX Note that a better two-pass approach with a 'veto' pass 3446 * followed by a "real thing" pass would be better, but the current 3447 * bus interface does not provide for this. 3448 */ 3449 if (DEVICE_SUSPEND(root_bus) != 0) { 3450 device_printf(sc->acpi_dev, "device_suspend failed\n"); 3451 goto backout; 3452 } 3453 slp_state = ACPI_SS_DEV_SUSPEND; 3454 3455 status = AcpiEnterSleepStatePrep(state); 3456 if (ACPI_FAILURE(status)) { 3457 device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", 3458 AcpiFormatException(status)); 3459 goto backout; 3460 } 3461 slp_state = ACPI_SS_SLP_PREP; 3462 3463 if (sc->acpi_sleep_delay > 0) 3464 DELAY(sc->acpi_sleep_delay * 1000000); 3465 3466 suspendclock(); 3467 intr = intr_disable(); 3468 if (state != ACPI_STATE_S1) { 3469 sleep_result = acpi_sleep_machdep(sc, state); 3470 acpi_wakeup_machdep(sc, state, sleep_result, 0); 3471 3472 /* 3473 * XXX According to ACPI specification SCI_EN bit should be restored 3474 * by ACPI platform (BIOS, firmware) to its pre-sleep state. 3475 * Unfortunately some BIOSes fail to do that and that leads to 3476 * unexpected and serious consequences during wake up like a system 3477 * getting stuck in SMI handlers. 3478 * This hack is picked up from Linux, which claims that it follows 3479 * Windows behavior. 3480 */ 3481 if (sleep_result == 1 && state != ACPI_STATE_S4) 3482 AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); 3483 3484 if (sleep_result == 1 && state == ACPI_STATE_S3) { 3485 /* 3486 * Prevent mis-interpretation of the wakeup by power button 3487 * as a request for power off. 3488 * Ideally we should post an appropriate wakeup event, 3489 * perhaps using acpi_event_power_button_wake or alike. 3490 * 3491 * Clearing of power button status after wakeup is mandated 3492 * by ACPI specification in section "Fixed Power Button". 3493 * 3494 * XXX As of ACPICA 20121114 AcpiGetEventStatus provides 3495 * status as 0/1 corressponding to inactive/active despite 3496 * its type being ACPI_EVENT_STATUS. In other words, 3497 * we should not test for ACPI_EVENT_FLAG_SET for time being. 3498 */ 3499 if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, 3500 &power_button_status)) && power_button_status != 0) { 3501 AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); 3502 device_printf(sc->acpi_dev, 3503 "cleared fixed power button status\n"); 3504 } 3505 } 3506 3507 intr_restore(intr); 3508 3509 /* call acpi_wakeup_machdep() again with interrupt enabled */ 3510 acpi_wakeup_machdep(sc, state, sleep_result, 1); 3511 3512 AcpiLeaveSleepStatePrep(state); 3513 3514 if (sleep_result == -1) 3515 goto backout; 3516 3517 /* Re-enable ACPI hardware on wakeup from sleep state 4. */ 3518 if (state == ACPI_STATE_S4) 3519 AcpiEnable(); 3520 } else { 3521 status = AcpiEnterSleepState(state); 3522 intr_restore(intr); 3523 AcpiLeaveSleepStatePrep(state); 3524 if (ACPI_FAILURE(status)) { 3525 device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", 3526 AcpiFormatException(status)); 3527 goto backout; 3528 } 3529 } 3530 slp_state = ACPI_SS_SLEPT; 3531 3532 /* 3533 * Back out state according to how far along we got in the suspend 3534 * process. This handles both the error and success cases. 
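* slp_state records the last step that completed (see enum acpi_sleep_state above), so each backout action below runs only if its step actually happened.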
3535 */ 3536 backout: 3537 if (slp_state >= ACPI_SS_SLP_PREP) 3538 resumeclock(); 3539 if (slp_state >= ACPI_SS_GPE_SET) { 3540 acpi_wake_prep_walk(state); 3541 sc->acpi_sstate = ACPI_STATE_S0; 3542 } 3543 if (slp_state >= ACPI_SS_DEV_SUSPEND) 3544 DEVICE_RESUME(root_bus); 3545 if (slp_state >= ACPI_SS_SLP_PREP) 3546 AcpiLeaveSleepState(state); 3547 if (slp_state >= ACPI_SS_SLEPT) { 3548 #if defined(__i386__) || defined(__amd64__) 3549 /* NB: we are still using ACPI timecounter at this point. */ 3550 resume_TSC(); 3551 #endif 3552 acpi_resync_clock(sc); 3553 acpi_enable_fixed_events(sc); 3554 } 3555 sc->acpi_next_sstate = 0; 3556 3557 bus_topo_unlock(); 3558 3559 #ifdef EARLY_AP_STARTUP 3560 thread_lock(curthread); 3561 sched_unbind(curthread); 3562 thread_unlock(curthread); 3563 #else 3564 if (smp_started) { 3565 thread_lock(curthread); 3566 sched_unbind(curthread); 3567 thread_unlock(curthread); 3568 } 3569 #endif 3570 3571 resume_all_fs(); 3572 resume_all_proc(); 3573 3574 EVENTHANDLER_INVOKE(power_resume); 3575 3576 /* Allow another sleep request after a while. */ 3577 callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); 3578 3579 /* Run /etc/rc.resume after we are back. */ 3580 if (devctl_process_running()) 3581 acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); 3582 3583 return_ACPI_STATUS (status); 3584 } 3585 3586 static void 3587 acpi_resync_clock(struct acpi_softc *sc) 3588 { 3589 3590 /* 3591 * Warm up timecounter again and reset system clock. 3592 */ 3593 (void)timecounter->tc_get_timecount(timecounter); 3594 inittodr(time_second + sc->acpi_sleep_delay); 3595 } 3596 3597 /* Enable or disable the device's wake GPE. */ 3598 int 3599 acpi_wake_set_enable(device_t dev, int enable) 3600 { 3601 struct acpi_prw_data prw; 3602 ACPI_STATUS status; 3603 int flags; 3604 3605 /* Make sure the device supports waking the system and get the GPE. */ 3606 if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) 3607 return (ENXIO); 3608 3609 flags = acpi_get_flags(dev); 3610 if (enable) { 3611 status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, 3612 ACPI_GPE_ENABLE); 3613 if (ACPI_FAILURE(status)) { 3614 device_printf(dev, "enable wake failed\n"); 3615 return (ENXIO); 3616 } 3617 acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); 3618 } else { 3619 status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, 3620 ACPI_GPE_DISABLE); 3621 if (ACPI_FAILURE(status)) { 3622 device_printf(dev, "disable wake failed\n"); 3623 return (ENXIO); 3624 } 3625 acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); 3626 } 3627 3628 return (0); 3629 } 3630 3631 static int 3632 acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) 3633 { 3634 struct acpi_prw_data prw; 3635 device_t dev; 3636 3637 /* Check that this is a wake-capable device and get its GPE. */ 3638 if (acpi_parse_prw(handle, &prw) != 0) 3639 return (ENXIO); 3640 dev = acpi_get_device(handle); 3641 3642 /* 3643 * The destination sleep state must be less than (i.e., higher power) 3644 * or equal to the value specified by _PRW. If this GPE cannot be 3645 * enabled for the next sleep state, then disable it. If it can and 3646 * the user requested it be enabled, turn on any required power resources 3647 * and set _PSW. 
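* (prw.lowest_wake is element 1 of the _PRW package; see acpi_parse_prw() below.)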
3648 */ 3649 if (sstate > prw.lowest_wake) { 3650 AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); 3651 if (bootverbose) 3652 device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", 3653 acpi_name(handle), sstate); 3654 } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { 3655 acpi_pwr_wake_enable(handle, 1); 3656 acpi_SetInteger(handle, "_PSW", 1); 3657 if (bootverbose) 3658 device_printf(dev, "wake_prep enabled for %s (S%d)\n", 3659 acpi_name(handle), sstate); 3660 } 3661 3662 return (0); 3663 } 3664 3665 static int 3666 acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) 3667 { 3668 struct acpi_prw_data prw; 3669 device_t dev; 3670 3671 /* 3672 * Check that this is a wake-capable device and get its GPE. Return 3673 * now if the user didn't enable this device for wake. 3674 */ 3675 if (acpi_parse_prw(handle, &prw) != 0) 3676 return (ENXIO); 3677 dev = acpi_get_device(handle); 3678 if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) 3679 return (0); 3680 3681 /* 3682 * If this GPE couldn't be enabled for the previous sleep state, it was 3683 * disabled before going to sleep so re-enable it. If it was enabled, 3684 * clear _PSW and turn off any power resources it used. 3685 */ 3686 if (sstate > prw.lowest_wake) { 3687 AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); 3688 if (bootverbose) 3689 device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); 3690 } else { 3691 acpi_SetInteger(handle, "_PSW", 0); 3692 acpi_pwr_wake_enable(handle, 0); 3693 if (bootverbose) 3694 device_printf(dev, "run_prep cleaned up for %s\n", 3695 acpi_name(handle)); 3696 } 3697 3698 return (0); 3699 } 3700 3701 static ACPI_STATUS 3702 acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) 3703 { 3704 int sstate; 3705 3706 /* If suspending, run the sleep prep function, otherwise wake. */ 3707 sstate = *(int *)context; 3708 if (AcpiGbl_SystemAwakeAndRunning) 3709 acpi_wake_sleep_prep(handle, sstate); 3710 else 3711 acpi_wake_run_prep(handle, sstate); 3712 return (AE_OK); 3713 } 3714 3715 /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ 3716 static int 3717 acpi_wake_prep_walk(int sstate) 3718 { 3719 ACPI_HANDLE sb_handle; 3720 3721 if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) 3722 AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, 3723 acpi_wake_prep, NULL, &sstate, NULL); 3724 return (0); 3725 } 3726 3727 /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ 3728 static int 3729 acpi_wake_sysctl_walk(device_t dev) 3730 { 3731 int error, i, numdevs; 3732 device_t *devlist; 3733 device_t child; 3734 ACPI_STATUS status; 3735 3736 error = device_get_children(dev, &devlist, &numdevs); 3737 if (error != 0 || numdevs == 0) { 3738 if (numdevs == 0) 3739 free(devlist, M_TEMP); 3740 return (error); 3741 } 3742 for (i = 0; i < numdevs; i++) { 3743 child = devlist[i]; 3744 acpi_wake_sysctl_walk(child); 3745 if (!device_is_attached(child)) 3746 continue; 3747 status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); 3748 if (ACPI_SUCCESS(status)) { 3749 SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), 3750 SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, 3751 "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, 3752 acpi_wake_set_sysctl, "I", "Device set to wake the system"); 3753 } 3754 } 3755 free(devlist, M_TEMP); 3756 3757 return (0); 3758 } 3759 3760 /* Enable or disable wake from userland. 
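This backs the per-device "wake" sysctl installed by acpi_wake_sysctl_walk() above.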
/* Enable or disable wake from userland. */
static int
acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
{
    int enable, error;
    device_t dev;

    dev = (device_t)arg1;
    enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;

    error = sysctl_handle_int(oidp, &enable, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (enable != 0 && enable != 1)
        return (EINVAL);

    return (acpi_wake_set_enable(dev, enable));
}

/* Parse a device's _PRW into a structure. */
int
acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
{
    ACPI_STATUS status;
    ACPI_BUFFER prw_buffer;
    ACPI_OBJECT *res, *res2;
    int error, i, power_count;

    if (h == NULL || prw == NULL)
        return (EINVAL);

    /*
     * The _PRW object (7.2.9) is only required for devices that have the
     * ability to wake the system from a sleeping state.
     */
    error = EINVAL;
    prw_buffer.Pointer = NULL;
    prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
    if (ACPI_FAILURE(status))
        return (ENOENT);
    res = (ACPI_OBJECT *)prw_buffer.Pointer;
    if (res == NULL)
        return (ENOENT);
    if (!ACPI_PKG_VALID(res, 2))
        goto out;

    /*
     * Element 1 of the _PRW object:
     * The lowest power system sleeping state that can be entered while still
     * providing wake functionality.  The sleeping state being entered must
     * be less than (i.e., higher power) or equal to this value.
     */
    if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
        goto out;

    /*
     * Element 0 of the _PRW object:
     */
    switch (res->Package.Elements[0].Type) {
    case ACPI_TYPE_INTEGER:
        /*
         * If the data type of this package element is numeric, then this
         * _PRW package element is the bit index in the GPEx_EN, in the
         * GPE blocks described in the FADT, of the enable bit that is
         * enabled for the wake event.
         */
        prw->gpe_handle = NULL;
        prw->gpe_bit = res->Package.Elements[0].Integer.Value;
        error = 0;
        break;
    case ACPI_TYPE_PACKAGE:
        /*
         * If the data type of this package element is a package, then this
         * _PRW package element is itself a package containing two
         * elements.  The first is an object reference to the GPE Block
         * device that contains the GPE that will be triggered by the wake
         * event.  The second element is numeric and it contains the bit
         * index in the GPEx_EN, in the GPE Block referenced by the first
         * element in the package, of the enable bit that is enabled for
         * the wake event.
         *
         * For example, if this field is a package then it is of the form:
         * Package() {\_SB.PCI0.ISA.GPE, 2}
         */
        res2 = &res->Package.Elements[0];
        if (!ACPI_PKG_VALID(res2, 2))
            goto out;
        prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
        if (prw->gpe_handle == NULL)
            goto out;
        if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
            goto out;
        error = 0;
        break;
    default:
        goto out;
    }

    /* Elements 2 to N of the _PRW object are power resources. */
    power_count = res->Package.Count - 2;
    if (power_count > ACPI_PRW_MAX_POWERRES) {
        printf("ACPI device %s has too many power resources\n", acpi_name(h));
        power_count = 0;
    }
    prw->power_res_count = power_count;
    for (i = 0; i < power_count; i++)
        prw->power_res[i] = res->Package.Elements[i];

out:
    if (prw_buffer.Pointer != NULL)
        AcpiOsFree(prw_buffer.Pointer);
    return (error);
}
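
/*
 * Example (editor's illustrative sketch): a caller holding a device's ACPI
 * handle can use acpi_parse_prw() to check whether the device advertises
 * wake capability from a given sleep state:
 *
 *	struct acpi_prw_data prw;
 *
 *	if (acpi_parse_prw(acpi_get_handle(dev), &prw) == 0 &&
 *	    prw.lowest_wake >= ACPI_STATE_S3)
 *		device_printf(dev, "can wake the system from S3\n");
 */
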
/*
 * ACPI Event Handlers
 */

/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */

static void
acpi_system_eventhandler_sleep(void *arg, int state)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;
    int ret;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Check if button action is disabled or unknown. */
    if (state == ACPI_STATE_UNKNOWN)
        return;

    /* Request that the system prepare to enter the given suspend state. */
    ret = acpi_ReqSleepState(sc, state);
    if (ret != 0)
        device_printf(sc->acpi_dev,
            "request to enter state S%d failed (err %d)\n", state, ret);

    return_VOID;
}

static void
acpi_system_eventhandler_wakeup(void *arg, int state)
{

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Currently, nothing to do for wakeup. */

    return_VOID;
}

/*
 * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
 */
static void
acpi_invoke_sleep_eventhandler(void *context)
{

    EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
}

static void
acpi_invoke_wake_eventhandler(void *context)
{

    EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
}
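
/*
 * Example (editor's illustrative sketch): other kernel code can subscribe to
 * the acpi_sleep_event/acpi_wakeup_event eventhandlers invoked above; the
 * handler name "foo_before_sleep" is hypothetical.
 *
 *	static void
 *	foo_before_sleep(void *arg, int state)
 *	{
 *		printf("preparing hardware for S%d\n", state);
 *	}
 *
 *	EVENTHANDLER_REGISTER(acpi_sleep_event, foo_before_sleep, sc,
 *	    EVENTHANDLER_PRI_ANY);
 */
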
UINT32
acpi_event_power_button_sleep(void *context)
{
#if defined(__amd64__) || defined(__i386__)
    struct acpi_softc *sc = (struct acpi_softc *)context;
#else
    (void)context;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

#if defined(__amd64__) || defined(__i386__)
    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
        acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
        return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
#else
    shutdown_nice(RB_POWEROFF);
#endif

    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

UINT32
acpi_event_power_button_wake(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
        acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
        return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

UINT32
acpi_event_sleep_button_sleep(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
        acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
        return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

UINT32
acpi_event_sleep_button_wake(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
        acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
        return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

/*
 * XXX This static buffer is suboptimal.  There is no locking so only
 * use this for single-threaded callers.
 */
char *
acpi_name(ACPI_HANDLE handle)
{
    ACPI_BUFFER buf;
    static char data[256];

    buf.Length = sizeof(data);
    buf.Pointer = data;

    if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
        return (data);
    return ("(unknown)");
}

/*
 * Debugging/bug-avoidance.  Avoid trying to fetch info on various
 * parts of the namespace.
 */
int
acpi_avoid(ACPI_HANDLE handle)
{
    char *cp, *env, *np;
    int len;

    np = acpi_name(handle);
    if (*np == '\\')
        np++;
    if ((env = kern_getenv("debug.acpi.avoid")) == NULL)
        return (0);

    /* Scan the avoid list, checking for a match. */
    cp = env;
    for (;;) {
        while (*cp != 0 && isspace(*cp))
            cp++;
        if (*cp == 0)
            break;
        len = 0;
        while (cp[len] != 0 && !isspace(cp[len]))
            len++;
        if (!strncmp(cp, np, len)) {
            freeenv(env);
            return (1);
        }
        cp += len;
    }
    freeenv(env);

    return (0);
}

/*
 * Debugging/bug-avoidance.  Disable ACPI subsystem components.
 */
int
acpi_disabled(char *subsys)
{
    char *cp, *env;
    int len;

    if ((env = kern_getenv("debug.acpi.disabled")) == NULL)
        return (0);
    if (strcmp(env, "all") == 0) {
        freeenv(env);
        return (1);
    }

    /* Scan the disable list, checking for a match. */
    cp = env;
    for (;;) {
        while (*cp != '\0' && isspace(*cp))
            cp++;
        if (*cp == '\0')
            break;
        len = 0;
        while (cp[len] != '\0' && !isspace(cp[len]))
            len++;
        if (strncmp(cp, subsys, len) == 0) {
            freeenv(env);
            return (1);
        }
        cp += len;
    }
    freeenv(env);

    return (0);
}
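
/*
 * Example (editor's illustrative sketch): ACPI subsystem drivers typically
 * gate their probe on acpi_disabled(), so that setting the loader tunable
 * debug.acpi.disabled="thermal", for instance, keeps the thermal driver from
 * attaching:
 *
 *	if (acpi_disabled("thermal"))
 *		return (ENXIO);
 */
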
static void
acpi_lookup(void *arg, const char *name, device_t *dev)
{
    ACPI_HANDLE handle;

    if (*dev != NULL)
        return;

    /*
     * Allow any handle name that is specified as an absolute path and
     * starts with '\'.  We could restrict this to \_SB and friends,
     * but see acpi_probe_children() for notes on why we scan the entire
     * namespace for devices.
     *
     * XXX: The pathname argument to AcpiGetHandle() should be fixed to
     * be const.
     */
    if (name[0] != '\\')
        return;
    if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name),
        &handle)))
        return;
    *dev = acpi_get_device(handle);
}

/*
 * Control interface.
 *
 * We multiplex ioctls for all participating ACPI devices here.  Individual
 * drivers wanting to be accessible via /dev/acpi should use the
 * register/deregister interface to make their handlers visible.
 */
struct acpi_ioctl_hook
{
    TAILQ_ENTRY(acpi_ioctl_hook) link;
    u_long cmd;
    acpi_ioctl_fn fn;
    void *arg;
};

static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks;
static int acpi_ioctl_hooks_initted;

int
acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
{
    struct acpi_ioctl_hook *hp;

    if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
        return (ENOMEM);
    hp->cmd = cmd;
    hp->fn = fn;
    hp->arg = arg;

    ACPI_LOCK(acpi);
    if (acpi_ioctl_hooks_initted == 0) {
        TAILQ_INIT(&acpi_ioctl_hooks);
        acpi_ioctl_hooks_initted = 1;
    }
    TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
    ACPI_UNLOCK(acpi);

    return (0);
}

void
acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
{
    struct acpi_ioctl_hook *hp;

    ACPI_LOCK(acpi);
    TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
        if (hp->cmd == cmd && hp->fn == fn)
            break;

    if (hp != NULL) {
        TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
        free(hp, M_ACPIDEV);
    }
    ACPI_UNLOCK(acpi);
}
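
/*
 * Example (editor's illustrative sketch): a driver that wants to service a
 * private ioctl through /dev/acpi registers a handler whose shape matches the
 * hp->fn(cmd, addr, arg) call made in acpiioctl() below; "ACPIIO_FOO_GET",
 * "foo_ioctl" and "struct foo_softc" are hypothetical names.
 *
 *	static int
 *	foo_ioctl(u_long cmd, caddr_t addr, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *		...
 *		return (0);
 *	}
 *
 *	acpi_register_ioctl(ACPIIO_FOO_GET, foo_ioctl, sc);
 *	...
 *	acpi_deregister_ioctl(ACPIIO_FOO_GET, foo_ioctl);
 */
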
static int
acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}

static int
acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}

static int
acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
    struct acpi_softc *sc;
    struct acpi_ioctl_hook *hp;
    int error, state;

    error = 0;
    hp = NULL;
    sc = dev->si_drv1;

    /*
     * Scan the list of registered ioctls, looking for handlers.
     */
    ACPI_LOCK(acpi);
    if (acpi_ioctl_hooks_initted)
        TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
            if (hp->cmd == cmd)
                break;
        }
    ACPI_UNLOCK(acpi);
    if (hp)
        return (hp->fn(cmd, addr, hp->arg));

    /*
     * Core ioctls are not permitted unless the descriptor was opened for
     * writing, since they change system state.  The registered (non-core)
     * ioctls handled above currently only fetch information and do not
     * change system behavior.
     */
    if ((flag & FWRITE) == 0)
        return (EPERM);

    /* Core system ioctls. */
    switch (cmd) {
    case ACPIIO_REQSLPSTATE:
        state = *(int *)addr;
        if (state != ACPI_STATE_S5)
            return (acpi_ReqSleepState(sc, state));
        device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
        error = EOPNOTSUPP;
        break;
    case ACPIIO_ACKSLPSTATE:
        error = *(int *)addr;
        error = acpi_AckSleepState(sc->acpi_clone, error);
        break;
    case ACPIIO_SETSLPSTATE:	/* DEPRECATED */
        state = *(int *)addr;
        if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
            return (EINVAL);
        if (!acpi_sleep_states[state])
            return (EOPNOTSUPP);
        if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
            error = ENXIO;
        break;
    default:
        error = ENXIO;
        break;
    }

    return (error);
}

static int
acpi_sname2sstate(const char *sname)
{
    int sstate;

    if (toupper(sname[0]) == 'S') {
        sstate = sname[1] - '0';
        if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
            sname[2] == '\0')
            return (sstate);
    } else if (strcasecmp(sname, "NONE") == 0)
        return (ACPI_STATE_UNKNOWN);
    return (-1);
}

static const char *
acpi_sstate2sname(int sstate)
{
    static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };

    if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
        return (snames[sstate]);
    else if (sstate == ACPI_STATE_UNKNOWN)
        return ("NONE");
    return (NULL);
}

static int
acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error;
    struct sbuf sb;
    UINT8 state;

    sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
        if (acpi_sleep_states[state])
            sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);
    return (error);
}

static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    char sleep_state[10];
    int error, new_state, old_state;

    old_state = *(int *)oidp->oid_arg1;
    strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
    error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
    if (error == 0 && req->newptr != NULL) {
        new_state = acpi_sname2sstate(sleep_state);
        if (new_state < ACPI_STATE_S1)
            return (EINVAL);
        if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
            return (EOPNOTSUPP);
        if (new_state != old_state)
            *(int *)oidp->oid_arg1 = new_state;
    }
    return (error);
}

/* Inform devctl(4) when we receive a Notify. */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
{
    char notify_buf[16];
    ACPI_BUFFER handle_buf;
    ACPI_STATUS status;

    if (subsystem == NULL)
        return;

    handle_buf.Pointer = NULL;
    handle_buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiNsHandleToPathname(h, &handle_buf, FALSE);
    if (ACPI_FAILURE(status))
        return;
    snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
    devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
    AcpiOsFree(handle_buf.Pointer);
}
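
/*
 * Example (editor's illustrative sketch): drivers forward interesting Notify
 * values so devd(8) rules can react to them; a thermal driver might do
 *
 *	acpi_UserNotify("Thermal", h, notify);
 *
 * which devd(8) can match with a notify rule on system "ACPI" and
 * subsystem "Thermal", with the handle's pathname and "notify=0x.." as data.
 */
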
#ifdef ACPI_DEBUG
/*
 * Support for parsing debug options from the kernel environment.
 *
 * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
 * by specifying the names of the bits in the debug.acpi.layer and
 * debug.acpi.level environment variables.  Bits may be unset by
 * prefixing the bit name with !.
 */
struct debugtag
{
    char *name;
    UINT32 value;
};

static struct debugtag dbg_layer[] = {
    {"ACPI_UTILITIES", ACPI_UTILITIES},
    {"ACPI_HARDWARE", ACPI_HARDWARE},
    {"ACPI_EVENTS", ACPI_EVENTS},
    {"ACPI_TABLES", ACPI_TABLES},
    {"ACPI_NAMESPACE", ACPI_NAMESPACE},
    {"ACPI_PARSER", ACPI_PARSER},
    {"ACPI_DISPATCHER", ACPI_DISPATCHER},
    {"ACPI_EXECUTER", ACPI_EXECUTER},
    {"ACPI_RESOURCES", ACPI_RESOURCES},
    {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER},
    {"ACPI_OS_SERVICES", ACPI_OS_SERVICES},
    {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER},
    {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS},

    {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER},
    {"ACPI_BATTERY", ACPI_BATTERY},
    {"ACPI_BUS", ACPI_BUS},
    {"ACPI_BUTTON", ACPI_BUTTON},
    {"ACPI_EC", ACPI_EC},
    {"ACPI_FAN", ACPI_FAN},
    {"ACPI_POWERRES", ACPI_POWERRES},
    {"ACPI_PROCESSOR", ACPI_PROCESSOR},
    {"ACPI_THERMAL", ACPI_THERMAL},
    {"ACPI_TIMER", ACPI_TIMER},
    {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS},
    {NULL, 0}
};

static struct debugtag dbg_level[] = {
    {"ACPI_LV_INIT", ACPI_LV_INIT},
    {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT},
    {"ACPI_LV_INFO", ACPI_LV_INFO},
    {"ACPI_LV_REPAIR", ACPI_LV_REPAIR},
    {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS},

    /* Trace verbosity level 1 [Standard Trace Level] */
    {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES},
    {"ACPI_LV_PARSE", ACPI_LV_PARSE},
    {"ACPI_LV_LOAD", ACPI_LV_LOAD},
    {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH},
    {"ACPI_LV_EXEC", ACPI_LV_EXEC},
    {"ACPI_LV_NAMES", ACPI_LV_NAMES},
    {"ACPI_LV_OPREGION", ACPI_LV_OPREGION},
    {"ACPI_LV_BFIELD", ACPI_LV_BFIELD},
    {"ACPI_LV_TABLES", ACPI_LV_TABLES},
    {"ACPI_LV_VALUES", ACPI_LV_VALUES},
    {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS},
    {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES},
    {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS},
    {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE},
    {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1},

    /* Trace verbosity level 2 [Function tracing and memory allocation] */
    {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS},
    {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS},
    {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS},
    {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2},
    {"ACPI_LV_ALL", ACPI_LV_ALL},

    /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
    {"ACPI_LV_MUTEX", ACPI_LV_MUTEX},
    {"ACPI_LV_THREADS", ACPI_LV_THREADS},
    {"ACPI_LV_IO", ACPI_LV_IO},
    {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS},
    {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3},

    /* Exceptionally verbose output -- also used in the global "DebugLevel" */
    {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE},
    {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO},
    {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES},
    {"ACPI_LV_EVENTS", ACPI_LV_EVENTS},
    {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE},
    {NULL, 0}
};
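
/*
 * Example (editor's illustrative sketch): with ACPI_DEBUG compiled in, the
 * names listed above can be combined in loader.conf(5), e.g.:
 *
 *	debug.acpi.layer="ACPI_EC ACPI_BUS"
 *	debug.acpi.level="ACPI_LV_INIT ACPI_LV_INFO"
 *
 * The same strings can later be written to the debug.acpi.layer and
 * debug.acpi.level sysctls defined below to change the settings at runtime.
 */
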
static void
acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
{
    char *ep;
    int i, l;
    int set;

    while (*cp) {
        if (isspace(*cp)) {
            cp++;
            continue;
        }
        ep = cp;
        while (*ep && !isspace(*ep))
            ep++;
        if (*cp == '!') {
            set = 0;
            cp++;
            if (cp == ep)
                continue;
        } else {
            set = 1;
        }
        l = ep - cp;
        for (i = 0; tag[i].name != NULL; i++) {
            if (!strncmp(cp, tag[i].name, l)) {
                if (set)
                    *flag |= tag[i].value;
                else
                    *flag &= ~tag[i].value;
            }
        }
        cp = ep;
    }
}

static void
acpi_set_debugging(void *junk)
{
    char *layer, *level;

    if (cold) {
        AcpiDbgLayer = 0;
        AcpiDbgLevel = 0;
    }

    layer = kern_getenv("debug.acpi.layer");
    level = kern_getenv("debug.acpi.level");
    if (layer == NULL && level == NULL)
        return;

    printf("ACPI set debug");
    if (layer != NULL) {
        if (strcmp("NONE", layer) != 0)
            printf(" layer '%s'", layer);
        acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
        freeenv(layer);
    }
    if (level != NULL) {
        if (strcmp("NONE", level) != 0)
            printf(" level '%s'", level);
        acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
        freeenv(level);
    }
    printf("\n");
}

SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
    NULL);

static int
acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error, *dbg;
    struct debugtag *tag;
    struct sbuf sb;
    char temp[128];

    if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
        return (ENOMEM);
    if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
        tag = &dbg_layer[0];
        dbg = &AcpiDbgLayer;
    } else {
        tag = &dbg_level[0];
        dbg = &AcpiDbgLevel;
    }

    /* Get old values if this is a get request. */
    ACPI_SERIAL_BEGIN(acpi);
    if (*dbg == 0) {
        sbuf_cpy(&sb, "NONE");
    } else if (req->newptr == NULL) {
        for (; tag->name != NULL; tag++) {
            if ((*dbg & tag->value) == tag->value)
                sbuf_printf(&sb, "%s ", tag->name);
        }
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    strlcpy(temp, sbuf_data(&sb), sizeof(temp));
    sbuf_delete(&sb);

    error = sysctl_handle_string(oidp, temp, sizeof(temp), req);

    /* Check for error or no change. */
    if (error == 0 && req->newptr != NULL) {
        *dbg = 0;
        kern_setenv((char *)oidp->oid_arg1, temp);
        acpi_set_debugging(NULL);
    }
    ACPI_SERIAL_END(acpi);

    return (error);
}

SYSCTL_PROC(_debug_acpi, OID_AUTO, layer,
    CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0,
    acpi_debug_sysctl, "A",
    "");
SYSCTL_PROC(_debug_acpi, OID_AUTO, level,
    CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0,
    acpi_debug_sysctl, "A",
    "");
#endif /* ACPI_DEBUG */

static int
acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error;
    int old;

    old = acpi_debug_objects;
    error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (old == acpi_debug_objects || (old && acpi_debug_objects))
        return (0);

    ACPI_SERIAL_BEGIN(acpi);
    AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
    ACPI_SERIAL_END(acpi);

    return (0);
}

static int
acpi_parse_interfaces(char *str, struct acpi_interface *iface)
{
    char *p;
    size_t len;
    int i, j;

    p = str;
    while (isspace(*p) || *p == ',')
        p++;
    len = strlen(p);
    if (len == 0)
        return (0);
    p = strdup(p, M_TEMP);
    for (i = 0; i < len; i++)
        if (p[i] == ',')
            p[i] = '\0';
    i = j = 0;
    while (i < len)
        if (isspace(p[i]) || p[i] == '\0')
            i++;
        else {
            i += strlen(p + i) + 1;
            j++;
        }
    if (j == 0) {
        free(p, M_TEMP);
        return (0);
    }
    iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
    iface->num = j;
    i = j = 0;
    while (i < len)
        if (isspace(p[i]) || p[i] == '\0')
            i++;
        else {
            iface->data[j] = p + i;
            i += strlen(p + i) + 1;
            j++;
        }

    return (j);
}

static void
acpi_free_interfaces(struct acpi_interface *iface)
{

    free(iface->data[0], M_TEMP);
    free(iface->data, M_TEMP);
}

static void
acpi_reset_interfaces(device_t dev)
{
    struct acpi_interface list;
    ACPI_STATUS status;
    int i;

    if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
        for (i = 0; i < list.num; i++) {
            status = AcpiInstallInterface(list.data[i]);
            if (ACPI_FAILURE(status))
                device_printf(dev,
                    "failed to install _OSI(\"%s\"): %s\n",
                    list.data[i], AcpiFormatException(status));
            else if (bootverbose)
                device_printf(dev, "installed _OSI(\"%s\")\n",
                    list.data[i]);
        }
        acpi_free_interfaces(&list);
    }
    if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
        for (i = 0; i < list.num; i++) {
            status = AcpiRemoveInterface(list.data[i]);
            if (ACPI_FAILURE(status))
                device_printf(dev,
                    "failed to remove _OSI(\"%s\"): %s\n",
                    list.data[i], AcpiFormatException(status));
            else if (bootverbose)
                device_printf(dev, "removed _OSI(\"%s\")\n",
                    list.data[i]);
        }
        acpi_free_interfaces(&list);
    }
}
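
/*
 * Example (editor's illustrative sketch): acpi_install_interface and
 * acpi_remove_interface hold comma-separated lists of _OSI interface names,
 * typically set via the hw.acpi.install_interface and
 * hw.acpi.remove_interface loader tunables, e.g.:
 *
 *	hw.acpi.install_interface="Linux"
 *	hw.acpi.remove_interface="Windows 2012, Windows 2015"
 */
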
static int
acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
{
    int error, sstate;
    struct acpi_softc *sc;

    error = 0;
    switch (cmd) {
    case POWER_CMD_SUSPEND:
        sc = (struct acpi_softc *)arg;
        if (sc == NULL) {
            error = EINVAL;
            goto out;
        }
        sstate = acpi_stype_to_sstate(sc, stype);
        if (ACPI_FAILURE(acpi_EnterSleepState(sc, sstate)))
            error = ENXIO;
        break;
    default:
        error = EINVAL;
        goto out;
    }

out:
    return (error);
}

static void
acpi_pm_register(void *arg)
{
    if (!cold || resource_disabled("acpi", 0))
        return;

    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
}

SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);