1 /*- 2 * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org> 3 * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org> 4 * Copyright (c) 2000, 2001 Michael Smith 5 * Copyright (c) 2000 BSDi 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_acpi.h" 34 #include <sys/param.h> 35 #include <sys/kernel.h> 36 #include <sys/proc.h> 37 #include <sys/fcntl.h> 38 #include <sys/malloc.h> 39 #include <sys/module.h> 40 #include <sys/bus.h> 41 #include <sys/conf.h> 42 #include <sys/ioccom.h> 43 #include <sys/reboot.h> 44 #include <sys/sysctl.h> 45 #include <sys/ctype.h> 46 #include <sys/linker.h> 47 #include <sys/power.h> 48 #include <sys/sbuf.h> 49 #include <sys/smp.h> 50 51 #if defined(__i386__) || defined(__amd64__) 52 #include <machine/pci_cfgreg.h> 53 #endif 54 #include <machine/resource.h> 55 #include <machine/bus.h> 56 #include <sys/rman.h> 57 #include <isa/isavar.h> 58 #include <isa/pnpvar.h> 59 60 #include <contrib/dev/acpica/acpi.h> 61 #include <dev/acpica/acpivar.h> 62 #include <dev/acpica/acpiio.h> 63 #include <contrib/dev/acpica/achware.h> 64 #include <contrib/dev/acpica/acnamesp.h> 65 66 #include "pci_if.h" 67 #include <dev/pci/pcivar.h> 68 #include <dev/pci/pci_private.h> 69 70 #include <vm/vm_param.h> 71 72 MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); 73 74 /* Hooks for the ACPI CA debugging infrastructure */ 75 #define _COMPONENT ACPI_BUS 76 ACPI_MODULE_NAME("ACPI") 77 78 static d_open_t acpiopen; 79 static d_close_t acpiclose; 80 static d_ioctl_t acpiioctl; 81 82 static struct cdevsw acpi_cdevsw = { 83 .d_version = D_VERSION, 84 .d_open = acpiopen, 85 .d_close = acpiclose, 86 .d_ioctl = acpiioctl, 87 .d_name = "acpi", 88 }; 89 90 /* Global mutex for locking access to the ACPI subsystem. */ 91 struct mtx acpi_mutex; 92 93 /* Bitmap of device quirks. 
*/ 94 int acpi_quirks; 95 96 static int acpi_modevent(struct module *mod, int event, void *junk); 97 static int acpi_probe(device_t dev); 98 static int acpi_attach(device_t dev); 99 static int acpi_suspend(device_t dev); 100 static int acpi_resume(device_t dev); 101 static int acpi_shutdown(device_t dev); 102 static device_t acpi_add_child(device_t bus, int order, const char *name, 103 int unit); 104 static int acpi_print_child(device_t bus, device_t child); 105 static void acpi_probe_nomatch(device_t bus, device_t child); 106 static void acpi_driver_added(device_t dev, driver_t *driver); 107 static int acpi_read_ivar(device_t dev, device_t child, int index, 108 uintptr_t *result); 109 static int acpi_write_ivar(device_t dev, device_t child, int index, 110 uintptr_t value); 111 static struct resource_list *acpi_get_rlist(device_t dev, device_t child); 112 static int acpi_sysres_alloc(device_t dev); 113 static struct resource *acpi_alloc_resource(device_t bus, device_t child, 114 int type, int *rid, u_long start, u_long end, 115 u_long count, u_int flags); 116 static int acpi_release_resource(device_t bus, device_t child, int type, 117 int rid, struct resource *r); 118 static void acpi_delete_resource(device_t bus, device_t child, int type, 119 int rid); 120 static uint32_t acpi_isa_get_logicalid(device_t dev); 121 static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); 122 static char *acpi_device_id_probe(device_t bus, device_t dev, char **ids); 123 static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, 124 ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, 125 ACPI_BUFFER *ret); 126 static int acpi_device_pwr_for_sleep(device_t bus, device_t dev, 127 int *dstate); 128 static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, 129 void *context, void **retval); 130 static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, 131 int max_depth, acpi_scan_cb_t user_fn, void *arg); 132 static int acpi_set_powerstate_method(device_t bus, device_t child, 133 int state); 134 static int acpi_isa_pnp_probe(device_t bus, device_t child, 135 struct isa_pnp_id *ids); 136 static void acpi_probe_children(device_t bus); 137 static void acpi_probe_order(ACPI_HANDLE handle, int *order); 138 static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, 139 void *context, void **status); 140 static BOOLEAN acpi_MatchHid(ACPI_HANDLE h, const char *hid); 141 static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); 142 static void acpi_shutdown_final(void *arg, int howto); 143 static void acpi_enable_fixed_events(struct acpi_softc *sc); 144 static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); 145 static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); 146 static int acpi_wake_prep_walk(int sstate); 147 static int acpi_wake_sysctl_walk(device_t dev); 148 static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); 149 static void acpi_system_eventhandler_sleep(void *arg, int state); 150 static void acpi_system_eventhandler_wakeup(void *arg, int state); 151 static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); 152 static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); 153 static int acpi_pm_func(u_long cmd, void *arg, ...); 154 static int acpi_child_location_str_method(device_t acdev, device_t child, 155 char *buf, size_t buflen); 156 static int acpi_child_pnpinfo_str_method(device_t acdev, device_t child, 157 char *buf, size_t buflen); 158 #if defined(__i386__) || defined(__amd64__) 159 static void 
acpi_enable_pcie(void); 160 #endif 161 static void acpi_hint_device_unit(device_t acdev, device_t child, 162 const char *name, int *unitp); 163 164 static device_method_t acpi_methods[] = { 165 /* Device interface */ 166 DEVMETHOD(device_probe, acpi_probe), 167 DEVMETHOD(device_attach, acpi_attach), 168 DEVMETHOD(device_shutdown, acpi_shutdown), 169 DEVMETHOD(device_detach, bus_generic_detach), 170 DEVMETHOD(device_suspend, acpi_suspend), 171 DEVMETHOD(device_resume, acpi_resume), 172 173 /* Bus interface */ 174 DEVMETHOD(bus_add_child, acpi_add_child), 175 DEVMETHOD(bus_print_child, acpi_print_child), 176 DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), 177 DEVMETHOD(bus_driver_added, acpi_driver_added), 178 DEVMETHOD(bus_read_ivar, acpi_read_ivar), 179 DEVMETHOD(bus_write_ivar, acpi_write_ivar), 180 DEVMETHOD(bus_get_resource_list, acpi_get_rlist), 181 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), 182 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), 183 DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), 184 DEVMETHOD(bus_release_resource, acpi_release_resource), 185 DEVMETHOD(bus_delete_resource, acpi_delete_resource), 186 DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method), 187 DEVMETHOD(bus_child_location_str, acpi_child_location_str_method), 188 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 189 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 190 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 191 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 192 DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), 193 194 /* ACPI bus */ 195 DEVMETHOD(acpi_id_probe, acpi_device_id_probe), 196 DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), 197 DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), 198 DEVMETHOD(acpi_scan_children, acpi_device_scan_children), 199 200 /* PCI emulation */ 201 DEVMETHOD(pci_set_powerstate, acpi_set_powerstate_method), 202 203 /* ISA emulation */ 204 DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), 205 206 {0, 0} 207 }; 208 209 static driver_t acpi_driver = { 210 "acpi", 211 acpi_methods, 212 sizeof(struct acpi_softc), 213 }; 214 215 static devclass_t acpi_devclass; 216 DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0); 217 MODULE_VERSION(acpi, 1); 218 219 ACPI_SERIAL_DECL(acpi, "ACPI root bus"); 220 221 /* Local pools for managing system resources for ACPI child devices. */ 222 static struct rman acpi_rman_io, acpi_rman_mem; 223 224 #define ACPI_MINIMUM_AWAKETIME 5 225 226 static const char* sleep_state_names[] = { 227 "S0", "S1", "S2", "S3", "S4", "S5", "NONE"}; 228 229 /* Holds the description of the acpi0 device. */ 230 static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; 231 232 SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging"); 233 static char acpi_ca_version[12]; 234 SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, 235 acpi_ca_version, 0, "Version of Intel ACPI-CA"); 236 237 /* 238 * Allow override of whether methods execute in parallel or not. 239 * Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS" 240 * errors for AML that really can't handle parallel method execution. 241 * It is off by default since this breaks recursive methods and 242 * some IBMs use such code. 243 */ 244 static int acpi_serialize_methods; 245 TUNABLE_INT("hw.acpi.serialize_methods", &acpi_serialize_methods); 246 247 /* Power devices off and on in suspend and resume. XXX Remove once tested. 
*/ 248 static int acpi_do_powerstate = 1; 249 TUNABLE_INT("debug.acpi.do_powerstate", &acpi_do_powerstate); 250 SYSCTL_INT(_debug_acpi, OID_AUTO, do_powerstate, CTLFLAG_RW, 251 &acpi_do_powerstate, 1, "Turn off devices when suspending."); 252 253 /* Allow users to override quirks. */ 254 TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); 255 256 static int acpi_susp_bounce; 257 SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, 258 &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); 259 260 /* 261 * ACPI can only be loaded as a module by the loader; activating it after 262 * system bootstrap time is not useful, and can be fatal to the system. 263 * It also cannot be unloaded, since the entire system bus hierarchy hangs 264 * off it. 265 */ 266 static int 267 acpi_modevent(struct module *mod, int event, void *junk) 268 { 269 switch (event) { 270 case MOD_LOAD: 271 if (!cold) { 272 printf("The ACPI driver cannot be loaded after boot.\n"); 273 return (EPERM); 274 } 275 break; 276 case MOD_UNLOAD: 277 if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) 278 return (EBUSY); 279 break; 280 default: 281 break; 282 } 283 return (0); 284 } 285 286 /* 287 * Perform early initialization. 288 */ 289 ACPI_STATUS 290 acpi_Startup(void) 291 { 292 static int started = 0; 293 ACPI_STATUS status; 294 int val; 295 296 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 297 298 /* Only run the startup code once. The MADT driver also calls this. */ 299 if (started) 300 return_VALUE (AE_OK); 301 started = 1; 302 303 /* 304 * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing 305 * if more tables exist. 306 */ 307 if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { 308 printf("ACPI: Table initialisation failed: %s\n", 309 AcpiFormatException(status)); 310 return_VALUE (status); 311 } 312 313 /* Set up any quirks we have for this system. */ 314 if (acpi_quirks == ACPI_Q_OK) 315 acpi_table_quirks(&acpi_quirks); 316 317 /* If the user manually set the disabled hint to 0, force-enable ACPI. */ 318 if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) 319 acpi_quirks &= ~ACPI_Q_BROKEN; 320 if (acpi_quirks & ACPI_Q_BROKEN) { 321 printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); 322 status = AE_SUPPORT; 323 } 324 325 return_VALUE (status); 326 } 327 328 /* 329 * Detect ACPI and perform early initialisation. 330 */ 331 int 332 acpi_identify(void) 333 { 334 ACPI_TABLE_RSDP *rsdp; 335 ACPI_TABLE_HEADER *rsdt; 336 ACPI_PHYSICAL_ADDRESS paddr; 337 struct sbuf sb; 338 339 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 340 341 if (!cold) 342 return (ENXIO); 343 344 /* Check that we haven't been disabled with a hint. */ 345 if (resource_disabled("acpi", 0)) 346 return (ENXIO); 347 348 /* Check for other PM systems. */ 349 if (power_pm_get_type() != POWER_PM_TYPE_NONE && 350 power_pm_get_type() != POWER_PM_TYPE_ACPI) { 351 printf("ACPI identify failed, other PM system enabled.\n"); 352 return (ENXIO); 353 } 354 355 /* Initialize root tables. 
*/ 356 if (ACPI_FAILURE(acpi_Startup())) { 357 printf("ACPI: Try disabling either ACPI or apic support.\n"); 358 return (ENXIO); 359 } 360 361 if ((paddr = AcpiOsGetRootPointer()) == 0 || 362 (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) 363 return (ENXIO); 364 if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) 365 paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; 366 else 367 paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; 368 AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); 369 370 if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) 371 return (ENXIO); 372 sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); 373 sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); 374 sbuf_trim(&sb); 375 sbuf_putc(&sb, ' '); 376 sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); 377 sbuf_trim(&sb); 378 sbuf_finish(&sb); 379 sbuf_delete(&sb); 380 AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); 381 382 snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); 383 384 return (0); 385 } 386 387 /* 388 * Fetch some descriptive data from ACPI to put in our attach message. 389 */ 390 static int 391 acpi_probe(device_t dev) 392 { 393 394 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 395 396 device_set_desc(dev, acpi_desc); 397 398 return_VALUE (0); 399 } 400 401 static int 402 acpi_attach(device_t dev) 403 { 404 struct acpi_softc *sc; 405 ACPI_TABLE_FACS *facs; 406 ACPI_STATUS status; 407 int error, state; 408 UINT32 flags; 409 UINT8 TypeA, TypeB; 410 char *env; 411 412 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 413 414 sc = device_get_softc(dev); 415 sc->acpi_dev = dev; 416 callout_init(&sc->susp_force_to, TRUE); 417 418 error = ENXIO; 419 420 /* Initialize resource manager. */ 421 acpi_rman_io.rm_type = RMAN_ARRAY; 422 acpi_rman_io.rm_start = 0; 423 acpi_rman_io.rm_end = 0xffff; 424 acpi_rman_io.rm_descr = "ACPI I/O ports"; 425 if (rman_init(&acpi_rman_io) != 0) 426 panic("acpi rman_init IO ports failed"); 427 acpi_rman_mem.rm_type = RMAN_ARRAY; 428 acpi_rman_mem.rm_start = 0; 429 acpi_rman_mem.rm_end = ~0ul; 430 acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; 431 if (rman_init(&acpi_rman_mem) != 0) 432 panic("acpi rman_init memory failed"); 433 434 /* Initialise the ACPI mutex */ 435 mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); 436 437 /* 438 * Set the globals from our tunables. This is needed because ACPI-CA 439 * uses UINT8 for some values and we have no tunable_byte. 440 */ 441 AcpiGbl_AllMethodsSerialized = acpi_serialize_methods; 442 AcpiGbl_EnableInterpreterSlack = TRUE; 443 444 /* Start up the ACPI CA subsystem. */ 445 status = AcpiInitializeSubsystem(); 446 if (ACPI_FAILURE(status)) { 447 device_printf(dev, "Could not initialize Subsystem: %s\n", 448 AcpiFormatException(status)); 449 goto out; 450 } 451 452 /* Load ACPI name space. */ 453 status = AcpiLoadTables(); 454 if (ACPI_FAILURE(status)) { 455 device_printf(dev, "Could not load Namespace: %s\n", 456 AcpiFormatException(status)); 457 goto out; 458 } 459 460 #if defined(__i386__) || defined(__amd64__) 461 /* Handle MCFG table if present. */ 462 acpi_enable_pcie(); 463 #endif 464 465 /* Install the default address space handlers. 
*/ 466 status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, 467 ACPI_ADR_SPACE_SYSTEM_MEMORY, ACPI_DEFAULT_HANDLER, NULL, NULL); 468 if (ACPI_FAILURE(status)) { 469 device_printf(dev, "Could not initialise SystemMemory handler: %s\n", 470 AcpiFormatException(status)); 471 goto out; 472 } 473 status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, 474 ACPI_ADR_SPACE_SYSTEM_IO, ACPI_DEFAULT_HANDLER, NULL, NULL); 475 if (ACPI_FAILURE(status)) { 476 device_printf(dev, "Could not initialise SystemIO handler: %s\n", 477 AcpiFormatException(status)); 478 goto out; 479 } 480 status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, 481 ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); 482 if (ACPI_FAILURE(status)) { 483 device_printf(dev, "could not initialise PciConfig handler: %s\n", 484 AcpiFormatException(status)); 485 goto out; 486 } 487 488 /* 489 * Note that some systems (specifically, those with namespace evaluation 490 * issues that require the avoidance of parts of the namespace) must 491 * avoid running _INI and _STA on everything, as well as dodging the final 492 * object init pass. 493 * 494 * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT). 495 * 496 * XXX We should arrange for the object init pass after we have attached 497 * all our child devices, but on many systems it works here. 498 */ 499 flags = 0; 500 if (testenv("debug.acpi.avoid")) 501 flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; 502 503 /* Bring the hardware and basic handlers online. */ 504 if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { 505 device_printf(dev, "Could not enable ACPI: %s\n", 506 AcpiFormatException(status)); 507 goto out; 508 } 509 510 /* 511 * Call the ECDT probe function to provide EC functionality before 512 * the namespace has been evaluated. 513 * 514 * XXX This happens before the sysresource devices have been probed and 515 * attached so its resources come from nexus0. In practice, this isn't 516 * a problem but should be addressed eventually. 517 */ 518 acpi_ec_ecdt_probe(dev); 519 520 /* Bring device objects and regions online. */ 521 if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { 522 device_printf(dev, "Could not initialize ACPI objects: %s\n", 523 AcpiFormatException(status)); 524 goto out; 525 } 526 527 /* 528 * Setup our sysctl tree. 529 * 530 * XXX: This doesn't check to make sure that none of these fail. 
531 */ 532 sysctl_ctx_init(&sc->acpi_sysctl_ctx); 533 sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, 534 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 535 device_get_name(dev), CTLFLAG_RD, 0, ""); 536 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 537 OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD, 538 0, 0, acpi_supported_sleep_state_sysctl, "A", ""); 539 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 540 OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW, 541 &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); 542 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 543 OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW, 544 &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); 545 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 546 OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW, 547 &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", ""); 548 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 549 OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW, 550 &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); 551 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 552 OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW, 553 &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); 554 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 555 OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, 556 "sleep delay"); 557 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 558 OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); 559 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 560 OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); 561 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 562 OID_AUTO, "disable_on_reboot", CTLFLAG_RW, 563 &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); 564 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 565 OID_AUTO, "handle_reboot", CTLFLAG_RW, 566 &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); 567 568 /* 569 * Default to 1 second before sleeping to give some machines time to 570 * stabilize. 571 */ 572 sc->acpi_sleep_delay = 1; 573 if (bootverbose) 574 sc->acpi_verbose = 1; 575 if ((env = getenv("hw.acpi.verbose")) != NULL) { 576 if (strcmp(env, "0") != 0) 577 sc->acpi_verbose = 1; 578 freeenv(env); 579 } 580 581 /* Only enable S4BIOS by default if the FACS says it is available. */ 582 status = AcpiGetTable(ACPI_SIG_FACS, 0, (ACPI_TABLE_HEADER **)&facs); 583 if (ACPI_FAILURE(status)) { 584 device_printf(dev, "couldn't get FACS: %s\n", 585 AcpiFormatException(status)); 586 error = ENXIO; 587 goto out; 588 } 589 if (facs->Flags & ACPI_FACS_S4_BIOS_PRESENT) 590 sc->acpi_s4bios = 1; 591 592 /* 593 * Dispatch the default sleep state to devices. The lid switch is set 594 * to NONE by default to avoid surprising users. 595 */ 596 sc->acpi_power_button_sx = ACPI_STATE_S5; 597 sc->acpi_lid_switch_sx = ACPI_S_STATES_MAX + 1; 598 sc->acpi_standby_sx = ACPI_STATE_S1; 599 sc->acpi_suspend_sx = ACPI_STATE_S3; 600 601 /* Pick the first valid sleep state for the sleep button default. 
*/ 602 sc->acpi_sleep_button_sx = ACPI_S_STATES_MAX + 1; 603 for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) 604 if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) { 605 sc->acpi_sleep_button_sx = state; 606 break; 607 } 608 609 acpi_enable_fixed_events(sc); 610 611 /* 612 * Scan the namespace and attach/initialise children. 613 */ 614 615 /* Register our shutdown handler. */ 616 EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, 617 SHUTDOWN_PRI_LAST); 618 619 /* 620 * Register our acpi event handlers. 621 * XXX should be configurable eg. via userland policy manager. 622 */ 623 EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, 624 sc, ACPI_EVENT_PRI_LAST); 625 EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, 626 sc, ACPI_EVENT_PRI_LAST); 627 628 /* Flag our initial states. */ 629 sc->acpi_enabled = 1; 630 sc->acpi_sstate = ACPI_STATE_S0; 631 sc->acpi_sleep_disabled = 0; 632 633 /* Create the control device */ 634 sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644, 635 "acpi"); 636 sc->acpi_dev_t->si_drv1 = sc; 637 638 if ((error = acpi_machdep_init(dev))) 639 goto out; 640 641 /* Register ACPI again to pass the correct argument of pm_func. */ 642 power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); 643 644 if (!acpi_disabled("bus")) 645 acpi_probe_children(dev); 646 647 error = 0; 648 649 out: 650 return_VALUE (error); 651 } 652 653 static int 654 acpi_suspend(device_t dev) 655 { 656 device_t child, *devlist; 657 int error, i, numdevs, pstate; 658 659 GIANT_REQUIRED; 660 661 /* First give child devices a chance to suspend. */ 662 error = bus_generic_suspend(dev); 663 if (error) 664 return (error); 665 666 /* 667 * Now, set them into the appropriate power state, usually D3. If the 668 * device has an _SxD method for the next sleep state, use that power 669 * state instead. 670 */ 671 error = device_get_children(dev, &devlist, &numdevs); 672 if (error) 673 return (error); 674 for (i = 0; i < numdevs; i++) { 675 /* If the device is not attached, we've powered it down elsewhere. */ 676 child = devlist[i]; 677 if (!device_is_attached(child)) 678 continue; 679 680 /* 681 * Default to D3 for all sleep states. The _SxD method is optional 682 * so set the powerstate even if it's absent. 683 */ 684 pstate = PCI_POWERSTATE_D3; 685 error = acpi_device_pwr_for_sleep(device_get_parent(child), 686 child, &pstate); 687 if ((error == 0 || error == ESRCH) && acpi_do_powerstate) 688 pci_set_powerstate(child, pstate); 689 } 690 free(devlist, M_TEMP); 691 error = 0; 692 693 return (error); 694 } 695 696 static int 697 acpi_resume(device_t dev) 698 { 699 ACPI_HANDLE handle; 700 int i, numdevs, error; 701 device_t child, *devlist; 702 703 GIANT_REQUIRED; 704 705 /* 706 * Put all devices in D0 before resuming them. Call _S0D on each one 707 * since some systems expect this. 708 */ 709 error = device_get_children(dev, &devlist, &numdevs); 710 if (error) 711 return (error); 712 for (i = 0; i < numdevs; i++) { 713 child = devlist[i]; 714 handle = acpi_get_handle(child); 715 if (handle) 716 AcpiEvaluateObject(handle, "_S0D", NULL, NULL); 717 if (device_is_attached(child) && acpi_do_powerstate) 718 pci_set_powerstate(child, PCI_POWERSTATE_D0); 719 } 720 free(devlist, M_TEMP); 721 722 return (bus_generic_resume(dev)); 723 } 724 725 static int 726 acpi_shutdown(device_t dev) 727 { 728 729 GIANT_REQUIRED; 730 731 /* Allow children to shutdown first. 
*/ 732 bus_generic_shutdown(dev); 733 734 /* 735 * Enable any GPEs that are able to power-on the system (i.e., RTC). 736 * Also, disable any that are not valid for this state (most). 737 */ 738 acpi_wake_prep_walk(ACPI_STATE_S5); 739 740 return (0); 741 } 742 743 /* 744 * Handle a new device being added 745 */ 746 static device_t 747 acpi_add_child(device_t bus, int order, const char *name, int unit) 748 { 749 struct acpi_device *ad; 750 device_t child; 751 752 if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) 753 return (NULL); 754 755 resource_list_init(&ad->ad_rl); 756 757 child = device_add_child_ordered(bus, order, name, unit); 758 if (child != NULL) 759 device_set_ivars(child, ad); 760 else 761 free(ad, M_ACPIDEV); 762 return (child); 763 } 764 765 static int 766 acpi_print_child(device_t bus, device_t child) 767 { 768 struct acpi_device *adev = device_get_ivars(child); 769 struct resource_list *rl = &adev->ad_rl; 770 int retval = 0; 771 772 retval += bus_print_child_header(bus, child); 773 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); 774 retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx"); 775 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); 776 retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%ld"); 777 if (device_get_flags(child)) 778 retval += printf(" flags %#x", device_get_flags(child)); 779 retval += bus_print_child_footer(bus, child); 780 781 return (retval); 782 } 783 784 /* 785 * If this device is an ACPI child but no one claimed it, attempt 786 * to power it off. We'll power it back up when a driver is added. 787 * 788 * XXX Disabled for now since many necessary devices (like fdc and 789 * ATA) don't claim the devices we created for them but still expect 790 * them to be powered up. 791 */ 792 static void 793 acpi_probe_nomatch(device_t bus, device_t child) 794 { 795 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER 796 pci_set_powerstate(child, PCI_POWERSTATE_D3); 797 #endif 798 } 799 800 /* 801 * If a new driver has a chance to probe a child, first power it up. 802 * 803 * XXX Disabled for now (see acpi_probe_nomatch for details). 
804 */ 805 static void 806 acpi_driver_added(device_t dev, driver_t *driver) 807 { 808 device_t child, *devlist; 809 int i, numdevs; 810 811 DEVICE_IDENTIFY(driver, dev); 812 if (device_get_children(dev, &devlist, &numdevs)) 813 return; 814 for (i = 0; i < numdevs; i++) { 815 child = devlist[i]; 816 if (device_get_state(child) == DS_NOTPRESENT) { 817 #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER 818 pci_set_powerstate(child, PCI_POWERSTATE_D0); 819 if (device_probe_and_attach(child) != 0) 820 pci_set_powerstate(child, PCI_POWERSTATE_D3); 821 #else 822 device_probe_and_attach(child); 823 #endif 824 } 825 } 826 free(devlist, M_TEMP); 827 } 828 829 /* Location hint for devctl(8) */ 830 static int 831 acpi_child_location_str_method(device_t cbdev, device_t child, char *buf, 832 size_t buflen) 833 { 834 struct acpi_device *dinfo = device_get_ivars(child); 835 836 if (dinfo->ad_handle) 837 snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle)); 838 else 839 snprintf(buf, buflen, "unknown"); 840 return (0); 841 } 842 843 /* PnP information for devctl(8) */ 844 static int 845 acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf, 846 size_t buflen) 847 { 848 ACPI_BUFFER adbuf = {ACPI_ALLOCATE_BUFFER, NULL}; 849 ACPI_DEVICE_INFO *adinfo; 850 struct acpi_device *dinfo = device_get_ivars(child); 851 char *end; 852 int error; 853 854 error = AcpiGetObjectInfo(dinfo->ad_handle, &adbuf); 855 adinfo = (ACPI_DEVICE_INFO *) adbuf.Pointer; 856 if (error) 857 snprintf(buf, buflen, "unknown"); 858 else 859 snprintf(buf, buflen, "_HID=%s _UID=%lu", 860 (adinfo->Valid & ACPI_VALID_HID) ? 861 adinfo->HardwareId.Value : "none", 862 (adinfo->Valid & ACPI_VALID_UID) ? 863 strtoul(adinfo->UniqueId.Value, &end, 10) : 0); 864 if (adinfo) 865 AcpiOsFree(adinfo); 866 867 return (0); 868 } 869 870 /* 871 * Handle per-device ivars 872 */ 873 static int 874 acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 875 { 876 struct acpi_device *ad; 877 878 if ((ad = device_get_ivars(child)) == NULL) { 879 printf("device has no ivars\n"); 880 return (ENOENT); 881 } 882 883 /* ACPI and ISA compatibility ivars */ 884 switch(index) { 885 case ACPI_IVAR_HANDLE: 886 *(ACPI_HANDLE *)result = ad->ad_handle; 887 break; 888 case ACPI_IVAR_MAGIC: 889 *(uintptr_t *)result = ad->ad_magic; 890 break; 891 case ACPI_IVAR_PRIVATE: 892 *(void **)result = ad->ad_private; 893 break; 894 case ACPI_IVAR_FLAGS: 895 *(int *)result = ad->ad_flags; 896 break; 897 case ISA_IVAR_VENDORID: 898 case ISA_IVAR_SERIAL: 899 case ISA_IVAR_COMPATID: 900 *(int *)result = -1; 901 break; 902 case ISA_IVAR_LOGICALID: 903 *(int *)result = acpi_isa_get_logicalid(child); 904 break; 905 default: 906 return (ENOENT); 907 } 908 909 return (0); 910 } 911 912 static int 913 acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 914 { 915 struct acpi_device *ad; 916 917 if ((ad = device_get_ivars(child)) == NULL) { 918 printf("device has no ivars\n"); 919 return (ENOENT); 920 } 921 922 switch(index) { 923 case ACPI_IVAR_HANDLE: 924 ad->ad_handle = (ACPI_HANDLE)value; 925 break; 926 case ACPI_IVAR_MAGIC: 927 ad->ad_magic = (uintptr_t)value; 928 break; 929 case ACPI_IVAR_PRIVATE: 930 ad->ad_private = (void *)value; 931 break; 932 case ACPI_IVAR_FLAGS: 933 ad->ad_flags = (int)value; 934 break; 935 default: 936 panic("bad ivar write request (%d)", index); 937 return (ENOENT); 938 } 939 940 return (0); 941 } 942 943 /* 944 * Handle child resource allocation/removal 945 */ 946 static struct resource_list * 947 
acpi_get_rlist(device_t dev, device_t child) 948 { 949 struct acpi_device *ad; 950 951 ad = device_get_ivars(child); 952 return (&ad->ad_rl); 953 } 954 955 static int 956 acpi_match_resource_hint(device_t dev, int type, long value) 957 { 958 struct acpi_device *ad = device_get_ivars(dev); 959 struct resource_list *rl = &ad->ad_rl; 960 struct resource_list_entry *rle; 961 962 STAILQ_FOREACH(rle, rl, link) { 963 if (rle->type != type) 964 continue; 965 if (rle->start <= value && rle->end >= value) 966 return (1); 967 } 968 return (0); 969 } 970 971 /* 972 * Wire device unit numbers based on resource matches in hints. 973 */ 974 static void 975 acpi_hint_device_unit(device_t acdev, device_t child, const char *name, 976 int *unitp) 977 { 978 const char *s; 979 long value; 980 int line, matches, unit; 981 982 /* 983 * Iterate over all the hints for the devices with the specified 984 * name to see if one's resources are a subset of this device. 985 */ 986 line = 0; 987 for (;;) { 988 if (resource_find_dev(&line, name, &unit, "at", NULL) != 0) 989 break; 990 991 /* Must have an "at" for acpi or isa. */ 992 resource_string_value(name, unit, "at", &s); 993 if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || 994 strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0)) 995 continue; 996 997 /* 998 * Check for matching resources. We must have at least one, 999 * and all resources specified have to match. 1000 * 1001 * XXX: We may want to revisit this to be more lenient and wire 1002 * as long as it gets one match. 1003 */ 1004 matches = 0; 1005 if (resource_long_value(name, unit, "port", &value) == 0) { 1006 if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) 1007 matches++; 1008 else 1009 continue; 1010 } 1011 if (resource_long_value(name, unit, "maddr", &value) == 0) { 1012 if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) 1013 matches++; 1014 else 1015 continue; 1016 } 1017 if (resource_long_value(name, unit, "irq", &value) == 0) { 1018 if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) 1019 matches++; 1020 else 1021 continue; 1022 } 1023 if (resource_long_value(name, unit, "drq", &value) == 0) { 1024 if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) 1025 matches++; 1026 else 1027 continue; 1028 } 1029 1030 if (matches > 0) { 1031 /* We have a winner! */ 1032 *unitp = unit; 1033 break; 1034 } 1035 } 1036 } 1037 1038 /* 1039 * Pre-allocate/manage all memory and IO resources. Since rman can't handle 1040 * duplicates, we merge any in the sysresource attach routine. 1041 */ 1042 static int 1043 acpi_sysres_alloc(device_t dev) 1044 { 1045 struct resource *res; 1046 struct resource_list *rl; 1047 struct resource_list_entry *rle; 1048 struct rman *rm; 1049 char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; 1050 device_t *children; 1051 int child_count, i; 1052 1053 /* 1054 * Probe/attach any sysresource devices. This would be unnecessary if we 1055 * had multi-pass probe/attach. 1056 */ 1057 if (device_get_children(dev, &children, &child_count) != 0) 1058 return (ENXIO); 1059 for (i = 0; i < child_count; i++) { 1060 if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL) 1061 device_probe_and_attach(children[i]); 1062 } 1063 free(children, M_TEMP); 1064 1065 rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev); 1066 STAILQ_FOREACH(rle, rl, link) { 1067 if (rle->res != NULL) { 1068 device_printf(dev, "duplicate resource for %lx\n", rle->start); 1069 continue; 1070 } 1071 1072 /* Only memory and IO resources are valid here. 
*/ 1073 switch (rle->type) { 1074 case SYS_RES_IOPORT: 1075 rm = &acpi_rman_io; 1076 break; 1077 case SYS_RES_MEMORY: 1078 rm = &acpi_rman_mem; 1079 break; 1080 default: 1081 continue; 1082 } 1083 1084 /* Pre-allocate resource and add to our rman pool. */ 1085 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type, 1086 &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0); 1087 if (res != NULL) { 1088 rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); 1089 rle->res = res; 1090 } else 1091 device_printf(dev, "reservation of %lx, %lx (%d) failed\n", 1092 rle->start, rle->count, rle->type); 1093 } 1094 return (0); 1095 } 1096 1097 static struct resource * 1098 acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, 1099 u_long start, u_long end, u_long count, u_int flags) 1100 { 1101 ACPI_RESOURCE ares; 1102 struct acpi_device *ad = device_get_ivars(child); 1103 struct resource_list *rl = &ad->ad_rl; 1104 struct resource_list_entry *rle; 1105 struct resource *res; 1106 struct rman *rm; 1107 1108 res = NULL; 1109 1110 /* We only handle memory and IO resources through rman. */ 1111 switch (type) { 1112 case SYS_RES_IOPORT: 1113 rm = &acpi_rman_io; 1114 break; 1115 case SYS_RES_MEMORY: 1116 rm = &acpi_rman_mem; 1117 break; 1118 default: 1119 rm = NULL; 1120 } 1121 1122 ACPI_SERIAL_BEGIN(acpi); 1123 1124 /* 1125 * If this is an allocation of the "default" range for a given RID, and 1126 * we know what the resources for this device are (i.e., they're on the 1127 * child's resource list), use those start/end values. 1128 */ 1129 if (bus == device_get_parent(child) && start == 0UL && end == ~0UL) { 1130 rle = resource_list_find(rl, type, *rid); 1131 if (rle == NULL) 1132 goto out; 1133 start = rle->start; 1134 end = rle->end; 1135 count = rle->count; 1136 } 1137 1138 /* 1139 * If this is an allocation of a specific range, see if we can satisfy 1140 * the request from our system resource regions. If we can't, pass the 1141 * request up to the parent. 1142 */ 1143 if (start + count - 1 == end && rm != NULL) 1144 res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, 1145 child); 1146 if (res == NULL) { 1147 res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, 1148 start, end, count, flags); 1149 } else { 1150 rman_set_rid(res, *rid); 1151 1152 /* If requested, activate the resource using the parent's method. */ 1153 if (flags & RF_ACTIVE) 1154 if (bus_activate_resource(child, type, *rid, res) != 0) { 1155 rman_release_resource(res); 1156 res = NULL; 1157 goto out; 1158 } 1159 } 1160 1161 if (res != NULL && device_get_parent(child) == bus) 1162 switch (type) { 1163 case SYS_RES_IRQ: 1164 /* 1165 * Since bus_config_intr() takes immediate effect, we cannot 1166 * configure the interrupt associated with a device when we 1167 * parse the resources but have to defer it until a driver 1168 * actually allocates the interrupt via bus_alloc_resource(). 1169 * 1170 * XXX: Should we handle the lookup failing? 1171 */ 1172 if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) 1173 acpi_config_intr(child, &ares); 1174 break; 1175 } 1176 1177 out: 1178 ACPI_SERIAL_END(acpi); 1179 return (res); 1180 } 1181 1182 static int 1183 acpi_release_resource(device_t bus, device_t child, int type, int rid, 1184 struct resource *r) 1185 { 1186 struct rman *rm; 1187 int ret; 1188 1189 /* We only handle memory and IO resources through rman. 
*/ 1190 switch (type) { 1191 case SYS_RES_IOPORT: 1192 rm = &acpi_rman_io; 1193 break; 1194 case SYS_RES_MEMORY: 1195 rm = &acpi_rman_mem; 1196 break; 1197 default: 1198 rm = NULL; 1199 } 1200 1201 ACPI_SERIAL_BEGIN(acpi); 1202 1203 /* 1204 * If this resource belongs to one of our internal managers, 1205 * deactivate it and release it to the local pool. If it doesn't, 1206 * pass this request up to the parent. 1207 */ 1208 if (rm != NULL && rman_is_region_manager(r, rm)) { 1209 if (rman_get_flags(r) & RF_ACTIVE) { 1210 ret = bus_deactivate_resource(child, type, rid, r); 1211 if (ret != 0) 1212 goto out; 1213 } 1214 ret = rman_release_resource(r); 1215 } else 1216 ret = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, r); 1217 1218 out: 1219 ACPI_SERIAL_END(acpi); 1220 return (ret); 1221 } 1222 1223 static void 1224 acpi_delete_resource(device_t bus, device_t child, int type, int rid) 1225 { 1226 struct resource_list *rl; 1227 1228 rl = acpi_get_rlist(bus, child); 1229 resource_list_delete(rl, type, rid); 1230 } 1231 1232 /* Allocate an IO port or memory resource, given its GAS. */ 1233 int 1234 acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, 1235 struct resource **res, u_int flags) 1236 { 1237 int error, res_type; 1238 1239 error = ENOMEM; 1240 if (type == NULL || rid == NULL || gas == NULL || res == NULL) 1241 return (EINVAL); 1242 1243 /* We only support memory and IO spaces. */ 1244 switch (gas->SpaceId) { 1245 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 1246 res_type = SYS_RES_MEMORY; 1247 break; 1248 case ACPI_ADR_SPACE_SYSTEM_IO: 1249 res_type = SYS_RES_IOPORT; 1250 break; 1251 default: 1252 return (EOPNOTSUPP); 1253 } 1254 1255 /* 1256 * If the register width is less than 8, assume the BIOS author means 1257 * it is a bit field and just allocate a byte. 1258 */ 1259 if (gas->BitWidth && gas->BitWidth < 8) 1260 gas->BitWidth = 8; 1261 1262 /* Validate the address after we're sure we support the space. */ 1263 if (gas->Address == 0 || gas->BitWidth == 0) 1264 return (EINVAL); 1265 1266 bus_set_resource(dev, res_type, *rid, gas->Address, 1267 gas->BitWidth / 8); 1268 *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); 1269 if (*res != NULL) { 1270 *type = res_type; 1271 error = 0; 1272 } else 1273 bus_delete_resource(dev, res_type, *rid); 1274 1275 return (error); 1276 } 1277 1278 /* Probe _HID and _CID for compatible ISA PNP ids. */ 1279 static uint32_t 1280 acpi_isa_get_logicalid(device_t dev) 1281 { 1282 ACPI_DEVICE_INFO *devinfo; 1283 ACPI_BUFFER buf; 1284 ACPI_HANDLE h; 1285 ACPI_STATUS error; 1286 u_int32_t pnpid; 1287 1288 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1289 1290 pnpid = 0; 1291 buf.Pointer = NULL; 1292 buf.Length = ACPI_ALLOCATE_BUFFER; 1293 1294 /* Fetch and validate the HID. 
*/ 1295 if ((h = acpi_get_handle(dev)) == NULL) 1296 goto out; 1297 error = AcpiGetObjectInfo(h, &buf); 1298 if (ACPI_FAILURE(error)) 1299 goto out; 1300 devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; 1301 1302 if ((devinfo->Valid & ACPI_VALID_HID) != 0) 1303 pnpid = PNP_EISAID(devinfo->HardwareId.Value); 1304 1305 out: 1306 if (buf.Pointer != NULL) 1307 AcpiOsFree(buf.Pointer); 1308 return_VALUE (pnpid); 1309 } 1310 1311 static int 1312 acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) 1313 { 1314 ACPI_DEVICE_INFO *devinfo; 1315 ACPI_BUFFER buf; 1316 ACPI_HANDLE h; 1317 ACPI_STATUS error; 1318 uint32_t *pnpid; 1319 int valid, i; 1320 1321 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1322 1323 pnpid = cids; 1324 valid = 0; 1325 buf.Pointer = NULL; 1326 buf.Length = ACPI_ALLOCATE_BUFFER; 1327 1328 /* Fetch and validate the CID */ 1329 if ((h = acpi_get_handle(dev)) == NULL) 1330 goto out; 1331 error = AcpiGetObjectInfo(h, &buf); 1332 if (ACPI_FAILURE(error)) 1333 goto out; 1334 devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; 1335 if ((devinfo->Valid & ACPI_VALID_CID) == 0) 1336 goto out; 1337 1338 if (devinfo->CompatibilityId.Count < count) 1339 count = devinfo->CompatibilityId.Count; 1340 for (i = 0; i < count; i++) { 1341 if (strncmp(devinfo->CompatibilityId.Id[i].Value, "PNP", 3) != 0) 1342 continue; 1343 *pnpid++ = PNP_EISAID(devinfo->CompatibilityId.Id[i].Value); 1344 valid++; 1345 } 1346 1347 out: 1348 if (buf.Pointer != NULL) 1349 AcpiOsFree(buf.Pointer); 1350 return_VALUE (valid); 1351 } 1352 1353 static char * 1354 acpi_device_id_probe(device_t bus, device_t dev, char **ids) 1355 { 1356 ACPI_HANDLE h; 1357 int i; 1358 1359 h = acpi_get_handle(dev); 1360 if (ids == NULL || h == NULL || acpi_get_type(dev) != ACPI_TYPE_DEVICE) 1361 return (NULL); 1362 1363 /* Try to match one of the array of IDs with a HID or CID. */ 1364 for (i = 0; ids[i] != NULL; i++) { 1365 if (acpi_MatchHid(h, ids[i])) 1366 return (ids[i]); 1367 } 1368 return (NULL); 1369 } 1370 1371 static ACPI_STATUS 1372 acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, 1373 ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) 1374 { 1375 ACPI_HANDLE h; 1376 1377 if (dev == NULL) 1378 h = ACPI_ROOT_OBJECT; 1379 else if ((h = acpi_get_handle(dev)) == NULL) 1380 return (AE_BAD_PARAMETER); 1381 return (AcpiEvaluateObject(h, pathname, parameters, ret)); 1382 } 1383 1384 static int 1385 acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) 1386 { 1387 struct acpi_softc *sc; 1388 ACPI_HANDLE handle; 1389 ACPI_STATUS status; 1390 char sxd[8]; 1391 int error; 1392 1393 sc = device_get_softc(bus); 1394 handle = acpi_get_handle(dev); 1395 1396 /* 1397 * XXX If we find these devices, don't try to power them down. 1398 * The serial and IRDA ports on my T23 hang the system when 1399 * set to D3 and it appears that such legacy devices may 1400 * need special handling in their drivers. 1401 */ 1402 if (handle == NULL || 1403 acpi_MatchHid(handle, "PNP0500") || 1404 acpi_MatchHid(handle, "PNP0501") || 1405 acpi_MatchHid(handle, "PNP0502") || 1406 acpi_MatchHid(handle, "PNP0510") || 1407 acpi_MatchHid(handle, "PNP0511")) 1408 return (ENXIO); 1409 1410 /* 1411 * Override next state with the value from _SxD, if present. If no 1412 * dstate argument was provided, don't fetch the return value. 
1413 */ 1414 snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); 1415 if (dstate) 1416 status = acpi_GetInteger(handle, sxd, dstate); 1417 else 1418 status = AcpiEvaluateObject(handle, sxd, NULL, NULL); 1419 1420 switch (status) { 1421 case AE_OK: 1422 error = 0; 1423 break; 1424 case AE_NOT_FOUND: 1425 error = ESRCH; 1426 break; 1427 default: 1428 error = ENXIO; 1429 break; 1430 } 1431 1432 return (error); 1433 } 1434 1435 /* Callback arg for our implementation of walking the namespace. */ 1436 struct acpi_device_scan_ctx { 1437 acpi_scan_cb_t user_fn; 1438 void *arg; 1439 ACPI_HANDLE parent; 1440 }; 1441 1442 static ACPI_STATUS 1443 acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) 1444 { 1445 struct acpi_device_scan_ctx *ctx; 1446 device_t dev, old_dev; 1447 ACPI_STATUS status; 1448 ACPI_OBJECT_TYPE type; 1449 1450 /* 1451 * Skip this device if we think we'll have trouble with it or it is 1452 * the parent where the scan began. 1453 */ 1454 ctx = (struct acpi_device_scan_ctx *)arg; 1455 if (acpi_avoid(h) || h == ctx->parent) 1456 return (AE_OK); 1457 1458 /* If this is not a valid device type (e.g., a method), skip it. */ 1459 if (ACPI_FAILURE(AcpiGetType(h, &type))) 1460 return (AE_OK); 1461 if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && 1462 type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) 1463 return (AE_OK); 1464 1465 /* 1466 * Call the user function with the current device. If it is unchanged 1467 * afterwards, return. Otherwise, we update the handle to the new dev. 1468 */ 1469 old_dev = acpi_get_device(h); 1470 dev = old_dev; 1471 status = ctx->user_fn(h, &dev, level, ctx->arg); 1472 if (ACPI_FAILURE(status) || old_dev == dev) 1473 return (status); 1474 1475 /* Remove the old child and its connection to the handle. */ 1476 if (old_dev != NULL) { 1477 device_delete_child(device_get_parent(old_dev), old_dev); 1478 AcpiDetachData(h, acpi_fake_objhandler); 1479 } 1480 1481 /* Recreate the handle association if the user created a device. */ 1482 if (dev != NULL) 1483 AcpiAttachData(h, acpi_fake_objhandler, dev); 1484 1485 return (AE_OK); 1486 } 1487 1488 static ACPI_STATUS 1489 acpi_device_scan_children(device_t bus, device_t dev, int max_depth, 1490 acpi_scan_cb_t user_fn, void *arg) 1491 { 1492 ACPI_HANDLE h; 1493 struct acpi_device_scan_ctx ctx; 1494 1495 if (acpi_disabled("children")) 1496 return (AE_OK); 1497 1498 if (dev == NULL) 1499 h = ACPI_ROOT_OBJECT; 1500 else if ((h = acpi_get_handle(dev)) == NULL) 1501 return (AE_BAD_PARAMETER); 1502 ctx.user_fn = user_fn; 1503 ctx.arg = arg; 1504 ctx.parent = h; 1505 return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, 1506 acpi_device_scan_cb, &ctx, NULL)); 1507 } 1508 1509 /* 1510 * Even though ACPI devices are not PCI, we use the PCI approach for setting 1511 * device power states since it's close enough to ACPI. 1512 */ 1513 static int 1514 acpi_set_powerstate_method(device_t bus, device_t child, int state) 1515 { 1516 ACPI_HANDLE h; 1517 ACPI_STATUS status; 1518 int error; 1519 1520 error = 0; 1521 h = acpi_get_handle(child); 1522 if (state < ACPI_STATE_D0 || state > ACPI_STATE_D3) 1523 return (EINVAL); 1524 if (h == NULL) 1525 return (0); 1526 1527 /* Ignore errors if the power methods aren't present. 
*/ 1528 status = acpi_pwr_switch_consumer(h, state); 1529 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND 1530 && status != AE_BAD_PARAMETER) 1531 device_printf(bus, "failed to set ACPI power state D%d on %s: %s\n", 1532 state, acpi_name(h), AcpiFormatException(status)); 1533 1534 return (error); 1535 } 1536 1537 static int 1538 acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) 1539 { 1540 int result, cid_count, i; 1541 uint32_t lid, cids[8]; 1542 1543 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1544 1545 /* 1546 * ISA-style drivers attached to ACPI may persist and 1547 * probe manually if we return ENOENT. We never want 1548 * that to happen, so don't ever return it. 1549 */ 1550 result = ENXIO; 1551 1552 /* Scan the supplied IDs for a match */ 1553 lid = acpi_isa_get_logicalid(child); 1554 cid_count = acpi_isa_get_compatid(child, cids, 8); 1555 while (ids && ids->ip_id) { 1556 if (lid == ids->ip_id) { 1557 result = 0; 1558 goto out; 1559 } 1560 for (i = 0; i < cid_count; i++) { 1561 if (cids[i] == ids->ip_id) { 1562 result = 0; 1563 goto out; 1564 } 1565 } 1566 ids++; 1567 } 1568 1569 out: 1570 if (result == 0 && ids->ip_desc) 1571 device_set_desc(child, ids->ip_desc); 1572 1573 return_VALUE (result); 1574 } 1575 1576 #if defined(__i386__) || defined(__amd64__) 1577 /* 1578 * Look for a MCFG table. If it is present, use the settings for 1579 * domain (segment) 0 to setup PCI config space access via the memory 1580 * map. 1581 */ 1582 static void 1583 acpi_enable_pcie(void) 1584 { 1585 ACPI_TABLE_HEADER *hdr; 1586 ACPI_MCFG_ALLOCATION *alloc, *end; 1587 ACPI_STATUS status; 1588 1589 status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); 1590 if (ACPI_FAILURE(status)) 1591 return; 1592 1593 end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); 1594 alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); 1595 while (alloc < end) { 1596 if (alloc->PciSegment == 0) { 1597 pcie_cfgregopen(alloc->Address, alloc->StartBusNumber, 1598 alloc->EndBusNumber); 1599 return; 1600 } 1601 alloc++; 1602 } 1603 } 1604 #endif 1605 1606 /* 1607 * Scan all of the ACPI namespace and attach child devices. 1608 * 1609 * We should only expect to find devices in the \_PR, \_TZ, \_SI, and 1610 * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. 1611 * However, in violation of the spec, some systems place their PCI link 1612 * devices in \, so we have to walk the whole namespace. We check the 1613 * type of namespace nodes, so this should be ok. 1614 */ 1615 static void 1616 acpi_probe_children(device_t bus) 1617 { 1618 1619 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1620 1621 /* 1622 * Scan the namespace and insert placeholders for all the devices that 1623 * we find. We also probe/attach any early devices. 1624 * 1625 * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because 1626 * we want to create nodes for all devices, not just those that are 1627 * currently present. (This assumes that we don't want to create/remove 1628 * devices as they appear, which might be smarter.) 1629 */ 1630 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); 1631 AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, 1632 bus, NULL); 1633 1634 /* Pre-allocate resources for our rman from any sysresource devices. */ 1635 acpi_sysres_alloc(bus); 1636 1637 /* Create any static children by calling device identify methods. 
*/ 1638 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); 1639 bus_generic_probe(bus); 1640 1641 /* Probe/attach all children, created staticly and from the namespace. */ 1642 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "first bus_generic_attach\n")); 1643 bus_generic_attach(bus); 1644 1645 /* 1646 * Some of these children may have attached others as part of their attach 1647 * process (eg. the root PCI bus driver), so rescan. 1648 */ 1649 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "second bus_generic_attach\n")); 1650 bus_generic_attach(bus); 1651 1652 /* Attach wake sysctls. */ 1653 acpi_wake_sysctl_walk(bus); 1654 1655 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); 1656 return_VOID; 1657 } 1658 1659 /* 1660 * Determine the probe order for a given device. 1661 */ 1662 static void 1663 acpi_probe_order(ACPI_HANDLE handle, int *order) 1664 { 1665 ACPI_OBJECT_TYPE type; 1666 1667 /* 1668 * 1. I/O port and memory system resource holders 1669 * 2. Embedded controllers (to handle early accesses) 1670 * 3. PCI Link Devices 1671 * 100000. CPUs 1672 */ 1673 AcpiGetType(handle, &type); 1674 if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) 1675 *order = 1; 1676 else if (acpi_MatchHid(handle, "PNP0C09")) 1677 *order = 2; 1678 else if (acpi_MatchHid(handle, "PNP0C0F")) 1679 *order = 3; 1680 else if (type == ACPI_TYPE_PROCESSOR) 1681 *order = 100000; 1682 } 1683 1684 /* 1685 * Evaluate a child device and determine whether we might attach a device to 1686 * it. 1687 */ 1688 static ACPI_STATUS 1689 acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) 1690 { 1691 ACPI_OBJECT_TYPE type; 1692 ACPI_HANDLE h; 1693 device_t bus, child; 1694 int order; 1695 char *handle_str, **search; 1696 static char *scopes[] = {"\\_PR_", "\\_TZ_", "\\_SI_", "\\_SB_", NULL}; 1697 1698 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1699 1700 /* Skip this device if we think we'll have trouble with it. */ 1701 if (acpi_avoid(handle)) 1702 return_ACPI_STATUS (AE_OK); 1703 1704 bus = (device_t)context; 1705 if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { 1706 switch (type) { 1707 case ACPI_TYPE_DEVICE: 1708 case ACPI_TYPE_PROCESSOR: 1709 case ACPI_TYPE_THERMAL: 1710 case ACPI_TYPE_POWER: 1711 if (acpi_disabled("children")) 1712 break; 1713 1714 /* 1715 * Since we scan from \, be sure to skip system scope objects. 1716 * At least \_SB and \_TZ are detected as devices (ACPI-CA bug?) 1717 */ 1718 handle_str = acpi_name(handle); 1719 for (search = scopes; *search != NULL; search++) { 1720 if (strcmp(handle_str, *search) == 0) 1721 break; 1722 } 1723 if (*search != NULL) 1724 break; 1725 1726 /* 1727 * Create a placeholder device for this node. Sort the 1728 * placeholder so that the probe/attach passes will run 1729 * breadth-first. Orders less than ACPI_DEV_BASE_ORDER 1730 * are reserved for special objects (i.e., system 1731 * resources). CPU devices have a very high order to 1732 * ensure they are probed after other devices. 1733 */ 1734 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); 1735 order = level * 10 + 100; 1736 acpi_probe_order(handle, &order); 1737 child = BUS_ADD_CHILD(bus, order, NULL, -1); 1738 if (child == NULL) 1739 break; 1740 1741 /* Associate the handle with the device_t and vice versa. */ 1742 acpi_set_handle(child, handle); 1743 AcpiAttachData(handle, acpi_fake_objhandler, child); 1744 1745 /* 1746 * Check that the device is present. 
If it's not present, 1747 * leave it disabled (so that we have a device_t attached to 1748 * the handle, but we don't probe it). 1749 * 1750 * XXX PCI link devices sometimes report "present" but not 1751 * "functional" (i.e. if disabled). Go ahead and probe them 1752 * anyway since we may enable them later. 1753 */ 1754 if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { 1755 /* Never disable PCI link devices. */ 1756 if (acpi_MatchHid(handle, "PNP0C0F")) 1757 break; 1758 /* 1759 * Docking stations should remain enabled since the system 1760 * may be undocked at boot. 1761 */ 1762 if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) 1763 break; 1764 1765 device_disable(child); 1766 break; 1767 } 1768 1769 /* 1770 * Get the device's resource settings and attach them. 1771 * Note that if the device has _PRS but no _CRS, we need 1772 * to decide when it's appropriate to try to configure the 1773 * device. Ignore the return value here; it's OK for the 1774 * device not to have any resources. 1775 */ 1776 acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); 1777 break; 1778 } 1779 } 1780 1781 return_ACPI_STATUS (AE_OK); 1782 } 1783 1784 /* 1785 * AcpiAttachData() requires an object handler but never uses it. This is a 1786 * placeholder object handler so we can store a device_t in an ACPI_HANDLE. 1787 */ 1788 void 1789 acpi_fake_objhandler(ACPI_HANDLE h, UINT32 fn, void *data) 1790 { 1791 } 1792 1793 static void 1794 acpi_shutdown_final(void *arg, int howto) 1795 { 1796 struct acpi_softc *sc; 1797 ACPI_STATUS status; 1798 1799 /* 1800 * XXX Shutdown code should only run on the BSP (cpuid 0). 1801 * Some chipsets do not power off the system correctly if called from 1802 * an AP. 1803 */ 1804 sc = arg; 1805 if ((howto & RB_POWEROFF) != 0) { 1806 status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); 1807 if (ACPI_FAILURE(status)) { 1808 printf("AcpiEnterSleepStatePrep failed - %s\n", 1809 AcpiFormatException(status)); 1810 return; 1811 } 1812 printf("Powering system off using ACPI\n"); 1813 ACPI_DISABLE_IRQS(); 1814 status = AcpiEnterSleepState(ACPI_STATE_S5); 1815 if (ACPI_FAILURE(status)) { 1816 printf("ACPI power-off failed - %s\n", AcpiFormatException(status)); 1817 } else { 1818 DELAY(1000000); 1819 printf("ACPI power-off failed - timeout\n"); 1820 } 1821 } else if ((howto & RB_HALT) == 0 && 1822 (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) && 1823 sc->acpi_handle_reboot) { 1824 /* Reboot using the reset register. */ 1825 status = AcpiHwLowLevelWrite( 1826 AcpiGbl_FADT.ResetRegister.BitWidth, 1827 AcpiGbl_FADT.ResetValue, &AcpiGbl_FADT.ResetRegister); 1828 if (ACPI_FAILURE(status)) { 1829 printf("ACPI reset failed - %s\n", AcpiFormatException(status)); 1830 } else { 1831 DELAY(1000000); 1832 printf("ACPI reset failed - timeout\n"); 1833 } 1834 } else if (sc->acpi_do_disable && panicstr == NULL) { 1835 /* 1836 * Only disable ACPI if the user requested. On some systems, writing 1837 * the disable value to SMI_CMD hangs the system. 1838 */ 1839 printf("Shutting down ACPI\n"); 1840 AcpiTerminate(); 1841 } 1842 } 1843 1844 static void 1845 acpi_enable_fixed_events(struct acpi_softc *sc) 1846 { 1847 static int first_time = 1; 1848 1849 /* Enable and clear fixed events and install handlers. 
*/ 1850 if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { 1851 AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); 1852 AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, 1853 acpi_event_power_button_sleep, sc); 1854 if (first_time) 1855 device_printf(sc->acpi_dev, "Power Button (fixed)\n"); 1856 } 1857 if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1858 AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); 1859 AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, 1860 acpi_event_sleep_button_sleep, sc); 1861 if (first_time) 1862 device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); 1863 } 1864 1865 first_time = 0; 1866 } 1867 1868 /* 1869 * Returns true if the device is actually present and should 1870 * be attached to. This requires the present, enabled, UI-visible 1871 * and diagnostics-passed bits to be set. 1872 */ 1873 BOOLEAN 1874 acpi_DeviceIsPresent(device_t dev) 1875 { 1876 ACPI_DEVICE_INFO *devinfo; 1877 ACPI_HANDLE h; 1878 ACPI_BUFFER buf; 1879 ACPI_STATUS error; 1880 int ret; 1881 1882 ret = FALSE; 1883 if ((h = acpi_get_handle(dev)) == NULL) 1884 return (FALSE); 1885 buf.Pointer = NULL; 1886 buf.Length = ACPI_ALLOCATE_BUFFER; 1887 error = AcpiGetObjectInfo(h, &buf); 1888 if (ACPI_FAILURE(error)) 1889 return (FALSE); 1890 devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; 1891 1892 /* If no _STA method, must be present */ 1893 if ((devinfo->Valid & ACPI_VALID_STA) == 0) 1894 ret = TRUE; 1895 1896 /* Return true for 'present' and 'functioning' */ 1897 if (ACPI_DEVICE_PRESENT(devinfo->CurrentStatus)) 1898 ret = TRUE; 1899 1900 AcpiOsFree(buf.Pointer); 1901 return (ret); 1902 } 1903 1904 /* 1905 * Returns true if the battery is actually present and inserted. 1906 */ 1907 BOOLEAN 1908 acpi_BatteryIsPresent(device_t dev) 1909 { 1910 ACPI_DEVICE_INFO *devinfo; 1911 ACPI_HANDLE h; 1912 ACPI_BUFFER buf; 1913 ACPI_STATUS error; 1914 int ret; 1915 1916 ret = FALSE; 1917 if ((h = acpi_get_handle(dev)) == NULL) 1918 return (FALSE); 1919 buf.Pointer = NULL; 1920 buf.Length = ACPI_ALLOCATE_BUFFER; 1921 error = AcpiGetObjectInfo(h, &buf); 1922 if (ACPI_FAILURE(error)) 1923 return (FALSE); 1924 devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; 1925 1926 /* If no _STA method, must be present */ 1927 if ((devinfo->Valid & ACPI_VALID_STA) == 0) 1928 ret = TRUE; 1929 1930 /* Return true for 'present', 'battery present', and 'functioning' */ 1931 if (ACPI_BATTERY_PRESENT(devinfo->CurrentStatus)) 1932 ret = TRUE; 1933 1934 AcpiOsFree(buf.Pointer); 1935 return (ret); 1936 } 1937 1938 /* 1939 * Match a HID string against a handle 1940 */ 1941 static BOOLEAN 1942 acpi_MatchHid(ACPI_HANDLE h, const char *hid) 1943 { 1944 ACPI_DEVICE_INFO *devinfo; 1945 ACPI_BUFFER buf; 1946 ACPI_STATUS error; 1947 int ret, i; 1948 1949 ret = FALSE; 1950 if (hid == NULL || h == NULL) 1951 return (ret); 1952 buf.Pointer = NULL; 1953 buf.Length = ACPI_ALLOCATE_BUFFER; 1954 error = AcpiGetObjectInfo(h, &buf); 1955 if (ACPI_FAILURE(error)) 1956 return (ret); 1957 devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; 1958 1959 if ((devinfo->Valid & ACPI_VALID_HID) != 0 && 1960 strcmp(hid, devinfo->HardwareId.Value) == 0) 1961 ret = TRUE; 1962 else if ((devinfo->Valid & ACPI_VALID_CID) != 0) { 1963 for (i = 0; i < devinfo->CompatibilityId.Count; i++) { 1964 if (strcmp(hid, devinfo->CompatibilityId.Id[i].Value) == 0) { 1965 ret = TRUE; 1966 break; 1967 } 1968 } 1969 } 1970 1971 AcpiOsFree(buf.Pointer); 1972 return (ret); 1973 } 1974 1975 /* 1976 * Return the handle of a named object within our scope, ie. that of (parent) 1977 * or one if its parents. 
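 *
 * A minimal usage sketch (the starting handle "dev_h" and the method
 * name "_BIF" are only examples, not taken from this file):
 *
 *        ACPI_HANDLE tmp_h;
 *
 *        if (ACPI_SUCCESS(acpi_GetHandleInScope(dev_h, "_BIF", &tmp_h)))
 *                ... tmp_h now names the closest _BIF at or above dev_h ...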
1978 */ 1979 ACPI_STATUS 1980 acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) 1981 { 1982 ACPI_HANDLE r; 1983 ACPI_STATUS status; 1984 1985 /* Walk back up the tree to the root */ 1986 for (;;) { 1987 status = AcpiGetHandle(parent, path, &r); 1988 if (ACPI_SUCCESS(status)) { 1989 *result = r; 1990 return (AE_OK); 1991 } 1992 /* XXX Return error here? */ 1993 if (status != AE_NOT_FOUND) 1994 return (AE_OK); 1995 if (ACPI_FAILURE(AcpiGetParent(parent, &r))) 1996 return (AE_NOT_FOUND); 1997 parent = r; 1998 } 1999 } 2000 2001 /* Find the difference between two PM tick counts. */ 2002 uint32_t 2003 acpi_TimerDelta(uint32_t end, uint32_t start) 2004 { 2005 uint32_t delta; 2006 2007 if (end >= start) 2008 delta = end - start; 2009 else if (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER) 2010 delta = ((0xFFFFFFFF - start) + end + 1); 2011 else 2012 delta = ((0x00FFFFFF - start) + end + 1) & 0x00FFFFFF; 2013 return (delta); 2014 } 2015 2016 /* 2017 * Allocate a buffer with a preset data size. 2018 */ 2019 ACPI_BUFFER * 2020 acpi_AllocBuffer(int size) 2021 { 2022 ACPI_BUFFER *buf; 2023 2024 if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) 2025 return (NULL); 2026 buf->Length = size; 2027 buf->Pointer = (void *)(buf + 1); 2028 return (buf); 2029 } 2030 2031 ACPI_STATUS 2032 acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) 2033 { 2034 ACPI_OBJECT arg1; 2035 ACPI_OBJECT_LIST args; 2036 2037 arg1.Type = ACPI_TYPE_INTEGER; 2038 arg1.Integer.Value = number; 2039 args.Count = 1; 2040 args.Pointer = &arg1; 2041 2042 return (AcpiEvaluateObject(handle, path, &args, NULL)); 2043 } 2044 2045 /* 2046 * Evaluate a path that should return an integer. 2047 */ 2048 ACPI_STATUS 2049 acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) 2050 { 2051 ACPI_STATUS status; 2052 ACPI_BUFFER buf; 2053 ACPI_OBJECT param; 2054 2055 if (handle == NULL) 2056 handle = ACPI_ROOT_OBJECT; 2057 2058 /* 2059 * Assume that what we've been pointed at is an Integer object, or 2060 * a method that will return an Integer. 2061 */ 2062 buf.Pointer = &param; 2063 buf.Length = sizeof(param); 2064 status = AcpiEvaluateObject(handle, path, NULL, &buf); 2065 if (ACPI_SUCCESS(status)) { 2066 if (param.Type == ACPI_TYPE_INTEGER) 2067 *number = param.Integer.Value; 2068 else 2069 status = AE_TYPE; 2070 } 2071 2072 /* 2073 * In some applications, a method that's expected to return an Integer 2074 * may instead return a Buffer (probably to simplify some internal 2075 * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, 2076 * convert it into an Integer as best we can. 2077 * 2078 * This is a hack.
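 *
 * The evaluation above used a caller-supplied buffer sized for a single
 * ACPI_OBJECT, so when the method returns something larger ACPICA hands
 * back AE_BUFFER_OVERFLOW and updates buf.Length to the size it actually
 * needs; the code below allocates that much, re-evaluates, and converts
 * a returned Buffer with acpi_ConvertBufferToInteger().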
2079 */ 2080 if (status == AE_BUFFER_OVERFLOW) { 2081 if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { 2082 status = AE_NO_MEMORY; 2083 } else { 2084 status = AcpiEvaluateObject(handle, path, NULL, &buf); 2085 if (ACPI_SUCCESS(status)) 2086 status = acpi_ConvertBufferToInteger(&buf, number); 2087 AcpiOsFree(buf.Pointer); 2088 } 2089 } 2090 return (status); 2091 } 2092 2093 ACPI_STATUS 2094 acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) 2095 { 2096 ACPI_OBJECT *p; 2097 UINT8 *val; 2098 int i; 2099 2100 p = (ACPI_OBJECT *)bufp->Pointer; 2101 if (p->Type == ACPI_TYPE_INTEGER) { 2102 *number = p->Integer.Value; 2103 return (AE_OK); 2104 } 2105 if (p->Type != ACPI_TYPE_BUFFER) 2106 return (AE_TYPE); 2107 if (p->Buffer.Length > sizeof(int)) 2108 return (AE_BAD_DATA); 2109 2110 *number = 0; 2111 val = p->Buffer.Pointer; 2112 for (i = 0; i < p->Buffer.Length; i++) 2113 *number += val[i] << (i * 8); 2114 return (AE_OK); 2115 } 2116 2117 /* 2118 * Iterate over the elements of an a package object, calling the supplied 2119 * function for each element. 2120 * 2121 * XXX possible enhancement might be to abort traversal on error. 2122 */ 2123 ACPI_STATUS 2124 acpi_ForeachPackageObject(ACPI_OBJECT *pkg, 2125 void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) 2126 { 2127 ACPI_OBJECT *comp; 2128 int i; 2129 2130 if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) 2131 return (AE_BAD_PARAMETER); 2132 2133 /* Iterate over components */ 2134 i = 0; 2135 comp = pkg->Package.Elements; 2136 for (; i < pkg->Package.Count; i++, comp++) 2137 func(comp, arg); 2138 2139 return (AE_OK); 2140 } 2141 2142 /* 2143 * Find the (index)th resource object in a set. 2144 */ 2145 ACPI_STATUS 2146 acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) 2147 { 2148 ACPI_RESOURCE *rp; 2149 int i; 2150 2151 rp = (ACPI_RESOURCE *)buf->Pointer; 2152 i = index; 2153 while (i-- > 0) { 2154 /* Range check */ 2155 if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) 2156 return (AE_BAD_PARAMETER); 2157 2158 /* Check for terminator */ 2159 if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) 2160 return (AE_NOT_FOUND); 2161 rp = ACPI_NEXT_RESOURCE(rp); 2162 } 2163 if (resp != NULL) 2164 *resp = rp; 2165 2166 return (AE_OK); 2167 } 2168 2169 /* 2170 * Append an ACPI_RESOURCE to an ACPI_BUFFER. 2171 * 2172 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER 2173 * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible 2174 * backing block. If the ACPI_RESOURCE is NULL, return an empty set of 2175 * resources. 2176 */ 2177 #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 2178 2179 ACPI_STATUS 2180 acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) 2181 { 2182 ACPI_RESOURCE *rp; 2183 void *newp; 2184 2185 /* Initialise the buffer if necessary. */ 2186 if (buf->Pointer == NULL) { 2187 buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; 2188 if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) 2189 return (AE_NO_MEMORY); 2190 rp = (ACPI_RESOURCE *)buf->Pointer; 2191 rp->Type = ACPI_RESOURCE_TYPE_END_TAG; 2192 rp->Length = 0; 2193 } 2194 if (res == NULL) 2195 return (AE_OK); 2196 2197 /* 2198 * Scan the current buffer looking for the terminator. 2199 * This will either find the terminator or hit the end 2200 * of the buffer and return an error. 
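 *
 * For reference, a typical caller builds up a set like this (sketch;
 * "res0" and "res1" are hypothetical ACPI_RESOURCE structures):
 *
 *        ACPI_BUFFER rsrcbuf;
 *
 *        rsrcbuf.Pointer = NULL;
 *        rsrcbuf.Length = 0;
 *        acpi_AppendBufferResource(&rsrcbuf, &res0);
 *        acpi_AppendBufferResource(&rsrcbuf, &res1);
 *
 * The first call allocates the backing store and writes the END_TAG
 * terminator, each append then inserts in front of that terminator, and
 * the caller eventually releases rsrcbuf.Pointer with AcpiOsFree().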
2201 */ 2202 rp = (ACPI_RESOURCE *)buf->Pointer; 2203 for (;;) { 2204 /* Range check, don't go outside the buffer */ 2205 if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) 2206 return (AE_BAD_PARAMETER); 2207 if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) 2208 break; 2209 rp = ACPI_NEXT_RESOURCE(rp); 2210 } 2211 2212 /* 2213 * Check the size of the buffer and expand if required. 2214 * 2215 * Required size is: 2216 * size of existing resources before terminator + 2217 * size of new resource and header + 2218 * size of terminator. 2219 * 2220 * Note that this loop should really only run once, unless 2221 * for some reason we are stuffing a *really* huge resource. 2222 */ 2223 while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + 2224 res->Length + ACPI_RS_SIZE_NO_DATA + 2225 ACPI_RS_SIZE_MIN) >= buf->Length) { 2226 if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) 2227 return (AE_NO_MEMORY); 2228 bcopy(buf->Pointer, newp, buf->Length); 2229 rp = (ACPI_RESOURCE *)((u_int8_t *)newp + 2230 ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); 2231 AcpiOsFree(buf->Pointer); 2232 buf->Pointer = newp; 2233 buf->Length += buf->Length; 2234 } 2235 2236 /* Insert the new resource. */ 2237 bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); 2238 2239 /* And add the terminator. */ 2240 rp = ACPI_NEXT_RESOURCE(rp); 2241 rp->Type = ACPI_RESOURCE_TYPE_END_TAG; 2242 rp->Length = 0; 2243 2244 return (AE_OK); 2245 } 2246 2247 /* 2248 * Set interrupt model. 2249 */ 2250 ACPI_STATUS 2251 acpi_SetIntrModel(int model) 2252 { 2253 2254 return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); 2255 } 2256 2257 /* 2258 * DEPRECATED. This interface has serious deficiencies and will be 2259 * removed. 2260 * 2261 * Immediately enter the sleep state. In the old model, acpiconf(8) ran 2262 * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 2263 */ 2264 ACPI_STATUS 2265 acpi_SetSleepState(struct acpi_softc *sc, int state) 2266 { 2267 static int once; 2268 2269 if (!once) { 2270 printf( 2271 "warning: acpi_SetSleepState() deprecated, need to update your software\n"); 2272 once = 1; 2273 } 2274 return (acpi_EnterSleepState(sc, state)); 2275 } 2276 2277 static void 2278 acpi_sleep_force(void *arg) 2279 { 2280 struct acpi_softc *sc; 2281 2282 printf("acpi: suspend request timed out, forcing sleep now\n"); 2283 sc = arg; 2284 if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) 2285 printf("acpi: force sleep state S%d failed\n", sc->acpi_next_sstate); 2286 } 2287 2288 /* 2289 * Request that the system enter the given suspend state. All /dev/apm 2290 * devices and devd(8) will be notified. Userland then has a chance to 2291 * save state and acknowledge the request. The system sleeps once all 2292 * acks are in. 2293 */ 2294 int 2295 acpi_ReqSleepState(struct acpi_softc *sc, int state) 2296 { 2297 struct apm_clone_data *clone; 2298 2299 if (state < ACPI_STATE_S1 || state > ACPI_STATE_S5) 2300 return (EINVAL); 2301 2302 /* S5 (soft-off) should be entered directly with no waiting. */ 2303 if (state == ACPI_STATE_S5) { 2304 if (ACPI_SUCCESS(acpi_EnterSleepState(sc, state))) 2305 return (0); 2306 else 2307 return (ENXIO); 2308 } 2309 2310 #if !defined(__i386__) 2311 /* This platform does not support acpi suspend/resume. */ 2312 return (EOPNOTSUPP); 2313 #endif 2314 2315 /* If a suspend request is already in progress, just return. 
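 *
 * Requests arrive here either from the ACPIIO_REQSLPSTATE ioctl below or
 * from the power/sleep button handlers via acpi_system_eventhandler_sleep(),
 * so a power button press effectively becomes, roughly,
 *
 *        acpi_ReqSleepState(sc, sc->acpi_power_button_sx);
 *
 * where acpi_power_button_sx holds the sleep state configured for that
 * button.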
*/ 2316 ACPI_LOCK(acpi); 2317 if (sc->acpi_next_sstate != 0) { 2318 ACPI_UNLOCK(acpi); 2319 return (0); 2320 } 2321 2322 /* Record the pending state and notify all apm devices. */ 2323 sc->acpi_next_sstate = state; 2324 STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { 2325 clone->notify_status = APM_EV_NONE; 2326 if ((clone->flags & ACPI_EVF_DEVD) == 0) { 2327 selwakeuppri(&clone->sel_read, PZERO); 2328 KNOTE_UNLOCKED(&clone->sel_read.si_note, 0); 2329 } 2330 } 2331 2332 /* If devd(8) is not running, immediately enter the sleep state. */ 2333 if (devctl_process_running() == FALSE) { 2334 ACPI_UNLOCK(acpi); 2335 if (ACPI_SUCCESS(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) { 2336 return (0); 2337 } else { 2338 return (ENXIO); 2339 } 2340 } 2341 2342 /* Now notify devd(8) also. */ 2343 acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); 2344 2345 /* 2346 * Set a timeout to fire if userland doesn't ack the suspend request 2347 * in time. This way we still eventually go to sleep if we were 2348 * overheating or running low on battery, even if userland is hung. 2349 * We cancel this timeout once all userland acks are in or the 2350 * suspend request is aborted. 2351 */ 2352 callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); 2353 ACPI_UNLOCK(acpi); 2354 return (0); 2355 } 2356 2357 /* 2358 * Acknowledge (or reject) a pending sleep state. The caller has 2359 * prepared for suspend and is now ready for it to proceed. If the 2360 * error argument is non-zero, it indicates suspend should be cancelled 2361 * and gives an errno value describing why. Once all votes are in, 2362 * we suspend the system. 2363 */ 2364 int 2365 acpi_AckSleepState(struct apm_clone_data *clone, int error) 2366 { 2367 struct acpi_softc *sc; 2368 int ret, sleeping; 2369 2370 #if !defined(__i386__) 2371 /* This platform does not support acpi suspend/resume. */ 2372 return (EOPNOTSUPP); 2373 #endif 2374 2375 /* If no pending sleep state, return an error. */ 2376 ACPI_LOCK(acpi); 2377 sc = clone->acpi_sc; 2378 if (sc->acpi_next_sstate == 0) { 2379 ACPI_UNLOCK(acpi); 2380 return (ENXIO); 2381 } 2382 2383 /* Caller wants to abort suspend process. */ 2384 if (error) { 2385 sc->acpi_next_sstate = 0; 2386 callout_stop(&sc->susp_force_to); 2387 printf("acpi: listener on %s cancelled the pending suspend\n", 2388 devtoname(clone->cdev)); 2389 ACPI_UNLOCK(acpi); 2390 return (0); 2391 } 2392 2393 /* 2394 * Mark this device as acking the suspend request. Then, walk through 2395 * all devices, seeing if they agree yet. We only count devices that 2396 * are writable since read-only devices couldn't ack the request. 2397 */ 2398 clone->notify_status = APM_EV_ACKED; 2399 sleeping = TRUE; 2400 STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { 2401 if ((clone->flags & ACPI_EVF_WRITE) != 0 && 2402 clone->notify_status != APM_EV_ACKED) { 2403 sleeping = FALSE; 2404 break; 2405 } 2406 } 2407 2408 /* If all devices have voted "yes", we will suspend now. 
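 *
 * Userland acknowledges through the ACPIIO_ACKSLPSTATE ioctl, passing 0
 * to approve or an errno value to veto the suspend; roughly (the fd below
 * is an illustrative descriptor for an open /dev/acpi node):
 *
 *        int err = 0;
 *        ioctl(fd, ACPIIO_ACKSLPSTATE, &err);
 *
 * If some listener never answers, the 10 second callout armed in
 * acpi_ReqSleepState() forces the sleep anyway.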
*/ 2409 if (sleeping) 2410 callout_stop(&sc->susp_force_to); 2411 ACPI_UNLOCK(acpi); 2412 ret = 0; 2413 if (sleeping) { 2414 if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) 2415 ret = ENODEV; 2416 } 2417 2418 return (ret); 2419 } 2420 2421 static void 2422 acpi_sleep_enable(void *arg) 2423 { 2424 2425 ((struct acpi_softc *)arg)->acpi_sleep_disabled = 0; 2426 } 2427 2428 enum acpi_sleep_state { 2429 ACPI_SS_NONE, 2430 ACPI_SS_GPE_SET, 2431 ACPI_SS_DEV_SUSPEND, 2432 ACPI_SS_SLP_PREP, 2433 ACPI_SS_SLEPT, 2434 }; 2435 2436 /* 2437 * Enter the desired system sleep state. 2438 * 2439 * Currently we support S1-S5 but S4 is only S4BIOS 2440 */ 2441 static ACPI_STATUS 2442 acpi_EnterSleepState(struct acpi_softc *sc, int state) 2443 { 2444 ACPI_STATUS status; 2445 UINT8 TypeA; 2446 UINT8 TypeB; 2447 enum acpi_sleep_state slp_state; 2448 2449 ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); 2450 2451 /* Re-entry once we're suspending is not allowed. */ 2452 status = AE_OK; 2453 ACPI_LOCK(acpi); 2454 if (sc->acpi_sleep_disabled) { 2455 ACPI_UNLOCK(acpi); 2456 printf("acpi: suspend request ignored (not ready yet)\n"); 2457 return (AE_ERROR); 2458 } 2459 sc->acpi_sleep_disabled = 1; 2460 ACPI_UNLOCK(acpi); 2461 2462 /* 2463 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE 2464 * drivers need this. 2465 */ 2466 mtx_lock(&Giant); 2467 slp_state = ACPI_SS_NONE; 2468 switch (state) { 2469 case ACPI_STATE_S1: 2470 case ACPI_STATE_S2: 2471 case ACPI_STATE_S3: 2472 case ACPI_STATE_S4: 2473 status = AcpiGetSleepTypeData(state, &TypeA, &TypeB); 2474 if (status == AE_NOT_FOUND) { 2475 device_printf(sc->acpi_dev, 2476 "Sleep state S%d not supported by BIOS\n", state); 2477 break; 2478 } else if (ACPI_FAILURE(status)) { 2479 device_printf(sc->acpi_dev, "AcpiGetSleepTypeData failed - %s\n", 2480 AcpiFormatException(status)); 2481 break; 2482 } 2483 2484 sc->acpi_sstate = state; 2485 2486 /* Enable any GPEs as appropriate and requested by the user. */ 2487 acpi_wake_prep_walk(state); 2488 slp_state = ACPI_SS_GPE_SET; 2489 2490 /* 2491 * Inform all devices that we are going to sleep. If at least one 2492 * device fails, DEVICE_SUSPEND() automatically resumes the tree. 2493 * 2494 * XXX Note that a better two-pass approach with a 'veto' pass 2495 * followed by a "real thing" pass would be better, but the current 2496 * bus interface does not provide for this. 2497 */ 2498 if (DEVICE_SUSPEND(root_bus) != 0) { 2499 device_printf(sc->acpi_dev, "device_suspend failed\n"); 2500 break; 2501 } 2502 slp_state = ACPI_SS_DEV_SUSPEND; 2503 2504 /* If testing device suspend only, back out of everything here. */ 2505 if (acpi_susp_bounce) 2506 break; 2507 2508 status = AcpiEnterSleepStatePrep(state); 2509 if (ACPI_FAILURE(status)) { 2510 device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", 2511 AcpiFormatException(status)); 2512 break; 2513 } 2514 slp_state = ACPI_SS_SLP_PREP; 2515 2516 if (sc->acpi_sleep_delay > 0) 2517 DELAY(sc->acpi_sleep_delay * 1000000); 2518 2519 if (state != ACPI_STATE_S1) { 2520 acpi_sleep_machdep(sc, state); 2521 2522 /* Re-enable ACPI hardware on wakeup from sleep state 4. 
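 *
 * S4 support here is S4BIOS (see the function header above), and firmware
 * may hand control back with ACPI mode turned off after such a resume,
 * which is presumably why SCI enable is restored with AcpiEnable() in that
 * case; S1, by contrast, never leaves ACPI mode, so it is entered directly
 * below with interrupts disabled.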
*/ 2523 if (state == ACPI_STATE_S4) 2524 AcpiEnable(); 2525 } else { 2526 ACPI_DISABLE_IRQS(); 2527 status = AcpiEnterSleepState(state); 2528 if (ACPI_FAILURE(status)) { 2529 device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", 2530 AcpiFormatException(status)); 2531 break; 2532 } 2533 } 2534 slp_state = ACPI_SS_SLEPT; 2535 break; 2536 case ACPI_STATE_S5: 2537 /* 2538 * Shut down cleanly and power off. This will call us back through the 2539 * shutdown handlers. 2540 */ 2541 shutdown_nice(RB_POWEROFF); 2542 break; 2543 case ACPI_STATE_S0: 2544 default: 2545 status = AE_BAD_PARAMETER; 2546 break; 2547 } 2548 2549 /* 2550 * Back out state according to how far along we got in the suspend 2551 * process. This handles both the error and success cases. 2552 */ 2553 sc->acpi_next_sstate = 0; 2554 if (slp_state >= ACPI_SS_GPE_SET) { 2555 acpi_wake_prep_walk(state); 2556 sc->acpi_sstate = ACPI_STATE_S0; 2557 } 2558 if (slp_state >= ACPI_SS_SLP_PREP) 2559 AcpiLeaveSleepState(state); 2560 if (slp_state >= ACPI_SS_DEV_SUSPEND) 2561 DEVICE_RESUME(root_bus); 2562 if (slp_state >= ACPI_SS_SLEPT) 2563 acpi_enable_fixed_events(sc); 2564 2565 /* Allow another sleep request after a while. */ 2566 if (state != ACPI_STATE_S5) 2567 timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME); 2568 2569 /* Run /etc/rc.resume after we are back. */ 2570 acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); 2571 2572 mtx_unlock(&Giant); 2573 return_ACPI_STATUS (status); 2574 } 2575 2576 /* Initialize a device's wake GPE. */ 2577 int 2578 acpi_wake_init(device_t dev, int type) 2579 { 2580 struct acpi_prw_data prw; 2581 2582 /* Evaluate _PRW to find the GPE. */ 2583 if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) 2584 return (ENXIO); 2585 2586 /* Set the requested type for the GPE (runtime, wake, or both). */ 2587 if (ACPI_FAILURE(AcpiSetGpeType(prw.gpe_handle, prw.gpe_bit, type))) { 2588 device_printf(dev, "set GPE type failed\n"); 2589 return (ENXIO); 2590 } 2591 2592 return (0); 2593 } 2594 2595 /* Enable or disable the device's wake GPE. */ 2596 int 2597 acpi_wake_set_enable(device_t dev, int enable) 2598 { 2599 struct acpi_prw_data prw; 2600 ACPI_STATUS status; 2601 int flags; 2602 2603 /* Make sure the device supports waking the system and get the GPE. */ 2604 if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) 2605 return (ENXIO); 2606 2607 flags = acpi_get_flags(dev); 2608 if (enable) { 2609 status = AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); 2610 if (ACPI_FAILURE(status)) { 2611 device_printf(dev, "enable wake failed\n"); 2612 return (ENXIO); 2613 } 2614 acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); 2615 } else { 2616 status = AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); 2617 if (ACPI_FAILURE(status)) { 2618 device_printf(dev, "disable wake failed\n"); 2619 return (ENXIO); 2620 } 2621 acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); 2622 } 2623 2624 return (0); 2625 } 2626 2627 static int 2628 acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) 2629 { 2630 struct acpi_prw_data prw; 2631 device_t dev; 2632 2633 /* Check that this is a wake-capable device and get its GPE. */ 2634 if (acpi_parse_prw(handle, &prw) != 0) 2635 return (ENXIO); 2636 dev = acpi_get_device(handle); 2637 2638 /* 2639 * The destination sleep state must be less than (i.e., higher power) 2640 * or equal to the value specified by _PRW. If this GPE cannot be 2641 * enabled for the next sleep state, then disable it. 
If it can and 2642 * the user requested it be enabled, turn on any required power resources 2643 * and set _PSW. 2644 */ 2645 if (sstate > prw.lowest_wake) { 2646 AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); 2647 if (bootverbose) 2648 device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", 2649 acpi_name(handle), sstate); 2650 } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { 2651 acpi_pwr_wake_enable(handle, 1); 2652 acpi_SetInteger(handle, "_PSW", 1); 2653 if (bootverbose) 2654 device_printf(dev, "wake_prep enabled for %s (S%d)\n", 2655 acpi_name(handle), sstate); 2656 } 2657 2658 return (0); 2659 } 2660 2661 static int 2662 acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) 2663 { 2664 struct acpi_prw_data prw; 2665 device_t dev; 2666 2667 /* 2668 * Check that this is a wake-capable device and get its GPE. Return 2669 * now if the user didn't enable this device for wake. 2670 */ 2671 if (acpi_parse_prw(handle, &prw) != 0) 2672 return (ENXIO); 2673 dev = acpi_get_device(handle); 2674 if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) 2675 return (0); 2676 2677 /* 2678 * If this GPE couldn't be enabled for the previous sleep state, it was 2679 * disabled before going to sleep so re-enable it. If it was enabled, 2680 * clear _PSW and turn off any power resources it used. 2681 */ 2682 if (sstate > prw.lowest_wake) { 2683 AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); 2684 if (bootverbose) 2685 device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); 2686 } else { 2687 acpi_SetInteger(handle, "_PSW", 0); 2688 acpi_pwr_wake_enable(handle, 0); 2689 if (bootverbose) 2690 device_printf(dev, "run_prep cleaned up for %s\n", 2691 acpi_name(handle)); 2692 } 2693 2694 return (0); 2695 } 2696 2697 static ACPI_STATUS 2698 acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) 2699 { 2700 int sstate; 2701 2702 /* If suspending, run the sleep prep function, otherwise wake. */ 2703 sstate = *(int *)context; 2704 if (AcpiGbl_SystemAwakeAndRunning) 2705 acpi_wake_sleep_prep(handle, sstate); 2706 else 2707 acpi_wake_run_prep(handle, sstate); 2708 return (AE_OK); 2709 } 2710 2711 /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ 2712 static int 2713 acpi_wake_prep_walk(int sstate) 2714 { 2715 ACPI_HANDLE sb_handle; 2716 2717 if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) 2718 AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, 2719 acpi_wake_prep, &sstate, NULL); 2720 return (0); 2721 } 2722 2723 /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. 
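 *
 * The handler added below appears as an integer "wake" node under each
 * wake-capable device's sysctl tree, so such a device can be armed from
 * userland with something like (device name purely an example):
 *
 *        sysctl dev.uhci.0.wake=1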
*/ 2724 static int 2725 acpi_wake_sysctl_walk(device_t dev) 2726 { 2727 int error, i, numdevs; 2728 device_t *devlist; 2729 device_t child; 2730 ACPI_STATUS status; 2731 2732 error = device_get_children(dev, &devlist, &numdevs); 2733 if (error != 0 || numdevs == 0) { 2734 if (numdevs == 0) 2735 free(devlist, M_TEMP); 2736 return (error); 2737 } 2738 for (i = 0; i < numdevs; i++) { 2739 child = devlist[i]; 2740 acpi_wake_sysctl_walk(child); 2741 if (!device_is_attached(child)) 2742 continue; 2743 status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); 2744 if (ACPI_SUCCESS(status)) { 2745 SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), 2746 SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, 2747 "wake", CTLTYPE_INT | CTLFLAG_RW, child, 0, 2748 acpi_wake_set_sysctl, "I", "Device set to wake the system"); 2749 } 2750 } 2751 free(devlist, M_TEMP); 2752 2753 return (0); 2754 } 2755 2756 /* Enable or disable wake from userland. */ 2757 static int 2758 acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) 2759 { 2760 int enable, error; 2761 device_t dev; 2762 2763 dev = (device_t)arg1; 2764 enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; 2765 2766 error = sysctl_handle_int(oidp, &enable, 0, req); 2767 if (error != 0 || req->newptr == NULL) 2768 return (error); 2769 if (enable != 0 && enable != 1) 2770 return (EINVAL); 2771 2772 return (acpi_wake_set_enable(dev, enable)); 2773 } 2774 2775 /* Parse a device's _PRW into a structure. */ 2776 int 2777 acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) 2778 { 2779 ACPI_STATUS status; 2780 ACPI_BUFFER prw_buffer; 2781 ACPI_OBJECT *res, *res2; 2782 int error, i, power_count; 2783 2784 if (h == NULL || prw == NULL) 2785 return (EINVAL); 2786 2787 /* 2788 * The _PRW object (7.2.9) is only required for devices that have the 2789 * ability to wake the system from a sleeping state. 2790 */ 2791 error = EINVAL; 2792 prw_buffer.Pointer = NULL; 2793 prw_buffer.Length = ACPI_ALLOCATE_BUFFER; 2794 status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); 2795 if (ACPI_FAILURE(status)) 2796 return (ENOENT); 2797 res = (ACPI_OBJECT *)prw_buffer.Pointer; 2798 if (res == NULL) 2799 return (ENOENT); 2800 if (!ACPI_PKG_VALID(res, 2)) 2801 goto out; 2802 2803 /* 2804 * Element 1 of the _PRW object: 2805 * The lowest power system sleeping state that can be entered while still 2806 * providing wake functionality. The sleeping state being entered must 2807 * be less than (i.e., higher power) or equal to this value. 2808 */ 2809 if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) 2810 goto out; 2811 2812 /* 2813 * Element 0 of the _PRW object: 2814 */ 2815 switch (res->Package.Elements[0].Type) { 2816 case ACPI_TYPE_INTEGER: 2817 /* 2818 * If the data type of this package element is numeric, then this 2819 * _PRW package element is the bit index in the GPEx_EN, in the 2820 * GPE blocks described in the FADT, of the enable bit that is 2821 * enabled for the wake event. 2822 */ 2823 prw->gpe_handle = NULL; 2824 prw->gpe_bit = res->Package.Elements[0].Integer.Value; 2825 error = 0; 2826 break; 2827 case ACPI_TYPE_PACKAGE: 2828 /* 2829 * If the data type of this package element is a package, then this 2830 * _PRW package element is itself a package containing two 2831 * elements. The first is an object reference to the GPE Block 2832 * device that contains the GPE that will be triggered by the wake 2833 * event. 
The second element is numeric and it contains the bit 2834 * index in the GPEx_EN, in the GPE Block referenced by the 2835 * first element in the package, of the enable bit that is enabled for 2836 * the wake event. 2837 * 2838 * For example, if this field is a package then it is of the form: 2839 * Package() {\_SB.PCI0.ISA.GPE, 2} 2840 */ 2841 res2 = &res->Package.Elements[0]; 2842 if (!ACPI_PKG_VALID(res2, 2)) 2843 goto out; 2844 prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); 2845 if (prw->gpe_handle == NULL) 2846 goto out; 2847 if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) 2848 goto out; 2849 error = 0; 2850 break; 2851 default: 2852 goto out; 2853 } 2854 2855 /* Elements 2 to N of the _PRW object are power resources. */ 2856 power_count = res->Package.Count - 2; 2857 if (power_count > ACPI_PRW_MAX_POWERRES) { 2858 printf("ACPI device %s has too many power resources\n", acpi_name(h)); 2859 power_count = 0; 2860 } 2861 prw->power_res_count = power_count; 2862 for (i = 0; i < power_count; i++) 2863 prw->power_res[i] = res->Package.Elements[i]; 2864 2865 out: 2866 if (prw_buffer.Pointer != NULL) 2867 AcpiOsFree(prw_buffer.Pointer); 2868 return (error); 2869 } 2870 2871 /* 2872 * ACPI Event Handlers 2873 */ 2874 2875 /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ 2876 2877 static void 2878 acpi_system_eventhandler_sleep(void *arg, int state) 2879 { 2880 int ret; 2881 2882 ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); 2883 2884 /* Check if button action is disabled. */ 2885 if (state == ACPI_S_STATES_MAX + 1) 2886 return; 2887 2888 /* Request that the system prepare to enter the given suspend state. */ 2889 ret = acpi_ReqSleepState((struct acpi_softc *)arg, state); 2890 if (ret != 0) 2891 printf("acpi: request to enter state S%d failed (err %d)\n", 2892 state, ret); 2893 2894 return_VOID; 2895 } 2896 2897 static void 2898 acpi_system_eventhandler_wakeup(void *arg, int state) 2899 { 2900 2901 ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); 2902 2903 /* Currently, nothing to do for wakeup. 
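 *
 * Other kernel code interested in these transitions can register on the
 * acpi_sleep_event and acpi_wakeup_event eventhandlers invoked below,
 * e.g. (sketch; "my_wakeup_fn" and "sc" are hypothetical, and the
 * priority macro is assumed to come from acpivar.h):
 *
 *        EVENTHANDLER_REGISTER(acpi_wakeup_event, my_wakeup_fn, sc,
 *            ACPI_EVENT_PRI_DEFAULT);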
*/ 2904 2905 return_VOID; 2906 } 2907 2908 /* 2909 * ACPICA Event Handlers (FixedEvent, also called from button notify handler) 2910 */ 2911 UINT32 2912 acpi_event_power_button_sleep(void *context) 2913 { 2914 struct acpi_softc *sc = (struct acpi_softc *)context; 2915 2916 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2917 2918 EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_power_button_sx); 2919 2920 return_VALUE (ACPI_INTERRUPT_HANDLED); 2921 } 2922 2923 UINT32 2924 acpi_event_power_button_wake(void *context) 2925 { 2926 struct acpi_softc *sc = (struct acpi_softc *)context; 2927 2928 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2929 2930 EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_power_button_sx); 2931 2932 return_VALUE (ACPI_INTERRUPT_HANDLED); 2933 } 2934 2935 UINT32 2936 acpi_event_sleep_button_sleep(void *context) 2937 { 2938 struct acpi_softc *sc = (struct acpi_softc *)context; 2939 2940 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2941 2942 EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_sleep_button_sx); 2943 2944 return_VALUE (ACPI_INTERRUPT_HANDLED); 2945 } 2946 2947 UINT32 2948 acpi_event_sleep_button_wake(void *context) 2949 { 2950 struct acpi_softc *sc = (struct acpi_softc *)context; 2951 2952 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2953 2954 EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_sleep_button_sx); 2955 2956 return_VALUE (ACPI_INTERRUPT_HANDLED); 2957 } 2958 2959 /* 2960 * XXX This static buffer is suboptimal. There is no locking so only 2961 * use this for single-threaded callers. 2962 */ 2963 char * 2964 acpi_name(ACPI_HANDLE handle) 2965 { 2966 ACPI_BUFFER buf; 2967 static char data[256]; 2968 2969 buf.Length = sizeof(data); 2970 buf.Pointer = data; 2971 2972 if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) 2973 return (data); 2974 return ("(unknown)"); 2975 } 2976 2977 /* 2978 * Debugging/bug-avoidance. Avoid trying to fetch info on various 2979 * parts of the namespace. 2980 */ 2981 int 2982 acpi_avoid(ACPI_HANDLE handle) 2983 { 2984 char *cp, *env, *np; 2985 int len; 2986 2987 np = acpi_name(handle); 2988 if (*np == '\\') 2989 np++; 2990 if ((env = getenv("debug.acpi.avoid")) == NULL) 2991 return (0); 2992 2993 /* Scan the avoid list checking for a match */ 2994 cp = env; 2995 for (;;) { 2996 while (*cp != 0 && isspace(*cp)) 2997 cp++; 2998 if (*cp == 0) 2999 break; 3000 len = 0; 3001 while (cp[len] != 0 && !isspace(cp[len])) 3002 len++; 3003 if (!strncmp(cp, np, len)) { 3004 freeenv(env); 3005 return(1); 3006 } 3007 cp += len; 3008 } 3009 freeenv(env); 3010 3011 return (0); 3012 } 3013 3014 /* 3015 * Debugging/bug-avoidance. Disable ACPI subsystem components. 3016 */ 3017 int 3018 acpi_disabled(char *subsys) 3019 { 3020 char *cp, *env; 3021 int len; 3022 3023 if ((env = getenv("debug.acpi.disabled")) == NULL) 3024 return (0); 3025 if (strcmp(env, "all") == 0) { 3026 freeenv(env); 3027 return (1); 3028 } 3029 3030 /* Scan the disable list, checking for a match. */ 3031 cp = env; 3032 for (;;) { 3033 while (*cp != '\0' && isspace(*cp)) 3034 cp++; 3035 if (*cp == '\0') 3036 break; 3037 len = 0; 3038 while (cp[len] != '\0' && !isspace(cp[len])) 3039 len++; 3040 if (strncmp(cp, subsys, len) == 0) { 3041 freeenv(env); 3042 return (1); 3043 } 3044 cp += len; 3045 } 3046 freeenv(env); 3047 3048 return (0); 3049 } 3050 3051 /* 3052 * Control interface. 3053 * 3054 * We multiplex ioctls for all participating ACPI devices here. 
Individual 3055 * drivers wanting to be accessible via /dev/acpi should use the 3056 * register/deregister interface to make their handlers visible. 3057 */ 3058 struct acpi_ioctl_hook 3059 { 3060 TAILQ_ENTRY(acpi_ioctl_hook) link; 3061 u_long cmd; 3062 acpi_ioctl_fn fn; 3063 void *arg; 3064 }; 3065 3066 static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; 3067 static int acpi_ioctl_hooks_initted; 3068 3069 int 3070 acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) 3071 { 3072 struct acpi_ioctl_hook *hp; 3073 3074 if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) 3075 return (ENOMEM); 3076 hp->cmd = cmd; 3077 hp->fn = fn; 3078 hp->arg = arg; 3079 3080 ACPI_LOCK(acpi); 3081 if (acpi_ioctl_hooks_initted == 0) { 3082 TAILQ_INIT(&acpi_ioctl_hooks); 3083 acpi_ioctl_hooks_initted = 1; 3084 } 3085 TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); 3086 ACPI_UNLOCK(acpi); 3087 3088 return (0); 3089 } 3090 3091 void 3092 acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) 3093 { 3094 struct acpi_ioctl_hook *hp; 3095 3096 ACPI_LOCK(acpi); 3097 TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) 3098 if (hp->cmd == cmd && hp->fn == fn) 3099 break; 3100 3101 if (hp != NULL) { 3102 TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); 3103 free(hp, M_ACPIDEV); 3104 } 3105 ACPI_UNLOCK(acpi); 3106 } 3107 3108 static int 3109 acpiopen(struct cdev *dev, int flag, int fmt, d_thread_t *td) 3110 { 3111 return (0); 3112 } 3113 3114 static int 3115 acpiclose(struct cdev *dev, int flag, int fmt, d_thread_t *td) 3116 { 3117 return (0); 3118 } 3119 3120 static int 3121 acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, d_thread_t *td) 3122 { 3123 struct acpi_softc *sc; 3124 struct acpi_ioctl_hook *hp; 3125 int error, state; 3126 3127 error = 0; 3128 hp = NULL; 3129 sc = dev->si_drv1; 3130 3131 /* 3132 * Scan the list of registered ioctls, looking for handlers. 3133 */ 3134 ACPI_LOCK(acpi); 3135 if (acpi_ioctl_hooks_initted) 3136 TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { 3137 if (hp->cmd == cmd) 3138 break; 3139 } 3140 ACPI_UNLOCK(acpi); 3141 if (hp) 3142 return (hp->fn(cmd, addr, hp->arg)); 3143 3144 /* 3145 * Core ioctls are not permitted for non-writable user. 3146 * Currently, other ioctls just fetch information. 3147 * Not changing system behavior. 3148 */ 3149 if ((flag & FWRITE) == 0) 3150 return (EPERM); 3151 3152 /* Core system ioctls. 
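 *
 * From userland these are reached through /dev/acpi, e.g. (error handling
 * omitted; acpiconf(8) is the usual consumer):
 *
 *        int fd = open("/dev/acpi", O_RDWR);
 *        int state = ACPI_STATE_S3;
 *
 *        ioctl(fd, ACPIIO_REQSLPSTATE, &state);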
*/ 3153 switch (cmd) { 3154 case ACPIIO_REQSLPSTATE: 3155 state = *(int *)addr; 3156 if (state != ACPI_STATE_S5) 3157 error = acpi_ReqSleepState(sc, state); 3158 else { 3159 printf("power off via acpi ioctl not supported\n"); 3160 error = ENXIO; 3161 } 3162 break; 3163 case ACPIIO_ACKSLPSTATE: 3164 error = *(int *)addr; 3165 error = acpi_AckSleepState(sc->acpi_clone, error); 3166 break; 3167 case ACPIIO_SETSLPSTATE: /* DEPRECATED */ 3168 error = EINVAL; 3169 state = *(int *)addr; 3170 if (state >= ACPI_STATE_S0 && state <= ACPI_S_STATES_MAX) 3171 if (ACPI_SUCCESS(acpi_SetSleepState(sc, state))) 3172 error = 0; 3173 break; 3174 default: 3175 error = ENXIO; 3176 break; 3177 } 3178 3179 return (error); 3180 } 3181 3182 static int 3183 acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) 3184 { 3185 int error; 3186 struct sbuf sb; 3187 UINT8 state, TypeA, TypeB; 3188 3189 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 3190 for (state = ACPI_STATE_S1; state < ACPI_S_STATES_MAX + 1; state++) 3191 if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) 3192 sbuf_printf(&sb, "S%d ", state); 3193 sbuf_trim(&sb); 3194 sbuf_finish(&sb); 3195 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 3196 sbuf_delete(&sb); 3197 return (error); 3198 } 3199 3200 static int 3201 acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) 3202 { 3203 char sleep_state[10]; 3204 int error; 3205 u_int new_state, old_state; 3206 3207 old_state = *(u_int *)oidp->oid_arg1; 3208 if (old_state > ACPI_S_STATES_MAX + 1) 3209 strlcpy(sleep_state, "unknown", sizeof(sleep_state)); 3210 else 3211 strlcpy(sleep_state, sleep_state_names[old_state], sizeof(sleep_state)); 3212 error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); 3213 if (error == 0 && req->newptr != NULL) { 3214 new_state = ACPI_STATE_S0; 3215 for (; new_state <= ACPI_S_STATES_MAX + 1; new_state++) 3216 if (strcmp(sleep_state, sleep_state_names[new_state]) == 0) 3217 break; 3218 if (new_state <= ACPI_S_STATES_MAX + 1) { 3219 if (new_state != old_state) 3220 *(u_int *)oidp->oid_arg1 = new_state; 3221 } else 3222 error = EINVAL; 3223 } 3224 3225 return (error); 3226 } 3227 3228 /* Inform devctl(4) when we receive a Notify. */ 3229 void 3230 acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) 3231 { 3232 char notify_buf[16]; 3233 ACPI_BUFFER handle_buf; 3234 ACPI_STATUS status; 3235 3236 if (subsystem == NULL) 3237 return; 3238 3239 handle_buf.Pointer = NULL; 3240 handle_buf.Length = ACPI_ALLOCATE_BUFFER; 3241 status = AcpiNsHandleToPathname(h, &handle_buf); 3242 if (ACPI_FAILURE(status)) 3243 return; 3244 snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); 3245 devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); 3246 AcpiOsFree(handle_buf.Pointer); 3247 } 3248 3249 #ifdef ACPI_DEBUG 3250 /* 3251 * Support for parsing debug options from the kernel environment. 3252 * 3253 * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers 3254 * by specifying the names of the bits in the debug.acpi.layer and 3255 * debug.acpi.level environment variables. Bits may be unset by 3256 * prefixing the bit name with !. 
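 *
 * For example, loader.conf(5) entries such as
 *
 *        debug.acpi.layer="ACPI_EVENTS ACPI_EC"
 *        debug.acpi.level="ACPI_LV_ERROR !ACPI_LV_INFO"
 *
 * enable event and embedded-controller tracing while masking the
 * informational level; the same strings may later be written through the
 * debug.acpi.layer and debug.acpi.level sysctls defined at the end of
 * this block.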
3257 */ 3258 struct debugtag 3259 { 3260 char *name; 3261 UINT32 value; 3262 }; 3263 3264 static struct debugtag dbg_layer[] = { 3265 {"ACPI_UTILITIES", ACPI_UTILITIES}, 3266 {"ACPI_HARDWARE", ACPI_HARDWARE}, 3267 {"ACPI_EVENTS", ACPI_EVENTS}, 3268 {"ACPI_TABLES", ACPI_TABLES}, 3269 {"ACPI_NAMESPACE", ACPI_NAMESPACE}, 3270 {"ACPI_PARSER", ACPI_PARSER}, 3271 {"ACPI_DISPATCHER", ACPI_DISPATCHER}, 3272 {"ACPI_EXECUTER", ACPI_EXECUTER}, 3273 {"ACPI_RESOURCES", ACPI_RESOURCES}, 3274 {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, 3275 {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, 3276 {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, 3277 {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, 3278 3279 {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, 3280 {"ACPI_BATTERY", ACPI_BATTERY}, 3281 {"ACPI_BUS", ACPI_BUS}, 3282 {"ACPI_BUTTON", ACPI_BUTTON}, 3283 {"ACPI_EC", ACPI_EC}, 3284 {"ACPI_FAN", ACPI_FAN}, 3285 {"ACPI_POWERRES", ACPI_POWERRES}, 3286 {"ACPI_PROCESSOR", ACPI_PROCESSOR}, 3287 {"ACPI_THERMAL", ACPI_THERMAL}, 3288 {"ACPI_TIMER", ACPI_TIMER}, 3289 {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, 3290 {NULL, 0} 3291 }; 3292 3293 static struct debugtag dbg_level[] = { 3294 {"ACPI_LV_ERROR", ACPI_LV_ERROR}, 3295 {"ACPI_LV_WARN", ACPI_LV_WARN}, 3296 {"ACPI_LV_INIT", ACPI_LV_INIT}, 3297 {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, 3298 {"ACPI_LV_INFO", ACPI_LV_INFO}, 3299 {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, 3300 3301 /* Trace verbosity level 1 [Standard Trace Level] */ 3302 {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, 3303 {"ACPI_LV_PARSE", ACPI_LV_PARSE}, 3304 {"ACPI_LV_LOAD", ACPI_LV_LOAD}, 3305 {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, 3306 {"ACPI_LV_EXEC", ACPI_LV_EXEC}, 3307 {"ACPI_LV_NAMES", ACPI_LV_NAMES}, 3308 {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, 3309 {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, 3310 {"ACPI_LV_TABLES", ACPI_LV_TABLES}, 3311 {"ACPI_LV_VALUES", ACPI_LV_VALUES}, 3312 {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, 3313 {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, 3314 {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, 3315 {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, 3316 {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, 3317 3318 /* Trace verbosity level 2 [Function tracing and memory allocation] */ 3319 {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, 3320 {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, 3321 {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, 3322 {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, 3323 {"ACPI_LV_ALL", ACPI_LV_ALL}, 3324 3325 /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ 3326 {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, 3327 {"ACPI_LV_THREADS", ACPI_LV_THREADS}, 3328 {"ACPI_LV_IO", ACPI_LV_IO}, 3329 {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, 3330 {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, 3331 3332 /* Exceptionally verbose output -- also used in the global "DebugLevel" */ 3333 {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, 3334 {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, 3335 {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, 3336 {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, 3337 {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, 3338 {NULL, 0} 3339 }; 3340 3341 static void 3342 acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) 3343 { 3344 char *ep; 3345 int i, l; 3346 int set; 3347 3348 while (*cp) { 3349 if (isspace(*cp)) { 3350 cp++; 3351 continue; 3352 } 3353 ep = cp; 3354 while (*ep && !isspace(*ep)) 3355 ep++; 3356 if (*cp == '!') { 3357 set = 0; 3358 cp++; 3359 if (cp == ep) 3360 continue; 3361 } else { 3362 set = 1; 3363 } 3364 l = ep - cp; 3365 for (i = 0; tag[i].name != NULL; i++) { 3366 if 
(!strncmp(cp, tag[i].name, l)) { 3367 if (set) 3368 *flag |= tag[i].value; 3369 else 3370 *flag &= ~tag[i].value; 3371 } 3372 } 3373 cp = ep; 3374 } 3375 } 3376 3377 static void 3378 acpi_set_debugging(void *junk) 3379 { 3380 char *layer, *level; 3381 3382 if (cold) { 3383 AcpiDbgLayer = 0; 3384 AcpiDbgLevel = 0; 3385 } 3386 3387 layer = getenv("debug.acpi.layer"); 3388 level = getenv("debug.acpi.level"); 3389 if (layer == NULL && level == NULL) 3390 return; 3391 3392 printf("ACPI set debug"); 3393 if (layer != NULL) { 3394 if (strcmp("NONE", layer) != 0) 3395 printf(" layer '%s'", layer); 3396 acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); 3397 freeenv(layer); 3398 } 3399 if (level != NULL) { 3400 if (strcmp("NONE", level) != 0) 3401 printf(" level '%s'", level); 3402 acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); 3403 freeenv(level); 3404 } 3405 printf("\n"); 3406 } 3407 3408 SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, 3409 NULL); 3410 3411 static int 3412 acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) 3413 { 3414 int error, *dbg; 3415 struct debugtag *tag; 3416 struct sbuf sb; 3417 3418 if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) 3419 return (ENOMEM); 3420 if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { 3421 tag = &dbg_layer[0]; 3422 dbg = &AcpiDbgLayer; 3423 } else { 3424 tag = &dbg_level[0]; 3425 dbg = &AcpiDbgLevel; 3426 } 3427 3428 /* Get old values if this is a get request. */ 3429 ACPI_SERIAL_BEGIN(acpi); 3430 if (*dbg == 0) { 3431 sbuf_cpy(&sb, "NONE"); 3432 } else if (req->newptr == NULL) { 3433 for (; tag->name != NULL; tag++) { 3434 if ((*dbg & tag->value) == tag->value) 3435 sbuf_printf(&sb, "%s ", tag->name); 3436 } 3437 } 3438 sbuf_trim(&sb); 3439 sbuf_finish(&sb); 3440 3441 /* Copy out the old values to the user. */ 3442 error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb)); 3443 sbuf_delete(&sb); 3444 3445 /* If the user is setting a string, parse it. */ 3446 if (error == 0 && req->newptr != NULL) { 3447 *dbg = 0; 3448 setenv((char *)oidp->oid_arg1, (char *)req->newptr); 3449 acpi_set_debugging(NULL); 3450 } 3451 ACPI_SERIAL_END(acpi); 3452 3453 return (error); 3454 } 3455 3456 SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING, 3457 "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); 3458 SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING, 3459 "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); 3460 #endif /* ACPI_DEBUG */ 3461 3462 static int 3463 acpi_pm_func(u_long cmd, void *arg, ...) 
3464 { 3465 int state, acpi_state; 3466 int error; 3467 struct acpi_softc *sc; 3468 va_list ap; 3469 3470 error = 0; 3471 switch (cmd) { 3472 case POWER_CMD_SUSPEND: 3473 sc = (struct acpi_softc *)arg; 3474 if (sc == NULL) { 3475 error = EINVAL; 3476 goto out; 3477 } 3478 3479 va_start(ap, arg); 3480 state = va_arg(ap, int); 3481 va_end(ap); 3482 3483 switch (state) { 3484 case POWER_SLEEP_STATE_STANDBY: 3485 acpi_state = sc->acpi_standby_sx; 3486 break; 3487 case POWER_SLEEP_STATE_SUSPEND: 3488 acpi_state = sc->acpi_suspend_sx; 3489 break; 3490 case POWER_SLEEP_STATE_HIBERNATE: 3491 acpi_state = ACPI_STATE_S4; 3492 break; 3493 default: 3494 error = EINVAL; 3495 goto out; 3496 } 3497 3498 if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) 3499 error = ENXIO; 3500 break; 3501 default: 3502 error = EINVAL; 3503 goto out; 3504 } 3505 3506 out: 3507 return (error); 3508 } 3509 3510 static void 3511 acpi_pm_register(void *arg) 3512 { 3513 if (!cold || resource_disabled("acpi", 0)) 3514 return; 3515 3516 power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); 3517 } 3518 3519 SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, 0); 3520
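/*
 * The registration above ties ACPI into the kernel's generic power
 * management hooks, so other code can suspend the machine without knowing
 * about ACPI.  A minimal sketch, assuming the power_pm_suspend() entry
 * point from sys/kern/subr_power.c:
 *
 *        power_pm_suspend(POWER_SLEEP_STATE_SUSPEND);
 *
 * which dispatches to acpi_pm_func(POWER_CMD_SUSPEND, ...) above and from
 * there to acpi_EnterSleepState(sc, sc->acpi_suspend_sx).
 */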