/*-
 * Copyright (c) 2001 Mitsuru IWASAKI
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>

#include <dev/acpica/acpivar.h>

#include <machine/nexusvar.h>

/*
 * APM driver emulation
 */

#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/uio.h>

#include <dev/acpica/acpiio.h>

#include <machine/apm_bios.h>
#include <i386/include/pc/bios.h>

#include <i386/bios/apm.h>

SYSCTL_DECL(_debug_acpi);

int acpi_resume_beep;
TUNABLE_INT("debug.acpi.resume_beep", &acpi_resume_beep);
SYSCTL_INT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RW, &acpi_resume_beep,
    0, "Beep the PC speaker when resuming");

int acpi_reset_video;
TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);

static int intr_model = ACPI_INTR_PIC;
static int apm_active;
static struct clonedevs *apm_clones;

MALLOC_DEFINE(M_APMDEV, "apmdev", "APM device emulation");

static d_open_t apmopen;
static d_close_t apmclose;
static d_write_t apmwrite;
static d_ioctl_t apmioctl;
static d_poll_t apmpoll;
static d_kqfilter_t apmkqfilter;
static void apmreadfiltdetach(struct knote *kn);
static int apmreadfilt(struct knote *kn, long hint);
static struct filterops apm_readfiltops = {
	.f_isfd = 1,
	.f_detach = apmreadfiltdetach,
	.f_event = apmreadfilt,
};

static struct cdevsw apm_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE | D_NEEDMINOR,
	.d_open = apmopen,
	.d_close = apmclose,
	.d_write = apmwrite,
	.d_ioctl = apmioctl,
	.d_poll = apmpoll,
	.d_name = "apm",
	.d_kqfilter = apmkqfilter
};

/*
 * Convert an ACPI battery state/capacity to the numeric APM battery
 * status codes (0 high, 1 low, 2 critical, 3 charging).
 */
static int
acpi_capm_convert_battstate(struct acpi_battinfo *battp)
{
	int state;

	state = APM_UNKNOWN;

	if (battp->state & ACPI_BATT_STAT_DISCHARG) {
		if (battp->cap >= 50)
			state = 0;	/* high */
		else
			state = 1;	/* low */
	}
	if (battp->state & ACPI_BATT_STAT_CRITICAL)
		state = 2;		/* critical */
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		state = 3;		/* charging */

	/* If still unknown, determine it based on the battery capacity. */
	if (state == APM_UNKNOWN) {
		if (battp->cap >= 50)
			state = 0;	/* high */
		else
			state = 1;	/* low */
	}

	return (state);
}

/* Convert an ACPI battery state/capacity to APM battery flags. */
static int
acpi_capm_convert_battflags(struct acpi_battinfo *battp)
{
	int flags;

	flags = 0;

	if (battp->cap >= 50)
		flags |= APM_BATT_HIGH;
	else {
		if (battp->state & ACPI_BATT_STAT_CRITICAL)
			flags |= APM_BATT_CRITICAL;
		else
			flags |= APM_BATT_LOW;
	}
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		flags |= APM_BATT_CHARGING;
	if (battp->state == ACPI_BATT_STAT_NOT_PRESENT)
		flags = APM_BATT_NOT_PRESENT;

	return (flags);
}

/* Fill in an apm_info structure from the current AC line and battery state. */
static int
acpi_capm_get_info(apm_info_t aip)
{
	int acline;
	struct acpi_battinfo batt;

	aip->ai_infoversion = 1;
	aip->ai_major = 1;
	aip->ai_minor = 2;
	aip->ai_status = apm_active;
	aip->ai_capabilities = 0xff00;	/* unknown */

	if (acpi_acad_get_acline(&acline))
		aip->ai_acline = APM_UNKNOWN;	/* unknown */
	else
		aip->ai_acline = acline;	/* on/off */

	if (acpi_battery_get_battinfo(NULL, &batt) != 0) {
		aip->ai_batt_stat = APM_UNKNOWN;
		aip->ai_batt_life = APM_UNKNOWN;
		aip->ai_batt_time = -1;		/* unknown */
		aip->ai_batteries = ~0U;	/* unknown */
	} else {
		aip->ai_batt_stat = acpi_capm_convert_battstate(&batt);
		aip->ai_batt_life = batt.cap;
		aip->ai_batt_time = (batt.min == -1) ? -1 : batt.min * 60;
		aip->ai_batteries = acpi_battery_get_units();
	}

	return (0);
}

/* Fill in an apm_pwstatus structure for a single battery or all batteries. */
static int
acpi_capm_get_pwstatus(apm_pwstatus_t app)
{
	device_t dev;
	int acline, unit, error;
	struct acpi_battinfo batt;

	if (app->ap_device != PMDV_ALLDEV &&
	    (app->ap_device < PMDV_BATT0 || app->ap_device > PMDV_BATT_ALL))
		return (1);

	if (app->ap_device == PMDV_ALLDEV)
		error = acpi_battery_get_battinfo(NULL, &batt);
	else {
		unit = app->ap_device - PMDV_BATT0;
		dev = devclass_get_device(devclass_find("battery"), unit);
		if (dev != NULL)
			error = acpi_battery_get_battinfo(dev, &batt);
		else
			error = ENXIO;
	}
	if (error)
		return (1);

	app->ap_batt_stat = acpi_capm_convert_battstate(&batt);
	app->ap_batt_flag = acpi_capm_convert_battflags(&batt);
	app->ap_batt_life = batt.cap;
	app->ap_batt_time = (batt.min == -1) ? -1 : batt.min * 60;

	if (acpi_acad_get_acline(&acline))
		app->ap_acline = APM_UNKNOWN;
	else
		app->ap_acline = acline;	/* on/off */

	return (0);
}

/* Create single-use devices for /dev/apm and /dev/apmctl. */
static void
apm_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int ctl_dev, unit;

	if (*dev != NULL)
		return;
	if (strcmp(name, "apmctl") == 0)
		ctl_dev = TRUE;
	else if (strcmp(name, "apm") == 0)
		ctl_dev = FALSE;
	else
		return;

	/* Always create a new device and unit number. */
	unit = -1;
	if (clone_create(&apm_clones, &apm_cdevsw, &unit, dev, 0)) {
		if (ctl_dev) {
			*dev = make_dev(&apm_cdevsw, unit,
			    UID_ROOT, GID_OPERATOR, 0660, "apmctl%d", unit);
		} else {
			*dev = make_dev(&apm_cdevsw, unit,
			    UID_ROOT, GID_OPERATOR, 0664, "apm%d", unit);
		}
		if (*dev != NULL) {
			dev_ref(*dev);
			(*dev)->si_flags |= SI_CHEAPCLONE;
		}
	}
}

/* Create a struct for tracking per-device suspend notification. */
static struct apm_clone_data *
apm_create_clone(struct cdev *dev, struct acpi_softc *acpi_sc)
{
	struct apm_clone_data *clone;

	clone = malloc(sizeof(*clone), M_APMDEV, M_WAITOK);
	clone->cdev = dev;
	clone->acpi_sc = acpi_sc;
	clone->notify_status = APM_EV_NONE;
	bzero(&clone->sel_read, sizeof(clone->sel_read));
	knlist_init_mtx(&clone->sel_read.si_note, &acpi_mutex);

	/*
	 * The acpi device is always managed by devd(8) and is considered
	 * writable (i.e., ack is required to allow suspend to proceed.)
	 */
	if (strcmp("acpi", devtoname(dev)) == 0)
		clone->flags = ACPI_EVF_DEVD | ACPI_EVF_WRITE;
	else
		clone->flags = ACPI_EVF_NONE;

	ACPI_LOCK(acpi);
	STAILQ_INSERT_TAIL(&acpi_sc->apm_cdevs, clone, entries);
	ACPI_UNLOCK(acpi);
	return (clone);
}

static int
apmopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct acpi_softc *acpi_sc;
	struct apm_clone_data *clone;

	acpi_sc = devclass_get_softc(devclass_find("acpi"), 0);
	clone = apm_create_clone(dev, acpi_sc);
	dev->si_drv1 = clone;

	/* If the device is opened for write, record that. */
	if ((flag & FWRITE) != 0)
		clone->flags |= ACPI_EVF_WRITE;

	return (0);
}

static int
apmclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct apm_clone_data *clone;
	struct acpi_softc *acpi_sc;

	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	/* We are about to lose a reference so check if suspend should occur */
	if (acpi_sc->acpi_next_sstate != 0 &&
	    clone->notify_status != APM_EV_ACKED)
		acpi_AckSleepState(clone, 0);

	/* Remove this clone's data from the list and free it. */
	ACPI_LOCK(acpi);
	STAILQ_REMOVE(&acpi_sc->apm_cdevs, clone, apm_clone_data, entries);
	knlist_destroy(&clone->sel_read.si_note);
	ACPI_UNLOCK(acpi);
	free(clone, M_APMDEV);
	destroy_dev_sched(dev);
	return (0);
}

static int
apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;
	struct apm_clone_data *clone;
	struct acpi_softc *acpi_sc;
	struct apm_info info;
	struct apm_event_info *ev_info;
	apm_info_old_t aiop;

	error = 0;
	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	switch (cmd) {
	case APMIO_SUSPEND:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_suspend_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_suspend_sx);
			} else {
				printf(
				    "power off via apm suspend not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_STANDBY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_standby_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_standby_sx);
			} else {
				printf(
				    "power off via apm standby not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_NEXTEVENT:
		printf("apm nextevent start\n");
		ACPI_LOCK(acpi);
		if (acpi_sc->acpi_next_sstate != 0 && clone->notify_status ==
		    APM_EV_NONE) {
			ev_info = (struct apm_event_info *)addr;
			if (acpi_sc->acpi_next_sstate <= ACPI_STATE_S3)
				ev_info->type = PMEV_STANDBYREQ;
			else
				ev_info->type = PMEV_SUSPENDREQ;
			ev_info->index = 0;
			clone->notify_status = APM_EV_NOTIFIED;
			printf("apm event returning %d\n", ev_info->type);
		} else
			error = EAGAIN;
		ACPI_UNLOCK(acpi);
		break;
	case APMIO_GETINFO_OLD:
		if (acpi_capm_get_info(&info))
			error = ENXIO;
		aiop = (apm_info_old_t)addr;
		aiop->ai_major = info.ai_major;
		aiop->ai_minor = info.ai_minor;
		aiop->ai_acline = info.ai_acline;
		aiop->ai_batt_stat = info.ai_batt_stat;
		aiop->ai_batt_life = info.ai_batt_life;
		aiop->ai_status = info.ai_status;
		break;
	case APMIO_GETINFO:
		if (acpi_capm_get_info((apm_info_t)addr))
			error = ENXIO;
		break;
	case APMIO_GETPWSTATUS:
		if (acpi_capm_get_pwstatus((apm_pwstatus_t)addr))
			error = ENXIO;
		break;
	case APMIO_ENABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 1;
		break;
	case APMIO_DISABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 0;
		break;
	case APMIO_HALTCPU:
		break;
	case APMIO_NOTHALTCPU:
		break;
	case APMIO_DISPLAY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		break;
	case APMIO_BIOS:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		bzero(addr, sizeof(struct apm_bios_arg));
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static int
apmwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	return (uio->uio_resid);
}

static int
apmpoll(struct cdev *dev, int events, struct thread *td)
{
	struct apm_clone_data *clone;
	int revents;

	revents = 0;
	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	if (clone->acpi_sc->acpi_next_sstate)
		revents |= events & (POLLIN | POLLRDNORM);
	else
		selrecord(td, &clone->sel_read);
	ACPI_UNLOCK(acpi);
	return (revents);
}

static int
apmkqfilter(struct cdev *dev, struct knote *kn)
{
	struct apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	kn->kn_hook = clone;
	kn->kn_fop = &apm_readfiltops;
	knlist_add(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
	return (0);
}

static void
apmreadfiltdetach(struct knote *kn)
{
	struct apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	knlist_remove(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
}

static int
apmreadfilt(struct knote *kn, long hint)
{
	struct apm_clone_data *clone;
	int sleeping;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	sleeping = clone->acpi_sc->acpi_next_sstate ? 1 : 0;
	ACPI_UNLOCK(acpi);
	return (sleeping);
}

int
acpi_machdep_init(device_t dev)
{
	struct acpi_softc *sc;

	sc = devclass_get_softc(devclass_find("acpi"), 0);

	/* Create a clone for /dev/acpi also. */
	STAILQ_INIT(&sc->apm_cdevs);
	sc->acpi_clone = apm_create_clone(sc->acpi_dev_t, sc);
	clone_setup(&apm_clones);
	EVENTHANDLER_REGISTER(dev_clone, apm_clone, 0, 1000);

	if (intr_model != ACPI_INTR_PIC)
		acpi_SetIntrModel(intr_model);

	SYSCTL_ADD_UINT(&sc->acpi_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
	    "reset_video", CTLFLAG_RW, &acpi_reset_video, 0,
	    "Call the VESA reset BIOS vector on the resume path");

	return (0);
}

/* Record the interrupt routing model chosen before the ACPI bus attaches. */
void
acpi_SetDefaultIntrModel(int model)
{

	intr_model = model;
}

int
acpi_machdep_quirks(int *quirks)
{
	return (0);
}

/* Enable interrupts and halt until the next interrupt (C1 idle). */
void
acpi_cpu_c1()
{
	__asm __volatile("sti; hlt");
}

/*
 * Support for mapping ACPI tables during early boot.  Currently this
 * uses the crashdump map to map each table.  However, the crashdump
 * map is created in pmap_bootstrap() right after the direct map, so
 * we should be able to just use pmap_mapbios() here instead.
 *
 * This makes the following assumptions about how we use this KVA:
 * pages 0 and 1 are used to map in the header of each table found via
 * the RSDT or XSDT and pages 2 to n are used to map in the RSDT or
 * XSDT.  This has to use 2 pages for the table headers in case a
 * header spans a page boundary.
 *
 * XXX: We don't ensure the table fits in the available address space
 * in the crashdump map.
 */

/*
 * Map some memory using the crashdump map.  'offset' is an offset in
 * pages into the crashdump map to use for the start of the mapping.
 */
static void *
table_map(vm_paddr_t pa, int offset, vm_offset_t length)
{
	vm_offset_t va, off;
	void *data;

	off = pa & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	pa = pa & PG_FRAME;
	va = (vm_offset_t)pmap_kenter_temporary(pa, offset) +
	    (offset * PAGE_SIZE);
	data = (void *)(va + off);
	length -= PAGE_SIZE;
	while (length > 0) {
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		length -= PAGE_SIZE;
		pmap_kenter(va, pa);
		invlpg(va);
	}
	return (data);
}

/* Unmap memory previously mapped with table_map(). */
static void
table_unmap(void *data, vm_offset_t length)
{
	vm_offset_t va, off;

	va = (vm_offset_t)data;
	off = va & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	va &= ~PAGE_MASK;
	while (length > 0) {
		pmap_kremove(va);
		invlpg(va);
		va += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * Map a table at a given offset into the crashdump map.  It first
 * maps the header to determine the table length and then maps the
 * entire table.
 */
static void *
map_table(vm_paddr_t pa, int offset, const char *sig)
{
	ACPI_TABLE_HEADER *header;
	vm_offset_t length;
	void *table;

	header = table_map(pa, offset, sizeof(ACPI_TABLE_HEADER));
	if (strncmp(header->Signature, sig, ACPI_NAME_SIZE) != 0) {
		table_unmap(header, sizeof(ACPI_TABLE_HEADER));
		return (NULL);
	}
	length = header->Length;
	table_unmap(header, sizeof(ACPI_TABLE_HEADER));
	table = table_map(pa, offset, length);
	if (ACPI_FAILURE(AcpiTbChecksum(table, length))) {
		if (bootverbose)
			printf("ACPI: Failed checksum for table %s\n", sig);
		table_unmap(table, length);
		return (NULL);
	}
	return (table);
}

/*
 * See if a given ACPI table is the requested table.  Returns 1 if the
 * signature matches or 0 on failure.
 */
static int
probe_table(vm_paddr_t address, const char *sig)
{
	ACPI_TABLE_HEADER *table;

	table = table_map(address, 0, sizeof(ACPI_TABLE_HEADER));
	if (table == NULL) {
		if (bootverbose)
			printf("ACPI: Failed to map table at 0x%jx\n",
			    (uintmax_t)address);
		return (0);
	}
	if (bootverbose)
		printf("Table '%.4s' at 0x%jx\n", table->Signature,
		    (uintmax_t)address);

	if (strncmp(table->Signature, sig, ACPI_NAME_SIZE) != 0) {
		table_unmap(table, sizeof(ACPI_TABLE_HEADER));
		return (0);
	}
	table_unmap(table, sizeof(ACPI_TABLE_HEADER));
	return (1);
}

/*
 * Try to map a table at a given physical address previously returned
 * by acpi_find_table().
 */
void *
acpi_map_table(vm_paddr_t pa, const char *sig)
{

	return (map_table(pa, 0, sig));
}

/* Unmap a table previously mapped via acpi_map_table(). */
void
acpi_unmap_table(void *table)
{
	ACPI_TABLE_HEADER *header;

	header = (ACPI_TABLE_HEADER *)table;
	table_unmap(table, header->Length);
}

/*
 * Return the physical address of the requested table or zero if one
 * is not found.
 */
vm_paddr_t
acpi_find_table(const char *sig)
{
	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
	ACPI_TABLE_RSDP *rsdp;
	ACPI_TABLE_RSDT *rsdt;
	ACPI_TABLE_XSDT *xsdt;
	ACPI_TABLE_HEADER *table;
	vm_paddr_t addr;
	int i, count;

	if (resource_disabled("acpi", 0))
		return (0);

	/*
	 * Map in the RSDP.  Since ACPI uses AcpiOsMapMemory() which in turn
	 * calls pmap_mapbios() to find the RSDP, we assume that we can use
	 * pmap_mapbios() to map the RSDP.
	 */
	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
		return (0);
	rsdp = pmap_mapbios(rsdp_ptr, sizeof(ACPI_TABLE_RSDP));
	if (rsdp == NULL) {
		if (bootverbose)
			printf("ACPI: Failed to map RSDP\n");
		return (0);
	}

	/*
	 * For ACPI >= 2.0, use the XSDT if it is available.
	 * Otherwise, use the RSDT.  We map the XSDT or RSDT at page 2
	 * in the crashdump area.  Pages 0 and 1 are used to map in the
	 * headers of candidate ACPI tables.
	 */
	addr = 0;
	if (rsdp->Revision >= 2 && rsdp->XsdtPhysicalAddress != 0) {
		/*
		 * AcpiOsGetRootPointer only verifies the checksum for
		 * the version 1.0 portion of the RSDP.  Version 2.0 has
		 * an additional checksum that we verify first.
		 */
		if (AcpiTbChecksum((UINT8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH)) {
			if (bootverbose)
				printf("ACPI: RSDP failed extended checksum\n");
			return (0);
		}
		xsdt = map_table(rsdp->XsdtPhysicalAddress, 2, ACPI_SIG_XSDT);
		if (xsdt == NULL) {
			if (bootverbose)
				printf("ACPI: Failed to map XSDT\n");
			return (0);
		}
		count = (xsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) /
		    sizeof(UINT64);
		for (i = 0; i < count; i++)
			if (probe_table(xsdt->TableOffsetEntry[i], sig)) {
				addr = xsdt->TableOffsetEntry[i];
				break;
			}
		acpi_unmap_table(xsdt);
	} else {
		rsdt = map_table(rsdp->RsdtPhysicalAddress, 2, ACPI_SIG_RSDT);
		if (rsdt == NULL) {
			if (bootverbose)
				printf("ACPI: Failed to map RSDT\n");
			return (0);
		}
		count = (rsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) /
		    sizeof(UINT32);
		for (i = 0; i < count; i++)
			if (probe_table(rsdt->TableOffsetEntry[i], sig)) {
				addr = rsdt->TableOffsetEntry[i];
				break;
			}
		acpi_unmap_table(rsdt);
	}
	pmap_unmapbios((vm_offset_t)rsdp, sizeof(ACPI_TABLE_RSDP));
	if (addr == 0) {
		if (bootverbose)
			printf("ACPI: No %s table found\n", sig);
		return (0);
	}
	if (bootverbose)
		printf("%s: Found table at 0x%jx\n", sig, (uintmax_t)addr);

	/*
	 * Verify that we can map the full table and that its checksum is
	 * correct, etc.
	 */
	table = map_table(addr, 0, sig);
	if (table == NULL)
		return (0);
	acpi_unmap_table(table);

	return (addr);
}

/*
 * ACPI nexus(4) driver.
 */
static int
nexus_acpi_probe(device_t dev)
{
	int error;

	error = acpi_identify();
	if (error)
		return (error);

	return (BUS_PROBE_DEFAULT);
}

static int
nexus_acpi_attach(device_t dev)
{
	device_t acpi_dev;
	int error;

	nexus_init_resources();
	bus_generic_probe(dev);
	acpi_dev = BUS_ADD_CHILD(dev, 10, "acpi", 0);
	if (acpi_dev == NULL)
		panic("failed to add acpi0 device");

	error = bus_generic_attach(dev);
	if (error == 0)
		acpi_install_wakeup_handler(device_get_softc(acpi_dev));

	return (error);
}

static device_method_t nexus_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nexus_acpi_probe),
	DEVMETHOD(device_attach,	nexus_acpi_attach),

	{ 0, 0 }
};

DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver);
static devclass_t nexus_devclass;

DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, nexus_devclass, 0, 0);