/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/pmu/bhnd_pmu.h>

#include "bcma_dmp.h"

#include "bcma_eromreg.h"
#include "bcma_eromvar.h"

#include "bcmavar.h"

/* RID used when allocating EROM table */
#define BCMA_EROM_RID   0

static bhnd_erom_class_t *
bcma_get_erom_class(driver_t *driver)
{
        return (&bcma_erom_parser);
}

int
bcma_probe(device_t dev)
{
        device_set_desc(dev, "BCMA BHND bus");
        return (BUS_PROBE_DEFAULT);
}

/**
 * Default bcma(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal bcma(4) state and performs
 * bus enumeration, and must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods.
 */
int
bcma_attach(device_t dev)
{
        int error;

        /* Enumerate children */
        if ((error = bcma_add_children(dev))) {
                device_delete_children(dev);
                return (error);
        }

        return (0);
}

int
bcma_detach(device_t dev)
{
        return (bhnd_generic_detach(dev));
}
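
/*
 * Example (illustrative sketch, not taken from an actual front-end driver):
 * per the bcma_attach() documentation above, a bus driver subclassing
 * bcma(4) calls bcma_attach() from its own DEVICE_ATTACH() before invoking
 * any other bus methods. The helper name and the use of
 * bhnd_generic_attach() to finish attachment are assumptions made for
 * illustration only:
 *
 *      static int
 *      example_bcma_attach(device_t dev)
 *      {
 *              int error;
 *
 *              // Enumerate child cores first, as required by bcma_attach()
 *              if ((error = bcma_attach(dev)))
 *                      return (error);
 *
 *              // ...attachment-specific setup would go here...
 *
 *              // Hand the remainder to the generic bhnd(4) implementation
 *              return (bhnd_generic_attach(dev));
 *      }
 */
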
static device_t
bcma_add_child(device_t dev, u_int order, const char *name, int unit)
{
        struct bcma_devinfo     *dinfo;
        device_t                 child;

        child = device_add_child_ordered(dev, order, name, unit);
        if (child == NULL)
                return (NULL);

        if ((dinfo = bcma_alloc_dinfo(dev)) == NULL) {
                device_delete_child(dev, child);
                return (NULL);
        }

        device_set_ivars(child, dinfo);

        return (child);
}

static void
bcma_child_deleted(device_t dev, device_t child)
{
        struct bhnd_softc       *sc;
        struct bcma_devinfo     *dinfo;

        sc = device_get_softc(dev);

        /* Call required bhnd(4) implementation */
        bhnd_generic_child_deleted(dev, child);

        /* Free bcma device info */
        if ((dinfo = device_get_ivars(child)) != NULL)
                bcma_free_dinfo(dev, child, dinfo);

        device_set_ivars(child, NULL);
}

static int
bcma_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
        const struct bcma_devinfo       *dinfo;
        const struct bhnd_core_info     *ci;

        dinfo = device_get_ivars(child);
        ci = &dinfo->corecfg->core_info;

        switch (index) {
        case BHND_IVAR_VENDOR:
                *result = ci->vendor;
                return (0);
        case BHND_IVAR_DEVICE:
                *result = ci->device;
                return (0);
        case BHND_IVAR_HWREV:
                *result = ci->hwrev;
                return (0);
        case BHND_IVAR_DEVICE_CLASS:
                *result = bhnd_core_class(ci);
                return (0);
        case BHND_IVAR_VENDOR_NAME:
                *result = (uintptr_t) bhnd_vendor_name(ci->vendor);
                return (0);
        case BHND_IVAR_DEVICE_NAME:
                *result = (uintptr_t) bhnd_core_name(ci);
                return (0);
        case BHND_IVAR_CORE_INDEX:
                *result = ci->core_idx;
                return (0);
        case BHND_IVAR_CORE_UNIT:
                *result = ci->unit;
                return (0);
        case BHND_IVAR_PMU_INFO:
                *result = (uintptr_t) dinfo->pmu_info;
                return (0);
        default:
                return (ENOENT);
        }
}

static int
bcma_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
        struct bcma_devinfo *dinfo;

        dinfo = device_get_ivars(child);

        switch (index) {
        case BHND_IVAR_VENDOR:
        case BHND_IVAR_DEVICE:
        case BHND_IVAR_HWREV:
        case BHND_IVAR_DEVICE_CLASS:
        case BHND_IVAR_VENDOR_NAME:
        case BHND_IVAR_DEVICE_NAME:
        case BHND_IVAR_CORE_INDEX:
        case BHND_IVAR_CORE_UNIT:
                return (EINVAL);
        case BHND_IVAR_PMU_INFO:
                dinfo->pmu_info = (void *)value;
                return (0);
        default:
                return (ENOENT);
        }
}

static struct resource_list *
bcma_get_resource_list(device_t dev, device_t child)
{
        struct bcma_devinfo *dinfo = device_get_ivars(child);
        return (&dinfo->resources);
}
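
/*
 * Example (illustrative sketch): child core drivers normally do not read the
 * ivars handled above directly; they use the bhnd(4) accessor wrappers,
 * which resolve to BUS_READ_IVAR() calls on this bus. Assuming the standard
 * accessor names from the bhnd(4) headers:
 *
 *      uint16_t vendor = bhnd_get_vendor(dev);    // BHND_IVAR_VENDOR
 *      uint16_t device = bhnd_get_device(dev);    // BHND_IVAR_DEVICE
 *      uint8_t  hwrev  = bhnd_get_hwrev(dev);     // BHND_IVAR_HWREV
 */
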
static int
bcma_read_iost(device_t dev, device_t child, uint16_t *iost)
{
        uint32_t        value;
        int             error;

        if ((error = bhnd_read_config(child, BCMA_DMP_IOSTATUS, &value, 4)))
                return (error);

        /* Return only the bottom 16 bits */
        *iost = (value & BCMA_DMP_IOST_MASK);
        return (0);
}

static int
bcma_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
        uint32_t        value;
        int             error;

        if ((error = bhnd_read_config(child, BCMA_DMP_IOCTRL, &value, 4)))
                return (error);

        /* Return only the bottom 16 bits */
        *ioctl = (value & BCMA_DMP_IOCTRL_MASK);
        return (0);
}

static int
bcma_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;
        uint32_t                 ioctl;

        if (device_get_parent(child) != dev)
                return (EINVAL);

        dinfo = device_get_ivars(child);
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Write new value */
        ioctl = bhnd_bus_read_4(r, BCMA_DMP_IOCTRL);
        ioctl &= ~(BCMA_DMP_IOCTRL_MASK & mask);
        ioctl |= (value & mask);

        bhnd_bus_write_4(r, BCMA_DMP_IOCTRL, ioctl);

        /* Perform read-back and wait for completion */
        bhnd_bus_read_4(r, BCMA_DMP_IOCTRL);
        DELAY(10);

        return (0);
}
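
/*
 * Worked example for the read-modify-write above (assuming
 * BCMA_DMP_IOCTRL_MASK covers the low 16 bits): with the register currently
 * reading 0x000a, value = 0x0004 and mask = 0x000c, the masked bits are
 * first cleared (0x000a & ~0x000c == 0x0002) and the caller's bits OR'd in
 * (0x0002 | 0x0004 == 0x0006); bit 1, which lies outside the caller's mask,
 * is left untouched.
 */
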
static bool
bcma_is_hw_suspended(device_t dev, device_t child)
{
        uint32_t        rst;
        uint16_t        ioctl;
        int             error;

        /* Is core held in RESET? */
        error = bhnd_read_config(child, BCMA_DMP_RESETCTRL, &rst, 4);
        if (error) {
                device_printf(child, "error reading HW reset state: %d\n",
                    error);
                return (true);
        }

        if (rst & BCMA_DMP_RC_RESET)
                return (true);

        /* Is core clocked? */
        error = bhnd_read_ioctl(child, &ioctl);
        if (error) {
                device_printf(child, "error reading HW ioctl register: %d\n",
                    error);
                return (true);
        }

        if (!(ioctl & BHND_IOCTL_CLK_EN))
                return (true);

        return (false);
}

static int
bcma_reset_hw(device_t dev, device_t child, uint16_t ioctl)
{
        struct bcma_devinfo             *dinfo;
        struct bhnd_core_pmu_info       *pm;
        struct bhnd_resource            *r;
        int                              error;

        if (device_get_parent(child) != dev)
                return (EINVAL);

        dinfo = device_get_ivars(child);
        pm = dinfo->pmu_info;

        /* We require exclusive control over BHND_IOCTL_CLK_EN and
         * BHND_IOCTL_CLK_FORCE. */
        if (ioctl & (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE))
                return (EINVAL);

        /* Can't suspend the core without access to the agent registers */
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Place core into known RESET state */
        if ((error = BHND_BUS_SUSPEND_HW(dev, child)))
                return (error);

        /*
         * Leaving the core in reset:
         * - Set the caller's IOCTL flags
         * - Enable clocks
         * - Force clock distribution to ensure propagation throughout the
         *   core.
         */
        error = bhnd_write_ioctl(child,
            ioctl | BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE, UINT16_MAX);
        if (error)
                return (error);

        /* Bring the core out of reset */
        if ((error = bcma_dmp_write_reset(child, dinfo, 0x0)))
                return (error);

        /* Disable forced clock gating (leaving clock enabled) */
        error = bhnd_write_ioctl(child, 0x0, BHND_IOCTL_CLK_FORCE);
        if (error)
                return (error);

        return (0);
}

static int
bcma_suspend_hw(device_t dev, device_t child)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;
        uint32_t                 rst;
        int                      error;

        if (device_get_parent(child) != dev)
                return (EINVAL);

        dinfo = device_get_ivars(child);

        /* Can't suspend the core without access to the agent registers */
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Wait for any pending reset operations to clear */
        if ((error = bcma_dmp_wait_reset(child, dinfo)))
                return (error);

        /* Already in reset? */
        rst = bhnd_bus_read_4(r, BCMA_DMP_RESETCTRL);
        if (rst & BCMA_DMP_RC_RESET)
                return (0);

        /* Put core into reset */
        if ((error = bcma_dmp_write_reset(child, dinfo, BCMA_DMP_RC_RESET)))
                return (error);

        /* Clear core flags */
        if ((error = bhnd_write_ioctl(child, 0x0, UINT16_MAX)))
                return (error);

        return (0);
}

static int
bcma_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;

        /* Must be a directly attached child core */
        if (device_get_parent(child) != dev)
                return (EINVAL);

        /* Fetch the agent registers */
        dinfo = device_get_ivars(child);
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Verify bounds */
        if (offset > rman_get_size(r->res))
                return (EFAULT);

        if (rman_get_size(r->res) - offset < width)
                return (EFAULT);

        switch (width) {
        case 1:
                *((uint8_t *)value) = bhnd_bus_read_1(r, offset);
                return (0);
        case 2:
                *((uint16_t *)value) = bhnd_bus_read_2(r, offset);
                return (0);
        case 4:
                *((uint32_t *)value) = bhnd_bus_read_4(r, offset);
                return (0);
        default:
                return (EINVAL);
        }
}

static int
bcma_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;

        /* Must be a directly attached child core */
        if (device_get_parent(child) != dev)
                return (EINVAL);

        /* Fetch the agent registers */
        dinfo = device_get_ivars(child);
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Verify bounds */
        if (offset > rman_get_size(r->res))
                return (EFAULT);

        if (rman_get_size(r->res) - offset < width)
                return (EFAULT);

        switch (width) {
        case 1:
                bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
                return (0);
        case 2:
                bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
                return (0);
        case 4:
                bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
                return (0);
        default:
                return (EINVAL);
        }
}
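
/*
 * Illustrative note on the two-step bounds check used by bcma_read_config()
 * and bcma_write_config(): checking the offset and the remaining window size
 * separately avoids unsigned wrap-around. For example, with a 4096-byte
 * agent window (an assumed size), offset = 4094 and width = 4, the first
 * test does not reject the access (4094 <= 4096), but the second does,
 * since 4096 - 4094 == 2 < 4, so EFAULT is returned rather than reading or
 * writing past the end of the window.
 */
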
static u_int
bcma_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
        struct bcma_devinfo *dinfo;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev)
                return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
                    type));

        dinfo = device_get_ivars(child);
        switch (type) {
        case BHND_PORT_DEVICE:
                return (dinfo->corecfg->num_dev_ports);
        case BHND_PORT_BRIDGE:
                return (dinfo->corecfg->num_bridge_ports);
        case BHND_PORT_AGENT:
                return (dinfo->corecfg->num_wrapper_ports);
        default:
                device_printf(dev, "%s: unknown type (%d)\n", __func__,
                    type);
                return (0);
        }
}

static u_int
bcma_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port_num)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev)
                return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
                    type, port_num));

        dinfo = device_get_ivars(child);
        ports = bcma_corecfg_get_port_list(dinfo->corecfg, type);

        STAILQ_FOREACH(port, ports, sp_link) {
                if (port->sp_num == port_num)
                        return (port->sp_num_maps);
        }

        /* not found */
        return (0);
}

static int
bcma_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_map         *map;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        dinfo = device_get_ivars(child);
        ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type);

        STAILQ_FOREACH(port, ports, sp_link) {
                if (port->sp_num != port_num)
                        continue;

                STAILQ_FOREACH(map, &port->sp_maps, m_link)
                        if (map->m_region_num == region_num)
                                return map->m_rid;
        }

        return -1;
}

static int
bcma_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_map         *map;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        dinfo = device_get_ivars(child);

        /* Ports are always memory mapped */
        if (type != SYS_RES_MEMORY)
                return (EINVAL);

        /* Starting with the most likely device list, search all three port
         * lists */
        bhnd_port_type types[] = {
            BHND_PORT_DEVICE,
            BHND_PORT_AGENT,
            BHND_PORT_BRIDGE
        };

        for (int i = 0; i < nitems(types); i++) {
                ports = bcma_corecfg_get_port_list(dinfo->corecfg, types[i]);

                STAILQ_FOREACH(port, ports, sp_link) {
                        STAILQ_FOREACH(map, &port->sp_maps, m_link) {
                                if (map->m_rid != rid)
                                        continue;

                                *port_type = port->sp_type;
                                *port_num = port->sp_num;
                                *region_num = map->m_region_num;
                                return (0);
                        }
                }
        }

        return (ENOENT);
}

static int
bcma_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_map         *map;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        dinfo = device_get_ivars(child);
        ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type);

        /* Search the port list */
        STAILQ_FOREACH(port, ports, sp_link) {
                if (port->sp_num != port_num)
                        continue;

                STAILQ_FOREACH(map, &port->sp_maps, m_link) {
                        if (map->m_region_num != region_num)
                                continue;

                        /* Found! */
                        *addr = map->m_base;
                        *size = map->m_size;
                        return (0);
                }
        }

        return (ENOENT);
}
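
/*
 * Example (illustrative sketch): the port/region methods above back the
 * standard bhnd(4) resource helpers. A child core driver would typically map
 * its first device port/region roughly as follows (the wrapper names are
 * assumed from the bhnd(4) headers):
 *
 *      struct bhnd_resource    *res;
 *      int                      rid;
 *
 *      // Resolve the resource ID for device port 0, region 0
 *      rid = bhnd_get_port_rid(dev, BHND_PORT_DEVICE, 0, 0);
 *      if (rid == -1)
 *              return (ENXIO);
 *
 *      // Allocate and activate the core's register block
 *      res = bhnd_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
 *      if (res == NULL)
 *              return (ENXIO);
 */
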
/**
 * Default bcma(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
bcma_get_intr_count(device_t dev, device_t child)
{
        struct bcma_devinfo *dinfo;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev)
                return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

        dinfo = device_get_ivars(child);
        return (dinfo->num_intrs);
}

/**
 * Default bcma(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
bcma_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_intr        *desc;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev) {
                return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
                    intr, ivec));
        }

        dinfo = device_get_ivars(child);

        STAILQ_FOREACH(desc, &dinfo->intrs, i_link) {
                if (desc->i_sel == intr) {
                        *ivec = desc->i_busline;
                        return (0);
                }
        }

        /* Not found */
        return (ENXIO);
}
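
/*
 * Example (illustrative sketch): the interrupt vector ("ivec") returned
 * above is the backplane bus line (desc->i_busline) assigned to the core's
 * interrupt selector. A child driver that needs the raw line number could
 * query it as follows, assuming the bhnd(4) wrapper names:
 *
 *      u_int   ivec;
 *      int     error;
 *
 *      if (bhnd_get_intr_count(dev) == 0)
 *              return (ENXIO);
 *      if ((error = bhnd_get_intr_ivec(dev, 0, &ivec)))
 *              return (error);
 */
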
/**
 * Scan the device enumeration ROM table, adding all valid discovered cores to
 * the bus.
 *
 * @param bus The bcma bus.
 */
int
bcma_add_children(device_t bus)
{
        bhnd_erom_t                     *erom;
        struct bcma_erom                *bcma_erom;
        struct bhnd_erom_io             *eio;
        const struct bhnd_chipid        *cid;
        struct bcma_corecfg             *corecfg;
        struct bcma_devinfo             *dinfo;
        device_t                         child;
        int                              error;

        cid = BHND_BUS_GET_CHIPID(bus, bus);
        corecfg = NULL;

        /* Allocate our EROM parser */
        eio = bhnd_erom_iores_new(bus, BCMA_EROM_RID);
        erom = bhnd_erom_alloc(&bcma_erom_parser, cid, eio);
        if (erom == NULL) {
                bhnd_erom_io_fini(eio);
                return (ENODEV);
        }

        /* Add all cores. */
        bcma_erom = (struct bcma_erom *)erom;
        while ((error = bcma_erom_next_corecfg(bcma_erom, &corecfg)) == 0) {
                /* Add the child device */
                child = BUS_ADD_CHILD(bus, 0, NULL, -1);
                if (child == NULL) {
                        error = ENXIO;
                        goto cleanup;
                }

                /* Initialize device ivars */
                dinfo = device_get_ivars(child);
                if ((error = bcma_init_dinfo(bus, child, dinfo, corecfg)))
                        goto cleanup;

                /* The dinfo instance now owns the corecfg value */
                corecfg = NULL;

                /* If pins are floating or the hardware is otherwise
                 * unpopulated, the device shouldn't be used. */
                if (bhnd_is_hw_disabled(child))
                        device_disable(child);

                /* Issue bus callback for fully initialized child. */
                BHND_BUS_CHILD_ADDED(bus, child);
        }

        /* EOF while parsing cores is expected */
        if (error == ENOENT)
                error = 0;

cleanup:
        bhnd_erom_free(erom);

        if (corecfg != NULL)
                bcma_free_corecfg(corecfg);

        if (error)
                device_delete_children(bus);

        return (error);
}

static device_method_t bcma_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,                 bcma_probe),
        DEVMETHOD(device_attach,                bcma_attach),
        DEVMETHOD(device_detach,                bcma_detach),

        /* Bus interface */
        DEVMETHOD(bus_add_child,                bcma_add_child),
        DEVMETHOD(bus_child_deleted,            bcma_child_deleted),
        DEVMETHOD(bus_read_ivar,                bcma_read_ivar),
        DEVMETHOD(bus_write_ivar,               bcma_write_ivar),
        DEVMETHOD(bus_get_resource_list,        bcma_get_resource_list),

        /* BHND interface */
        DEVMETHOD(bhnd_bus_get_erom_class,      bcma_get_erom_class),
        DEVMETHOD(bhnd_bus_read_ioctl,          bcma_read_ioctl),
        DEVMETHOD(bhnd_bus_write_ioctl,         bcma_write_ioctl),
        DEVMETHOD(bhnd_bus_read_iost,           bcma_read_iost),
        DEVMETHOD(bhnd_bus_is_hw_suspended,     bcma_is_hw_suspended),
        DEVMETHOD(bhnd_bus_reset_hw,            bcma_reset_hw),
        DEVMETHOD(bhnd_bus_suspend_hw,          bcma_suspend_hw),
        DEVMETHOD(bhnd_bus_read_config,         bcma_read_config),
        DEVMETHOD(bhnd_bus_write_config,        bcma_write_config),
        DEVMETHOD(bhnd_bus_get_port_count,      bcma_get_port_count),
        DEVMETHOD(bhnd_bus_get_region_count,    bcma_get_region_count),
        DEVMETHOD(bhnd_bus_get_port_rid,        bcma_get_port_rid),
        DEVMETHOD(bhnd_bus_decode_port_rid,     bcma_decode_port_rid),
        DEVMETHOD(bhnd_bus_get_region_addr,     bcma_get_region_addr),
        DEVMETHOD(bhnd_bus_get_intr_count,      bcma_get_intr_count),
        DEVMETHOD(bhnd_bus_get_intr_ivec,       bcma_get_intr_ivec),

        DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, bcma_driver, bcma_methods, sizeof(struct bcma_softc),
    bhnd_driver);
MODULE_VERSION(bcma, 1);
MODULE_DEPEND(bcma, bhnd, 1, 1, 1);
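
/*
 * Example (illustrative sketch): bcma_driver, defined above, is subclassed
 * from the generic bhnd driver; concrete bus attachments subclass it in turn,
 * supply their own probe/attach glue around bcma_probe()/bcma_attach(), and
 * register against their parent bus. A registration would look roughly like
 * the following, where the attachment and parent bus names are assumptions:
 *
 *      DRIVER_MODULE(bcma, bhndb, bcma_bhndb_driver, bhnd_devclass,
 *          NULL, NULL);
 */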