/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/pmu/bhnd_pmu.h>

#include "bcma_dmp.h"

#include "bcma_eromreg.h"
#include "bcma_eromvar.h"

#include "bcmavar.h"

/* RID used when allocating EROM table */
#define BCMA_EROM_RID   0

static bhnd_erom_class_t *
bcma_get_erom_class(driver_t *driver)
{
        return (&bcma_erom_parser);
}

int
bcma_probe(device_t dev)
{
        device_set_desc(dev, "BCMA BHND bus");
        return (BUS_PROBE_DEFAULT);
}

/**
 * Default bcma(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal bcma(4) state and performs
 * bus enumeration; it must be called by subclassing drivers in
 * DEVICE_ATTACH() before using any other bus methods.
 */
int
bcma_attach(device_t dev)
{
        int error;

        /* Enumerate children */
        if ((error = bcma_add_children(dev))) {
                device_delete_children(dev);
                return (error);
        }

        return (0);
}
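/*
 * Usage sketch (added commentary, not part of the original driver): a
 * hypothetical subclassing attachment driver is expected to chain its
 * DEVICE_ATTACH() implementation through bcma_attach() before performing
 * any other bus operations. The "bcma_foo" name below is purely
 * illustrative, as is the use of bhnd_generic_attach() to finish the
 * attach:
 *
 *      static int
 *      bcma_foo_attach(device_t dev)
 *      {
 *              int error;
 *
 *              // Enumerate cores via the default bcma(4) implementation
 *              if ((error = bcma_attach(dev)))
 *                      return (error);
 *
 *              // ... bus-specific setup would go here ...
 *
 *              // Hand off to the generic bhnd(4) attach logic
 *              return (bhnd_generic_attach(dev));
 *      }
 */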
int
bcma_detach(device_t dev)
{
        return (bhnd_generic_detach(dev));
}

static device_t
bcma_add_child(device_t dev, u_int order, const char *name, int unit)
{
        struct bcma_devinfo     *dinfo;
        device_t                 child;

        child = device_add_child_ordered(dev, order, name, unit);
        if (child == NULL)
                return (NULL);

        if ((dinfo = bcma_alloc_dinfo(dev)) == NULL) {
                device_delete_child(dev, child);
                return (NULL);
        }

        device_set_ivars(child, dinfo);

        return (child);
}

static void
bcma_child_deleted(device_t dev, device_t child)
{
        struct bcma_devinfo *dinfo;

        /* Call required bhnd(4) implementation */
        bhnd_generic_child_deleted(dev, child);

        /* Free bcma device info */
        if ((dinfo = device_get_ivars(child)) != NULL)
                bcma_free_dinfo(dev, child, dinfo);

        device_set_ivars(child, NULL);
}

static int
bcma_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
        const struct bcma_devinfo       *dinfo;
        const struct bhnd_core_info     *ci;

        dinfo = device_get_ivars(child);
        ci = &dinfo->corecfg->core_info;

        switch (index) {
        case BHND_IVAR_VENDOR:
                *result = ci->vendor;
                return (0);
        case BHND_IVAR_DEVICE:
                *result = ci->device;
                return (0);
        case BHND_IVAR_HWREV:
                *result = ci->hwrev;
                return (0);
        case BHND_IVAR_DEVICE_CLASS:
                *result = bhnd_core_class(ci);
                return (0);
        case BHND_IVAR_VENDOR_NAME:
                *result = (uintptr_t) bhnd_vendor_name(ci->vendor);
                return (0);
        case BHND_IVAR_DEVICE_NAME:
                *result = (uintptr_t) bhnd_core_name(ci);
                return (0);
        case BHND_IVAR_CORE_INDEX:
                *result = ci->core_idx;
                return (0);
        case BHND_IVAR_CORE_UNIT:
                *result = ci->unit;
                return (0);
        case BHND_IVAR_PMU_INFO:
                *result = (uintptr_t) dinfo->pmu_info;
                return (0);
        default:
                return (ENOENT);
        }
}

static int
bcma_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
        struct bcma_devinfo *dinfo;

        dinfo = device_get_ivars(child);

        switch (index) {
        case BHND_IVAR_VENDOR:
        case BHND_IVAR_DEVICE:
        case BHND_IVAR_HWREV:
        case BHND_IVAR_DEVICE_CLASS:
        case BHND_IVAR_VENDOR_NAME:
        case BHND_IVAR_DEVICE_NAME:
        case BHND_IVAR_CORE_INDEX:
        case BHND_IVAR_CORE_UNIT:
                return (EINVAL);
        case BHND_IVAR_PMU_INFO:
                dinfo->pmu_info = (void *)value;
                return (0);
        default:
                return (ENOENT);
        }
}

static struct resource_list *
bcma_get_resource_list(device_t dev, device_t child)
{
        struct bcma_devinfo *dinfo = device_get_ivars(child);
        return (&dinfo->resources);
}

static int
bcma_read_iost(device_t dev, device_t child, uint16_t *iost)
{
        uint32_t        value;
        int             error;

        if ((error = bhnd_read_config(child, BCMA_DMP_IOSTATUS, &value, 4)))
                return (error);

        /* Return only the bottom 16 bits */
        *iost = (value & BCMA_DMP_IOST_MASK);
        return (0);
}

static int
bcma_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
        uint32_t        value;
        int             error;

        if ((error = bhnd_read_config(child, BCMA_DMP_IOCTRL, &value, 4)))
                return (error);

        /* Return only the bottom 16 bits */
        *ioctl = (value & BCMA_DMP_IOCTRL_MASK);
        return (0);
}
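/*
 * Added commentary (not from the original source): the IOST and IOCTL
 * values returned above are the low 16 bits of the 32-bit DMP agent
 * registers (BCMA_DMP_IOSTATUS / BCMA_DMP_IOCTRL). A child core driver
 * would typically fetch them through bhnd(4) wrappers along the lines of:
 *
 *      uint16_t iost, ioctl;
 *      int error;
 *
 *      error = bhnd_read_iost(child, &iost);
 *      if (error == 0)
 *              error = bhnd_read_ioctl(child, &ioctl);
 *
 * bhnd_read_ioctl() is used elsewhere in this file; bhnd_read_iost() is
 * assumed to be the matching wrapper for BHND_BUS_READ_IOST().
 */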
static int
bcma_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;
        uint32_t                 ioctl;

        if (device_get_parent(child) != dev)
                return (EINVAL);

        dinfo = device_get_ivars(child);
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Write new value */
        ioctl = bhnd_bus_read_4(r, BCMA_DMP_IOCTRL);
        ioctl &= ~(BCMA_DMP_IOCTRL_MASK & mask);
        ioctl |= (value & mask);

        bhnd_bus_write_4(r, BCMA_DMP_IOCTRL, ioctl);

        /* Perform read-back and wait for completion */
        bhnd_bus_read_4(r, BCMA_DMP_IOCTRL);
        DELAY(10);

        return (0);
}

static bool
bcma_is_hw_suspended(device_t dev, device_t child)
{
        uint32_t        rst;
        uint16_t        ioctl;
        int             error;

        /* Is core held in RESET? */
        error = bhnd_read_config(child, BCMA_DMP_RESETCTRL, &rst, 4);
        if (error) {
                device_printf(child, "error reading HW reset state: %d\n",
                    error);
                return (true);
        }

        if (rst & BCMA_DMP_RC_RESET)
                return (true);

        /* Is core clocked? */
        error = bhnd_read_ioctl(child, &ioctl);
        if (error) {
                device_printf(child, "error reading HW ioctl register: %d\n",
                    error);
                return (true);
        }

        if (!(ioctl & BHND_IOCTL_CLK_EN))
                return (true);

        return (false);
}
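/**
 * Default bcma(4) bus driver implementation of BHND_BUS_RESET_HW().
 *
 * (Added summary of the sequence performed below): the core is first placed
 * into a known RESET state via bhnd_suspend_hw(), the caller's IOCTL flags
 * are written with the clock enabled and forced, the core is brought out of
 * reset, and forced clock gating is finally released, leaving the clock
 * enabled.
 */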
static int
bcma_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;
        uint16_t                 clkflags;
        int                      error;

        if (device_get_parent(child) != dev)
                return (EINVAL);

        dinfo = device_get_ivars(child);

        /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
        clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
        if (ioctl & clkflags)
                return (EINVAL);

        /* Can't suspend the core without access to the agent registers */
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Place core into known RESET state */
        if ((error = bhnd_suspend_hw(child, reset_ioctl)))
                return (error);

        /*
         * Leaving the core in reset:
         * - Set the caller's IOCTL flags
         * - Enable clocks
         * - Force clock distribution to ensure propagation throughout the
         *   core.
         */
        if ((error = bhnd_write_ioctl(child, ioctl | clkflags, UINT16_MAX)))
                return (error);

        /* Bring the core out of reset */
        if ((error = bcma_dmp_write_reset(child, dinfo, 0x0)))
                return (error);

        /* Disable forced clock gating (leaving clock enabled) */
        error = bhnd_write_ioctl(child, 0x0, BHND_IOCTL_CLK_FORCE);
        if (error)
                return (error);

        return (0);
}

static int
bcma_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;
        uint16_t                 clkflags;
        int                      error;

        if (device_get_parent(child) != dev)
                return (EINVAL);

        dinfo = device_get_ivars(child);

        /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
        clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
        if (ioctl & clkflags)
                return (EINVAL);

        /* Can't suspend the core without access to the agent registers */
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Wait for any pending reset operations to clear */
        if ((error = bcma_dmp_wait_reset(child, dinfo)))
                return (error);

        /* Put core into reset (if not already in reset) */
        if ((error = bcma_dmp_write_reset(child, dinfo, BCMA_DMP_RC_RESET)))
                return (error);

        /* Write core flags (and clear CLK_EN/CLK_FORCE) */
        if ((error = bhnd_write_ioctl(child, ioctl, ~clkflags)))
                return (error);

        return (0);
}

static int
bcma_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;

        /* Must be a directly attached child core */
        if (device_get_parent(child) != dev)
                return (EINVAL);

        /* Fetch the agent registers */
        dinfo = device_get_ivars(child);
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Verify bounds */
        if (offset > rman_get_size(r->res))
                return (EFAULT);

        if (rman_get_size(r->res) - offset < width)
                return (EFAULT);

        switch (width) {
        case 1:
                *((uint8_t *)value) = bhnd_bus_read_1(r, offset);
                return (0);
        case 2:
                *((uint16_t *)value) = bhnd_bus_read_2(r, offset);
                return (0);
        case 4:
                *((uint32_t *)value) = bhnd_bus_read_4(r, offset);
                return (0);
        default:
                return (EINVAL);
        }
}

static int
bcma_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
        struct bcma_devinfo     *dinfo;
        struct bhnd_resource    *r;

        /* Must be a directly attached child core */
        if (device_get_parent(child) != dev)
                return (EINVAL);

        /* Fetch the agent registers */
        dinfo = device_get_ivars(child);
        if ((r = dinfo->res_agent) == NULL)
                return (ENODEV);

        /* Verify bounds */
        if (offset > rman_get_size(r->res))
                return (EFAULT);

        if (rman_get_size(r->res) - offset < width)
                return (EFAULT);

        switch (width) {
        case 1:
                bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
                return (0);
        case 2:
                bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
                return (0);
        case 4:
                bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
                return (0);
        default:
                return (EINVAL);
        }
}
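/*
 * Added commentary (not from the original source): bcma_read_config() and
 * bcma_write_config() above provide width-checked access to the per-core
 * DMP agent register block. For example, reading the full 32-bit I/O
 * status register might look like:
 *
 *      uint32_t status;
 *      int error;
 *
 *      error = bhnd_read_config(child, BCMA_DMP_IOSTATUS, &status, 4);
 *
 * where the final argument selects a 1, 2, or 4 byte access, matching the
 * switch statements above.
 */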
static u_int
bcma_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
        struct bcma_devinfo *dinfo;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev)
                return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
                    type));

        dinfo = device_get_ivars(child);
        switch (type) {
        case BHND_PORT_DEVICE:
                return (dinfo->corecfg->num_dev_ports);
        case BHND_PORT_BRIDGE:
                return (dinfo->corecfg->num_bridge_ports);
        case BHND_PORT_AGENT:
                return (dinfo->corecfg->num_wrapper_ports);
        default:
                device_printf(dev, "%s: unknown type (%d)\n", __func__, type);
                return (0);
        }
}

static u_int
bcma_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port_num)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev)
                return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev),
                    child, type, port_num));

        dinfo = device_get_ivars(child);
        ports = bcma_corecfg_get_port_list(dinfo->corecfg, type);

        STAILQ_FOREACH(port, ports, sp_link) {
                if (port->sp_num == port_num)
                        return (port->sp_num_maps);
        }

        /* not found */
        return (0);
}

static int
bcma_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_map         *map;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        dinfo = device_get_ivars(child);
        ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type);

        STAILQ_FOREACH(port, ports, sp_link) {
                if (port->sp_num != port_num)
                        continue;

                STAILQ_FOREACH(map, &port->sp_maps, m_link)
                        if (map->m_region_num == region_num)
                                return (map->m_rid);
        }

        return (-1);
}

static int
bcma_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_map         *map;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        dinfo = device_get_ivars(child);

        /* Ports are always memory mapped */
        if (type != SYS_RES_MEMORY)
                return (EINVAL);

        /* Starting with the most likely device list, search all three port
         * lists */
        bhnd_port_type types[] = {
                BHND_PORT_DEVICE,
                BHND_PORT_AGENT,
                BHND_PORT_BRIDGE
        };

        for (u_int i = 0; i < nitems(types); i++) {
                ports = bcma_corecfg_get_port_list(dinfo->corecfg, types[i]);

                STAILQ_FOREACH(port, ports, sp_link) {
                        STAILQ_FOREACH(map, &port->sp_maps, m_link) {
                                if (map->m_rid != rid)
                                        continue;

                                *port_type = port->sp_type;
                                *port_num = port->sp_num;
                                *region_num = map->m_region_num;
                                return (0);
                        }
                }
        }

        return (ENOENT);
}
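/*
 * Added commentary (not from the original source): a resource ID (RID)
 * names exactly one (port type, port number, region number) tuple.
 * bcma_get_port_rid() maps the tuple to its RID and bcma_decode_port_rid()
 * performs the reverse lookup, so the two are expected to round-trip:
 *
 *      bhnd_port_type ptype;
 *      u_int port, region;
 *      int rid, error;
 *
 *      rid = BHND_BUS_GET_PORT_RID(dev, child, BHND_PORT_DEVICE, 0, 0);
 *      if (rid != -1)
 *              error = BHND_BUS_DECODE_PORT_RID(dev, child, SYS_RES_MEMORY,
 *                  rid, &ptype, &port, &region);
 */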
static int
bcma_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_map         *map;
        struct bcma_sport_list  *ports;
        struct bcma_sport       *port;

        dinfo = device_get_ivars(child);
        ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type);

        /* Search the port list */
        STAILQ_FOREACH(port, ports, sp_link) {
                if (port->sp_num != port_num)
                        continue;

                STAILQ_FOREACH(map, &port->sp_maps, m_link) {
                        if (map->m_region_num != region_num)
                                continue;

                        /* Found! */
                        *addr = map->m_base;
                        *size = map->m_size;
                        return (0);
                }
        }

        return (ENOENT);
}

/**
 * Default bcma(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
bcma_get_intr_count(device_t dev, device_t child)
{
        struct bcma_devinfo *dinfo;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev)
                return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

        dinfo = device_get_ivars(child);
        return (dinfo->num_intrs);
}

/**
 * Default bcma(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
bcma_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
        struct bcma_devinfo     *dinfo;
        struct bcma_intr        *desc;

        /* delegate non-bus-attached devices to our parent */
        if (device_get_parent(child) != dev) {
                return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
                    intr, ivec));
        }

        dinfo = device_get_ivars(child);

        STAILQ_FOREACH(desc, &dinfo->intrs, i_link) {
                if (desc->i_sel == intr) {
                        *ivec = desc->i_busline;
                        return (0);
                }
        }

        /* Not found */
        return (ENXIO);
}
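/*
 * Added commentary (not from the original source): each bcma_intr entry is
 * understood to pair a core-local interrupt selector (i_sel) with the
 * backplane interrupt line (i_busline) it has been routed to; the ivec
 * returned above is that bus line. A lookup for a child's first interrupt
 * might look like:
 *
 *      u_int ivec;
 *      int error;
 *
 *      error = BHND_BUS_GET_INTR_IVEC(device_get_parent(child), child, 0,
 *          &ivec);
 */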
/**
 * Scan the device enumeration ROM table, adding all valid discovered cores to
 * the bus.
 *
 * @param bus The bcma bus.
 */
int
bcma_add_children(device_t bus)
{
        bhnd_erom_t                     *erom;
        struct bcma_erom                *bcma_erom;
        struct bhnd_erom_io             *eio;
        const struct bhnd_chipid        *cid;
        struct bcma_corecfg             *corecfg;
        struct bcma_devinfo             *dinfo;
        device_t                         child;
        int                              error;

        cid = BHND_BUS_GET_CHIPID(bus, bus);
        corecfg = NULL;

        /* Allocate our EROM parser */
        eio = bhnd_erom_iores_new(bus, BCMA_EROM_RID);
        erom = bhnd_erom_alloc(&bcma_erom_parser, cid, eio);
        if (erom == NULL) {
                bhnd_erom_io_fini(eio);
                return (ENODEV);
        }

        /* Add all cores. */
        bcma_erom = (struct bcma_erom *)erom;
        while ((error = bcma_erom_next_corecfg(bcma_erom, &corecfg)) == 0) {
                /* Add the child device */
                child = BUS_ADD_CHILD(bus, 0, NULL, DEVICE_UNIT_ANY);
                if (child == NULL) {
                        error = ENXIO;
                        goto cleanup;
                }

                /* Initialize device ivars */
                dinfo = device_get_ivars(child);
                if ((error = bcma_init_dinfo(bus, child, dinfo, corecfg)))
                        goto cleanup;

                /* The dinfo instance now owns the corecfg value */
                corecfg = NULL;

                /* If pins are floating or the hardware is otherwise
                 * unpopulated, the device shouldn't be used. */
                if (bhnd_is_hw_disabled(child))
                        device_disable(child);

                /* Issue bus callback for fully initialized child. */
                BHND_BUS_CHILD_ADDED(bus, child);
        }

        /* EOF while parsing cores is expected */
        if (error == ENOENT)
                error = 0;

cleanup:
        bhnd_erom_free(erom);

        if (corecfg != NULL)
                bcma_free_corecfg(corecfg);

        if (error)
                device_delete_children(bus);

        return (error);
}

static device_method_t bcma_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,                 bcma_probe),
        DEVMETHOD(device_attach,                bcma_attach),
        DEVMETHOD(device_detach,                bcma_detach),

        /* Bus interface */
        DEVMETHOD(bus_add_child,                bcma_add_child),
        DEVMETHOD(bus_child_deleted,            bcma_child_deleted),
        DEVMETHOD(bus_read_ivar,                bcma_read_ivar),
        DEVMETHOD(bus_write_ivar,               bcma_write_ivar),
        DEVMETHOD(bus_get_resource_list,        bcma_get_resource_list),

        /* BHND interface */
        DEVMETHOD(bhnd_bus_get_erom_class,      bcma_get_erom_class),
        DEVMETHOD(bhnd_bus_read_ioctl,          bcma_read_ioctl),
        DEVMETHOD(bhnd_bus_write_ioctl,         bcma_write_ioctl),
        DEVMETHOD(bhnd_bus_read_iost,           bcma_read_iost),
        DEVMETHOD(bhnd_bus_is_hw_suspended,     bcma_is_hw_suspended),
        DEVMETHOD(bhnd_bus_reset_hw,            bcma_reset_hw),
        DEVMETHOD(bhnd_bus_suspend_hw,          bcma_suspend_hw),
        DEVMETHOD(bhnd_bus_read_config,         bcma_read_config),
        DEVMETHOD(bhnd_bus_write_config,        bcma_write_config),
        DEVMETHOD(bhnd_bus_get_port_count,      bcma_get_port_count),
        DEVMETHOD(bhnd_bus_get_region_count,    bcma_get_region_count),
        DEVMETHOD(bhnd_bus_get_port_rid,        bcma_get_port_rid),
        DEVMETHOD(bhnd_bus_decode_port_rid,     bcma_decode_port_rid),
        DEVMETHOD(bhnd_bus_get_region_addr,     bcma_get_region_addr),
        DEVMETHOD(bhnd_bus_get_intr_count,      bcma_get_intr_count),
        DEVMETHOD(bhnd_bus_get_intr_ivec,       bcma_get_intr_ivec),

        DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, bcma_driver, bcma_methods, sizeof(struct bcma_softc),
    bhnd_driver);
MODULE_VERSION(bcma, 1);
MODULE_DEPEND(bcma, bhnd, 1, 1, 1);