/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/bhnd/cores/chipc/chipcreg.h>

#include "bcma_eromreg.h"
#include "bcma_eromvar.h"

/*
 * BCMA Enumeration ROM (EROM) Table
 *
 * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
 *
 * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
 * ChipCommon registers. The table itself is comprised of 32-bit
 * type-tagged entries, organized into an array of variable-length
 * core descriptor records.
 *
 * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
 * marker.
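 *
 * As an illustrative sketch only (not a dump of any particular chip), a
 * table describing a single core with one master port and one slave port
 * address region would consist of a sequence of 32-bit entries along the
 * lines of:
 *
 *   CoreDescA               core designer and part number
 *   CoreDescB               core revision and port counts
 *   Master port descriptor  one per master port
 *   Region descriptor       one per slave port address region, optionally
 *                           followed by 32-bit address-high and size words
 *                           as flagged within the descriptor itself
 *   BCMA_EROM_TABLE_EOF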
 */

static const char *bcma_erom_entry_type_name (uint8_t entry);

static int bcma_erom_read32(struct bcma_erom *erom,
    uint32_t *entry);
static int bcma_erom_skip32(struct bcma_erom *erom);

static int bcma_erom_skip_core(struct bcma_erom *erom);
static int bcma_erom_skip_mport(struct bcma_erom *erom);
static int bcma_erom_skip_sport_region(struct bcma_erom *erom);

static int bcma_erom_seek_next(struct bcma_erom *erom,
    uint8_t etype);
static int bcma_erom_region_to_port_type(struct bcma_erom *erom,
    uint8_t region_type, bhnd_port_type *port_type);

static int bcma_erom_peek32(struct bcma_erom *erom,
    uint32_t *entry);

static bus_size_t bcma_erom_tell(struct bcma_erom *erom);
static void bcma_erom_seek(struct bcma_erom *erom,
    bus_size_t offset);
static void bcma_erom_reset(struct bcma_erom *erom);

static int bcma_erom_seek_matching_core(struct bcma_erom *sc,
    const struct bhnd_core_match *desc,
    struct bhnd_core_info *core);

static int bcma_erom_parse_core(struct bcma_erom *erom,
    struct bcma_erom_core *core);

static int bcma_erom_parse_mport(struct bcma_erom *erom,
    struct bcma_erom_mport *mport);

static int bcma_erom_parse_sport_region(struct bcma_erom *erom,
    struct bcma_erom_sport_region *region);

static void bcma_erom_to_core_info(const struct bcma_erom_core *core,
    u_int core_idx, int core_unit,
    struct bhnd_core_info *info);

/**
 * BCMA EROM per-instance state.
 */
struct bcma_erom {
        struct bhnd_erom         obj;
        device_t                 dev;    /**< parent device, or NULL if none. */
        struct bhnd_erom_io     *eio;    /**< bus I/O callbacks */
        bhnd_size_t              offset; /**< current read offset */
};

#define EROM_LOG(erom, fmt, ...)        do {                            \
        printf("%s erom[0x%llx]: " fmt, __FUNCTION__,                  \
            (unsigned long long)(erom->offset), ##__VA_ARGS__);        \
} while(0)

/** Return the type name for an EROM entry */
static const char *
bcma_erom_entry_type_name (uint8_t entry)
{
        switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
        case BCMA_EROM_ENTRY_TYPE_CORE:
                return "core";
        case BCMA_EROM_ENTRY_TYPE_MPORT:
                return "mport";
        case BCMA_EROM_ENTRY_TYPE_REGION:
                return "region";
        default:
                return "unknown";
        }
}

/* BCMA implementation of BHND_EROM_INIT() */
static int
bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
    struct bhnd_erom_io *eio)
{
        struct bcma_erom *sc;
        bhnd_addr_t table_addr;
        int error;

        sc = (struct bcma_erom *)erom;
        sc->eio = eio;
        sc->offset = 0;

        /* Determine erom table address */
        if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
                return (ENXIO); /* would overflow */

        table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;

        /* Try to map the erom table */
        error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
        if (error)
                return (error);

        return (0);
}

/* BCMA implementation of BHND_EROM_PROBE() */
static int
bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
    const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
{
        uint32_t idreg, eromptr;

        /* Hints aren't supported; all BCMA devices have a ChipCommon
         * core */
        if (hint != NULL)
                return (EINVAL);

        /* Confirm CHIPC_EROMPTR availability */
        idreg = bhnd_erom_io_read(eio, CHIPC_ID, 4);
        if (!BHND_CHIPTYPE_HAS_EROM(CHIPC_GET_BITS(idreg, CHIPC_ID_BUS)))
                return (ENXIO);

        /* Fetch EROM address */
        eromptr = bhnd_erom_io_read(eio, CHIPC_EROMPTR, 4);

        /* Parse chip identifier */
        *cid = bhnd_parse_chipid(idreg, eromptr);

        /* Verify chip type */
        switch (cid->chip_type) {
        case BHND_CHIPTYPE_BCMA:
                return (BUS_PROBE_DEFAULT);

        case BHND_CHIPTYPE_BCMA_ALT:
        case BHND_CHIPTYPE_UBUS:
                return (BUS_PROBE_GENERIC);

        default:
                return (ENXIO);
        }
}

static void
bcma_erom_fini(bhnd_erom_t *erom)
{
        struct bcma_erom *sc = (struct bcma_erom *)erom;

        bhnd_erom_io_fini(sc->eio);
}

static int
bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
    struct bhnd_core_info *core)
{
        struct bcma_erom *sc = (struct bcma_erom *)erom;

        /* Search for the first matching core */
        return (bcma_erom_seek_matching_core(sc, desc, core));
}

static int
bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
    bhnd_port_type port_type, u_int port_num, u_int region_num,
    struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
{
        struct bcma_erom *sc;
        struct bcma_erom_core ec;
        uint32_t entry;
        uint8_t region_port, region_type;
        bool found;
        int error;

        sc = (struct bcma_erom *)erom;

        /* Seek to the first matching core and provide the core info
         * to the caller */
        if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
                return (error);

        if ((error = bcma_erom_parse_core(sc, &ec)))
                return (error);

        /* Skip master ports */
        for (u_long i = 0; i < ec.num_mport; i++) {
                if ((error = bcma_erom_skip_mport(sc)))
                        return (error);
        }

        /* Seek to the region block for the given port type */
        found = false;
        while (1) {
                bhnd_port_type p_type;
                uint8_t r_type;

                if ((error = bcma_erom_peek32(sc, &entry)))
                        return (error);

                if (!BCMA_EROM_ENTRY_IS(entry, REGION))
                        return (ENOENT);

                /* Expected region type? */
                r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
                error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
                if (error)
                        return (error);

                if (p_type == port_type) {
                        found = true;
                        break;
                }

                /* Skip to next entry */
                if ((error = bcma_erom_skip_sport_region(sc)))
                        return (error);
        }

        if (!found)
                return (ENOENT);

        /* Found the appropriate port type block; now find the region records
         * for the given port number */
        found = false;
        for (u_int i = 0; i <= port_num; i++) {
                bhnd_port_type p_type;

                if ((error = bcma_erom_peek32(sc, &entry)))
                        return (error);

                if (!BCMA_EROM_ENTRY_IS(entry, REGION))
                        return (ENOENT);

                /* Fetch the type/port of the first region entry */
                region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
                region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

                /* Have we found the region entries for the desired port? */
                if (i == port_num) {
                        error = bcma_erom_region_to_port_type(sc, region_type,
                            &p_type);
                        if (error)
                                return (error);

                        if (p_type == port_type)
                                found = true;

                        break;
                }

                /* Otherwise, seek to next block of region records */
                while (1) {
                        uint8_t next_type, next_port;

                        if ((error = bcma_erom_skip_sport_region(sc)))
                                return (error);

                        if ((error = bcma_erom_peek32(sc, &entry)))
                                return (error);

                        if (!BCMA_EROM_ENTRY_IS(entry, REGION))
                                return (ENOENT);

                        next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
                        next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

                        if (next_type != region_type ||
                            next_port != region_port)
                                break;
                }
        }

        if (!found)
                return (ENOENT);

        /* Finally, search for the requested region number */
        for (u_int i = 0; i <= region_num; i++) {
                struct bcma_erom_sport_region region;
                uint8_t next_port, next_type;

                if ((error = bcma_erom_peek32(sc, &entry)))
                        return (error);

                if (!BCMA_EROM_ENTRY_IS(entry, REGION))
                        return (ENOENT);

                /* Check for the end of the region block */
                next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
                next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

                if (next_type != region_type ||
                    next_port != region_port)
                        break;

                /* Parse the region */
                if ((error = bcma_erom_parse_sport_region(sc, &region)))
                        return (error);

                /* Is this our target region_num? */
                if (i == region_num) {
                        /* Found */
                        *addr = region.base_addr;
                        *size = region.size;
                        return (0);
                }
        }

        /* Not found */
        return (ENOENT);
}

static int
bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
    u_int *num_cores)
{
        struct bcma_erom *sc;
        struct bhnd_core_info *buffer;
        bus_size_t initial_offset;
        u_int count;
        int error;

        sc = (struct bcma_erom *)erom;

        buffer = NULL;
        initial_offset = bcma_erom_tell(sc);

        /* Determine the core count */
        bcma_erom_reset(sc);
        for (count = 0, error = 0; !error; count++) {
                struct bcma_erom_core core;

                /* Seek to the first readable core entry */
                error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error == ENOENT)
                        break;
                else if (error)
                        goto cleanup;

                /* Read past the core descriptor */
                if ((error = bcma_erom_parse_core(sc, &core)))
                        goto cleanup;
        }

        /* Allocate our output buffer */
        buffer = mallocarray(count, sizeof(struct bhnd_core_info), M_BHND,
            M_NOWAIT);
        if (buffer == NULL) {
                error = ENOMEM;
                goto cleanup;
        }

        /* Parse all core descriptors */
        bcma_erom_reset(sc);
        for (u_int i = 0; i < count; i++) {
                struct bcma_erom_core core;
                int unit;

                /* Parse the core */
                error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error)
                        goto cleanup;

                error = bcma_erom_parse_core(sc, &core);
                if (error)
                        goto cleanup;

                /* Determine the unit number by counting previously parsed
                 * cores with the same vendor/device */
                unit = 0;
                for (u_int j = 0; j < i; j++) {
                        if (core.vendor == buffer[j].vendor &&
                            core.device == buffer[j].device)
                                unit++;
                }

                /* Convert to a bhnd info record */
                bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
        }

cleanup:
        if (!error) {
                *cores = buffer;
                *num_cores = count;
        } else {
                if (buffer != NULL)
                        free(buffer, M_BHND);
        }

        /* Restore the initial position */
        bcma_erom_seek(sc, initial_offset);
        return (error);
}

static void
bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
{
        free(cores, M_BHND);
}

/**
 * Return the current read position.
 */
static bus_size_t
bcma_erom_tell(struct bcma_erom *erom)
{
        return (erom->offset);
}

/**
 * Seek to an absolute read position.
 */
static void
bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
{
        erom->offset = offset;
}

/**
 * Read a 32-bit entry value from the EROM table without advancing the
 * read position.
 *
 * @param erom EROM read state.
 * @param entry Will contain the read result on success.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
{
        if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
                EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
                return (EINVAL);
        }

        *entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
        return (0);
}

/**
 * Read a 32-bit entry value from the EROM table.
 *
 * @param erom EROM read state.
 * @param entry Will contain the read result on success.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
{
        int error;

        if ((error = bcma_erom_peek32(erom, entry)) == 0)
                erom->offset += 4;

        return (error);
}

/**
 * Read and discard a 32-bit entry value from the EROM table.
 *
 * @param erom EROM read state.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
bcma_erom_skip32(struct bcma_erom *erom)
{
        uint32_t entry;

        return bcma_erom_read32(erom, &entry);
}

/**
 * Read and discard a core descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
bcma_erom_skip_core(struct bcma_erom *erom)
{
        struct bcma_erom_core core;
        return (bcma_erom_parse_core(erom, &core));
}

/**
 * Read and discard a master port descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
bcma_erom_skip_mport(struct bcma_erom *erom)
{
        struct bcma_erom_mport mp;
        return (bcma_erom_parse_mport(erom, &mp));
}

/**
 * Read and discard a port region descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
bcma_erom_skip_sport_region(struct bcma_erom *erom)
{
        struct bcma_erom_sport_region r;
        return (bcma_erom_parse_sport_region(erom, &r));
}

/**
 * Seek to the next entry matching the given EROM entry type.
 *
 * @param erom EROM read state.
 * @param etype One of BCMA_EROM_ENTRY_TYPE_CORE,
 * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
{
        uint32_t entry;
        int error;

        /* Iterate until we hit an entry matching the requested type. */
        while (!(error = bcma_erom_peek32(erom, &entry))) {
                /* Handle EOF */
                if (entry == BCMA_EROM_TABLE_EOF)
                        return (ENOENT);

                /* Invalid entry */
                if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
                        return (EINVAL);

                /* Entry type matches? */
                if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
                        return (0);

                /* Skip non-matching entry types. */
                switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
                case BCMA_EROM_ENTRY_TYPE_CORE:
                        if ((error = bcma_erom_skip_core(erom)))
                                return (error);

                        break;

                case BCMA_EROM_ENTRY_TYPE_MPORT:
                        if ((error = bcma_erom_skip_mport(erom)))
                                return (error);

                        break;

                case BCMA_EROM_ENTRY_TYPE_REGION:
                        if ((error = bcma_erom_skip_sport_region(erom)))
                                return (error);
                        break;

                default:
                        /* Unknown entry type! */
                        return (EINVAL);
                }
        }

        return (error);
}

/**
 * Return the read position to the start of the EROM table.
 *
 * @param erom EROM read state.
 */
static void
bcma_erom_reset(struct bcma_erom *erom)
{
        erom->offset = 0;
}

/**
 * Seek to the first core entry matching @p desc.
 *
 * @param erom EROM read state.
 * @param desc The core match descriptor.
 * @param[out] core On success, the matching core info. If the core info
 * is not desired, a NULL pointer may be provided.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached before a matching
 * core was found.
 * @retval non-zero Reading or parsing failed.
 */
static int
bcma_erom_seek_matching_core(struct bcma_erom *sc,
    const struct bhnd_core_match *desc, struct bhnd_core_info *core)
{
        struct bhnd_core_match imatch;
        bus_size_t core_offset, next_offset;
        int error;

        /* Seek to table start. */
        bcma_erom_reset(sc);

        /* We can't determine a core's unit number during the initial scan. */
        imatch = *desc;
        imatch.m.match.core_unit = 0;

        /* Locate the first matching core */
        for (u_int i = 0; i < UINT_MAX; i++) {
                struct bcma_erom_core ec;
                struct bhnd_core_info ci;

                /* Seek to the next core */
                error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error)
                        return (error);

                /* Save the core offset */
                core_offset = bcma_erom_tell(sc);

                /* Parse the core */
                if ((error = bcma_erom_parse_core(sc, &ec)))
                        return (error);

                bcma_erom_to_core_info(&ec, i, 0, &ci);

                /* Check for initial match */
                if (!bhnd_core_matches(&ci, &imatch))
                        continue;

                /* Re-scan preceding cores to determine the unit number. */
                next_offset = bcma_erom_tell(sc);
                bcma_erom_reset(sc);
                for (u_int j = 0; j < i; j++) {
                        /* Parse the core */
                        error = bcma_erom_seek_next(sc,
                            BCMA_EROM_ENTRY_TYPE_CORE);
                        if (error)
                                return (error);

                        if ((error = bcma_erom_parse_core(sc, &ec)))
                                return (error);

                        /* Bump the unit number? */
                        if (ec.vendor == ci.vendor && ec.device == ci.device)
                                ci.unit++;
                }

                /* Check for full match against now-valid unit number */
                if (!bhnd_core_matches(&ci, desc)) {
                        /* Reposition to allow reading the next core */
                        bcma_erom_seek(sc, next_offset);
                        continue;
                }

                /* Found; seek to the core's initial offset and provide
                 * the core info to the caller */
                bcma_erom_seek(sc, core_offset);
                if (core != NULL)
                        *core = ci;

                return (0);
        }

        /* Not found, or a parse error occurred */
        return (error);
}

/**
 * Read the next core descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @param[out] core On success, will be populated with the parsed core
 * descriptor data.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero Reading or parsing the core descriptor failed.
 */
static int
bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
{
        uint32_t entry;
        int error;

        /* Parse CoreDescA */
        if ((error = bcma_erom_read32(erom, &entry)))
                return (error);

        /* Handle EOF */
        if (entry == BCMA_EROM_TABLE_EOF)
                return (ENOENT);

        if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
                EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
                    entry, bcma_erom_entry_type_name(entry));

                return (EINVAL);
        }

        core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
        core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);

        /* Parse CoreDescB */
        if ((error = bcma_erom_read32(erom, &entry)))
                return (error);

        if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
                return (EINVAL);
        }

        core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
        core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
        core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
        core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
        core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);

        return (0);
}

/**
 * Read the next master port descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @param[out] mport On success, will be populated with the parsed
 * descriptor data.
 * @retval 0 success
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
{
        uint32_t entry;
        int error;

        /* Parse the master port descriptor */
        if ((error = bcma_erom_read32(erom, &entry)))
                return (error);

        if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
                return (EINVAL);

        mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
        mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);

        return (0);
}

/**
 * Read the next slave port region descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @param[out] region On success, will be populated with the parsed
 * descriptor data.
 * @retval 0 success
 * @retval ENOENT The end of the region descriptor table was reached.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
bcma_erom_parse_sport_region(struct bcma_erom *erom,
    struct bcma_erom_sport_region *region)
{
        uint32_t entry;
        uint8_t size_type;
        int error;

        /* Peek at the region descriptor */
        if (bcma_erom_peek32(erom, &entry))
                return (EINVAL);

        /* A non-region entry signals the end of the region table */
        if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
                return (ENOENT);
        } else {
                bcma_erom_skip32(erom);
        }

        region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
        region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
        region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
        size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

        /* If the region address is 64-bit, fetch the high bits. */
        if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
                if ((error = bcma_erom_read32(erom, &entry)))
                        return (error);

                region->base_addr |= ((bhnd_addr_t) entry << 32);
        }

        /* Parse the region size; it's either encoded as the binary logarithm
         * of the number of 4K pages (i.e. log2 n), or it's encoded as a
         * 32-bit/64-bit literal value directly following the current entry.
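         *
         * For example (illustrative values only, assuming the 4K page base
         * noted above): a size_type of 0x3 encodes a region size of
         * 0x1000 << 3 = 0x8000 bytes, i.e. eight 4K pages, whereas a
         * size_type of BCMA_EROM_REGION_SIZE_OTHER means the size must be
         * read as a literal from the next entry (plus one more entry for
         * the high 32 bits when RSIZE_64BIT is set).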
         */
        if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
                if ((error = bcma_erom_read32(erom, &entry)))
                        return (error);

                region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);

                if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
                        if ((error = bcma_erom_read32(erom, &entry)))
                                return (error);
                        region->size |= ((bhnd_size_t) entry << 32);
                }
        } else {
                region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
        }

        /* Verify that addr+size does not overflow. */
        if (region->size != 0 &&
            BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
        {
                EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
                    bcma_erom_entry_type_name(region->region_type),
                    region->region_port,
                    (unsigned long long) region->base_addr,
                    (unsigned long long) region->size);

                return (EINVAL);
        }

        return (0);
}

/**
 * Convert a bcma_erom_core record to its bhnd_core_info representation.
 *
 * @param core EROM core record to convert.
 * @param core_idx The core index of @p core.
 * @param core_unit The core unit of @p core.
 * @param[out] info The populated bhnd_core_info representation.
 */
static void
bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
    int core_unit, struct bhnd_core_info *info)
{
        info->vendor = core->vendor;
        info->device = core->device;
        info->hwrev = core->rev;
        info->core_idx = core_idx;
        info->unit = core_unit;
}

/**
 * Map an EROM region type to its corresponding port type.
 *
 * @param erom EROM read state.
 * @param region_type Region type value.
 * @param[out] port_type On success, the corresponding port type.
 * @retval 0 success
 * @retval EINVAL if the region type is unsupported.
 */
static int
bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
    bhnd_port_type *port_type)
{
        switch (region_type) {
        case BCMA_EROM_REGION_TYPE_DEVICE:
                *port_type = BHND_PORT_DEVICE;
                return (0);
        case BCMA_EROM_REGION_TYPE_BRIDGE:
                *port_type = BHND_PORT_BRIDGE;
                return (0);
        case BCMA_EROM_REGION_TYPE_MWRAP:
        case BCMA_EROM_REGION_TYPE_SWRAP:
                *port_type = BHND_PORT_AGENT;
                return (0);
        default:
                EROM_LOG(erom, "unsupported region type %hhx\n",
                    region_type);
                return (EINVAL);
        }
}

/**
 * Register all MMIO region descriptors for the given slave port.
 *
 * Parsing begins at the current EROM read position; on success, the read
 * position is advanced past the parsed region descriptors.
 *
 * @param erom EROM read state.
 * @param corecfg Core info to be populated with the scanned port regions.
 * @param port_num Port index for which regions will be parsed.
 * @param region_type The region type to be parsed.
 */
static int
bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
    struct bcma_corecfg *corecfg, bcma_pid_t port_num,
    uint8_t region_type)
{
        struct bcma_sport *sport;
        struct bcma_sport_list *sports;
        bus_size_t entry_offset;
        int error;
        bhnd_port_type port_type;

        error = 0;

        /* Determine the port type for this region type. */
        error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
        if (error)
                return (error);

        /* Fetch the list to be populated */
        sports = bcma_corecfg_get_port_list(corecfg, port_type);

        /* Allocate a new port descriptor */
        sport = bcma_alloc_sport(port_num, port_type);
        if (sport == NULL)
                return (ENOMEM);

        /* Read all address regions defined for this port */
        for (bcma_rmid_t region_num = 0;; region_num++) {
                struct bcma_map *map;
                struct bcma_erom_sport_region spr;

                /* No valid port definition should come anywhere near
                 * BCMA_RMID_MAX. */
                if (region_num == BCMA_RMID_MAX) {
                        EROM_LOG(erom, "core%u %s%u: region count reached "
                            "upper limit of %u\n",
                            corecfg->core_info.core_idx,
                            bhnd_port_type_name(port_type),
                            port_num, BCMA_RMID_MAX);

                        error = EINVAL;
                        goto cleanup;
                }

                /* Parse the next region entry. */
                entry_offset = bcma_erom_tell(erom);
                error = bcma_erom_parse_sport_region(erom, &spr);
                if (error && error != ENOENT) {
                        EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
                            "address region\n",
                            corecfg->core_info.core_idx,
                            bhnd_port_type_name(port_type),
                            port_num, region_num);
                        goto cleanup;
                }

                /* ENOENT signals no further region entries */
                if (error == ENOENT) {
                        /* No further entries */
                        error = 0;
                        break;
                }

                /* A region or type mismatch also signals no further region
                 * entries */
                if (spr.region_port != port_num ||
                    spr.region_type != region_type)
                {
                        /* We don't want to consume this entry */
                        bcma_erom_seek(erom, entry_offset);

                        error = 0;
                        goto cleanup;
                }

                /*
                 * Create the map entry.
                 */
                map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
                if (map == NULL) {
                        error = ENOMEM;
                        goto cleanup;
                }

                map->m_region_num = region_num;
                map->m_base = spr.base_addr;
                map->m_size = spr.size;
                map->m_rid = -1;

                /* Add the region map to the port */
                STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
                sport->sp_num_maps++;
        }

cleanup:
        /* Append the new port descriptor on success, or deallocate the
         * partially parsed descriptor on failure. */
        if (error == 0) {
                STAILQ_INSERT_TAIL(sports, sport, sp_link);
        } else if (sport != NULL) {
                bcma_free_sport(sport);
        }

        return error;
}

/**
 * Parse the next core entry from the EROM table and produce a bcma_corecfg
 * to be owned by the caller.
 *
 * @param erom A bcma EROM instance.
 * @param[out] result On success, the core's device info. The caller inherits
 * ownership of this allocation.
 *
 * @return If successful, returns 0. If the end of the EROM table is hit,
 * ENOENT will be returned. On error, returns a non-zero error value.
 */
int
bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
{
        struct bcma_corecfg *cfg;
        struct bcma_erom_core core;
        uint8_t first_region_type;
        bus_size_t initial_offset;
        u_int core_index;
        int core_unit;
        int error;

        cfg = NULL;
        initial_offset = bcma_erom_tell(erom);

        /* Parse the next core entry */
        if ((error = bcma_erom_parse_core(erom, &core)))
                return (error);

        /* Determine the core's index and unit numbers */
        bcma_erom_reset(erom);
        core_unit = 0;
        core_index = 0;
        for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
                struct bcma_erom_core prev_core;

                /* Parse next core */
                error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error)
                        return (error);

                if ((error = bcma_erom_parse_core(erom, &prev_core)))
                        return (error);

                /* Earlier unit of the same core? */
                if (core.vendor == prev_core.vendor &&
                    core.device == prev_core.device)
                {
                        core_unit++;
                }

                /* Seek to next core */
                error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error)
                        return (error);
        }

        /* We already parsed the core descriptor */
        if ((error = bcma_erom_skip_core(erom)))
                return (error);

        /* Allocate our corecfg */
        cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
            core.device, core.rev);
        if (cfg == NULL)
                return (ENOMEM);

        /* These are 5-bit values in the EROM table, and should never be able
         * to overflow BCMA_PID_MAX. */
        KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
        KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
        KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
            ("unsupported wport count"));

        if (bootverbose) {
                EROM_LOG(erom,
                    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
                    core_index,
                    bhnd_vendor_name(core.vendor),
                    bhnd_find_core_name(core.vendor, core.device),
                    core.device, core.rev, core_unit);
        }

        cfg->num_master_ports = core.num_mport;
        cfg->num_dev_ports = 0;         /* determined below */
        cfg->num_bridge_ports = 0;      /* determined below */
        cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;

        /* Parse Master Port Descriptors */
        for (uint8_t i = 0; i < core.num_mport; i++) {
                struct bcma_mport *mport;
                struct bcma_erom_mport mpd;

                /* Parse the master port descriptor */
                error = bcma_erom_parse_mport(erom, &mpd);
                if (error)
                        goto failed;

                /* Initialize a new bus mport structure */
                mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
                if (mport == NULL) {
                        error = ENOMEM;
                        goto failed;
                }

                mport->mp_vid = mpd.port_vid;
                mport->mp_num = mpd.port_num;

                /* Update dinfo */
                STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
        }

        /*
         * Determine whether this is a bridge device; if so, we can
         * expect the first sequence of address region descriptors to
         * be of BCMA_EROM_REGION_TYPE_BRIDGE instead of
         * BCMA_EROM_REGION_TYPE_DEVICE.
         *
         * It's unclear whether this is the correct mechanism by which we
         * should detect/handle bridge devices, but this approach matches
         * that of (some of) Broadcom's published drivers.
         */
        if (core.num_dport > 0) {
                uint32_t entry;

                if ((error = bcma_erom_peek32(erom, &entry)))
                        goto failed;

                if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
                    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) ==
                    BCMA_EROM_REGION_TYPE_BRIDGE)
                {
                        first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
                        cfg->num_dev_ports = 0;
                        cfg->num_bridge_ports = core.num_dport;
                } else {
                        first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
                        cfg->num_dev_ports = core.num_dport;
                        cfg->num_bridge_ports = 0;
                }
        }

        /* Device/bridge port descriptors */
        for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
                error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
                    first_region_type);

                if (error)
                        goto failed;
        }

        /* Wrapper (aka device management) descriptors (for master ports). */
        for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
                error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
                    BCMA_EROM_REGION_TYPE_MWRAP);

                if (error)
                        goto failed;
        }

        /* Wrapper (aka device management) descriptors (for slave ports). */
        for (uint8_t i = 0; i < core.num_swrap; i++) {
                /* Slave wrapper ports are not numbered distinctly from master
                 * wrapper ports. */

                /*
                 * Broadcom DDR1/DDR2 Memory Controller
                 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
                 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
                 *
                 * ARM BP135 AMBA3 AXI to APB Bridge
                 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
                 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
                 *
                 * core.num_mwrap
                 * ===>
                 * (core.num_mwrap > 0) ?
                 *         core.num_mwrap :
                 *         ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
                 */
                uint8_t sp_num;
                sp_num = (core.num_mwrap > 0) ?
                        core.num_mwrap :
                        ((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
                error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
                    BCMA_EROM_REGION_TYPE_SWRAP);

                if (error)
                        goto failed;
        }

        /*
         * Seek to the next core entry (if any), skipping any dangling/invalid
         * region entries.
         *
         * On the BCM4706, the EROM entry for the memory controller core
         * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
         * descriptor.
         */
        if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
                if (error != ENOENT)
                        goto failed;
        }

        *result = cfg;
        return (0);

failed:
        if (cfg != NULL)
                bcma_free_corecfg(cfg);

        return error;
}

static int
bcma_erom_dump(bhnd_erom_t *erom)
{
        struct bcma_erom *sc;
        uint32_t entry;
        int error;

        sc = (struct bcma_erom *)erom;

        bcma_erom_reset(sc);

        while (!(error = bcma_erom_read32(sc, &entry))) {
                /* Handle EOF */
                if (entry == BCMA_EROM_TABLE_EOF) {
                        EROM_LOG(sc, "EOF\n");
                        return (0);
                }

                /* Invalid entry */
                if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
                        EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
                        return (EINVAL);
                }

                switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
                case BCMA_EROM_ENTRY_TYPE_CORE: {
                        /* CoreDescA */
                        EROM_LOG(sc, "coreA (0x%x)\n", entry);
                        EROM_LOG(sc, "\tdesigner:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
                        EROM_LOG(sc, "\tid:\t\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREA_ID));
                        EROM_LOG(sc, "\tclass:\t\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREA_CLASS));

                        /* CoreDescB */
                        if ((error = bcma_erom_read32(sc, &entry))) {
                                EROM_LOG(sc, "error reading CoreDescB: %d\n",
                                    error);
                                return (error);
                        }

                        if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
                                EROM_LOG(sc, "invalid core descriptor; found "
                                    "unexpected entry %#x (type=%s)\n",
                                    entry, bcma_erom_entry_type_name(entry));
                                return (EINVAL);
                        }

                        EROM_LOG(sc, "coreB (0x%x)\n", entry);
                        EROM_LOG(sc, "\trev:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREB_REV));
                        EROM_LOG(sc, "\tnummp:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
                        EROM_LOG(sc, "\tnumdp:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
                        EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
                        EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP));

                        break;
                }
                case BCMA_EROM_ENTRY_TYPE_MPORT:
                        EROM_LOG(sc, "\tmport 0x%x\n", entry);
                        EROM_LOG(sc, "\t\tport:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
                        EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, MPORT_ID));
                        break;

                case BCMA_EROM_ENTRY_TYPE_REGION: {
                        bool addr64;
                        uint8_t size_type;

                        addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
                        size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

                        EROM_LOG(sc, "\tregion 0x%x:\n", entry);
                        EROM_LOG(sc, "\t\t%s:\t0x%x\n",
                            addr64 ? "baselo" : "base",
                            BCMA_EROM_GET_ATTR(entry, REGION_BASE));
                        EROM_LOG(sc, "\t\tport:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, REGION_PORT));
                        EROM_LOG(sc, "\t\ttype:\t0x%x\n",
                            BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
                        EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);

                        /* Read the base address high bits */
                        if (addr64) {
                                if ((error = bcma_erom_read32(sc, &entry))) {
                                        EROM_LOG(sc, "error reading region "
                                            "base address high bits %d\n",
                                            error);
                                        return (error);
                                }

                                EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
                        }

                        /* Read extended size descriptor */
                        if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
                                bool size64;

                                if ((error = bcma_erom_read32(sc, &entry))) {
                                        EROM_LOG(sc, "error reading region "
                                            "size descriptor %d\n",
                                            error);
                                        return (error);
                                }

                                if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
                                        size64 = true;
                                else
                                        size64 = false;

                                EROM_LOG(sc, "\t\t%s:\t0x%x\n",
                                    size64 ? "sizelo" : "size",
                                    BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));

                                if (size64) {
                                        error = bcma_erom_read32(sc, &entry);
                                        if (error) {
                                                EROM_LOG(sc, "error reading "
                                                    "region size high bits: "
                                                    "%d\n", error);
                                                return (error);
                                        }

                                        EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
                                            entry);
                                }
                        }
                        break;
                }

                default:
                        EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
                            entry, bcma_erom_entry_type_name(entry));
                        return (EINVAL);
                }
        }

        if (error == ENOENT)
                EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
        else if (error)
                EROM_LOG(sc, "EROM read failed: %d\n", error);

        return (error);
}

static kobj_method_t bcma_erom_methods[] = {
        KOBJMETHOD(bhnd_erom_probe,             bcma_erom_probe),
        KOBJMETHOD(bhnd_erom_init,              bcma_erom_init),
        KOBJMETHOD(bhnd_erom_fini,              bcma_erom_fini),
        KOBJMETHOD(bhnd_erom_get_core_table,    bcma_erom_get_core_table),
        KOBJMETHOD(bhnd_erom_free_core_table,   bcma_erom_free_core_table),
        KOBJMETHOD(bhnd_erom_lookup_core,       bcma_erom_lookup_core),
        KOBJMETHOD(bhnd_erom_lookup_core_addr,  bcma_erom_lookup_core_addr),
        KOBJMETHOD(bhnd_erom_dump,              bcma_erom_dump),

        KOBJMETHOD_END
};

BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods,
    sizeof(struct bcma_erom));