/*-
 * Copyright (c) 2015 Landon Fuller <landon@landonf.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include "bcma_eromreg.h"
#include "bcma_eromvar.h"

/*
 * BCMA Enumeration ROM (EROM) Table
 *
 * Provides auto-discovery of BCMA cores on Broadcom's HND SoCs.
 *
 * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
 * ChipCommon registers. The table itself is composed of 32-bit
 * type-tagged entries, organized into an array of variable-length
 * core descriptor records.
 *
 * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
 * marker.
 */
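
/*
 * An illustrative sketch of a single core descriptor record, as
 * reconstructed from the parsing logic below (see
 * bcma_erom_parse_corecfg()); entry counts vary per-core:
 *
 *      CoreDescA                  core designer and part number
 *      CoreDescB                  revision and port/wrapper counts
 *      master port descriptors    one entry per master port
 *      region descriptors         slave port regions (device or bridge),
 *                                 followed by master- and slave-wrapper
 *                                 regions; 64-bit base addresses and
 *                                 literal sizes consume additional 32-bit
 *                                 entries
 */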

static const char       *erom_entry_type_name(uint8_t entry);
static int               erom_read32(struct bcma_erom *erom, uint32_t *entry);
static int               erom_skip32(struct bcma_erom *erom);

static int               erom_skip_core(struct bcma_erom *erom);
static int               erom_skip_mport(struct bcma_erom *erom);
static int               erom_skip_sport_region(struct bcma_erom *erom);

static int               erom_seek_next(struct bcma_erom *erom, uint8_t etype);

#define EROM_LOG(erom, fmt, ...)                                        \
        device_printf(erom->dev, "erom[0x%llx]: " fmt,                  \
            (unsigned long long)(erom->offset), ##__VA_ARGS__)

/**
 * Open an EROM table for reading.
 *
 * @param[out] erom On success, will be populated with a valid EROM
 * read state.
 * @param r An active resource mapping the EROM core.
 * @param offset Offset of the EROM core within @p r.
 *
 * @retval 0 success
 * @retval non-zero if the EROM table could not be opened.
 */
int
bcma_erom_open(struct bcma_erom *erom, struct resource *r, bus_size_t offset)
{
        /* Initialize the EROM reader */
        erom->dev = rman_get_device(r);
        erom->r = r;
        erom->start = offset + BCMA_EROM_TABLE_START;
        erom->offset = 0;

        return (0);
}

/** Return the type name for an EROM entry */
static const char *
erom_entry_type_name(uint8_t entry)
{
        switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
        case BCMA_EROM_ENTRY_TYPE_CORE:
                return ("core");
        case BCMA_EROM_ENTRY_TYPE_MPORT:
                return ("mport");
        case BCMA_EROM_ENTRY_TYPE_REGION:
                return ("region");
        default:
                return ("unknown");
        }
}

/**
 * Return the current read position.
 */
bus_size_t
bcma_erom_tell(struct bcma_erom *erom)
{
        return (erom->offset);
}

/**
 * Seek to an absolute read position.
 */
void
bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
{
        erom->offset = offset;
}

/**
 * Read a 32-bit entry value from the EROM table without advancing the
 * read position.
 *
 * @param erom EROM read state.
 * @param entry Will contain the read result on success.
 *
 * @retval 0 success
 * @retval EINVAL The end of the table was reached without encountering a
 * terminating BCMA_EROM_TABLE_EOF marker.
 */
int
bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
{
        if (erom->offset >= BCMA_EROM_TABLE_SIZE) {
                EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
                return (EINVAL);
        }

        *entry = bus_read_4(erom->r, erom->start + erom->offset);
        return (0);
}

/**
 * Read a 32-bit entry value from the EROM table, advancing the read
 * position.
 *
 * @param erom EROM read state.
 * @param entry Will contain the read result on success.
 *
 * @retval 0 success
 * @retval non-zero The read could not be completed.
 */
static int
erom_read32(struct bcma_erom *erom, uint32_t *entry)
{
        int error;

        if ((error = bcma_erom_peek32(erom, entry)) == 0)
                erom->offset += 4;

        return (error);
}

/**
 * Read and discard a 32-bit entry value from the EROM table.
 *
 * @param erom EROM read state.
 *
 * @retval 0 success
 * @retval non-zero The read could not be completed.
 */
static int
erom_skip32(struct bcma_erom *erom)
{
        uint32_t entry;

        return (erom_read32(erom, &entry));
}
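
/*
 * The primitives above support a simple save/restore idiom that is used
 * throughout this file (e.g. by bcma_erom_get_core_info() and
 * erom_corecfg_fill_port_regions()); a minimal sketch:
 *
 *      bus_size_t saved = bcma_erom_tell(erom);
 *
 *      error = bcma_erom_parse_core(erom, &core);
 *      ...
 *      bcma_erom_seek(erom, saved);    // rewind; the entry is not consumed
 */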

/**
 * Read and discard a core descriptor from the EROM table.
 *
 * @param erom EROM read state.
 *
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
erom_skip_core(struct bcma_erom *erom)
{
        struct bcma_erom_core core;

        return (bcma_erom_parse_core(erom, &core));
}

/**
 * Read and discard a master port descriptor from the EROM table.
 *
 * @param erom EROM read state.
 *
 * @retval 0 success
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
erom_skip_mport(struct bcma_erom *erom)
{
        struct bcma_erom_mport mp;

        return (bcma_erom_parse_mport(erom, &mp));
}

/**
 * Read and discard a slave port region descriptor from the EROM table.
 *
 * @param erom EROM read state.
 *
 * @retval 0 success
 * @retval ENOENT The end of the region descriptor table was reached.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
erom_skip_sport_region(struct bcma_erom *erom)
{
        struct bcma_erom_sport_region r;

        return (bcma_erom_parse_sport_region(erom, &r));
}

/**
 * Seek to the next entry matching the given EROM entry type.
 *
 * @param erom EROM read state.
 * @param etype One of BCMA_EROM_ENTRY_TYPE_CORE,
 * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
 *
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
erom_seek_next(struct bcma_erom *erom, uint8_t etype)
{
        uint32_t entry;
        int error;

        /* Iterate until we hit an entry matching the requested type. */
        while (!(error = bcma_erom_peek32(erom, &entry))) {
                /* Handle EOF */
                if (entry == BCMA_EROM_TABLE_EOF)
                        return (ENOENT);

                /* Invalid entry */
                if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
                        return (EINVAL);

                /* Entry type matches? */
                if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
                        return (0);

                /* Skip non-matching entry types. */
                switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
                case BCMA_EROM_ENTRY_TYPE_CORE:
                        if ((error = erom_skip_core(erom)))
                                return (error);
                        break;

                case BCMA_EROM_ENTRY_TYPE_MPORT:
                        if ((error = erom_skip_mport(erom)))
                                return (error);
                        break;

                case BCMA_EROM_ENTRY_TYPE_REGION:
                        if ((error = erom_skip_sport_region(erom)))
                                return (error);
                        break;

                default:
                        /* Unknown entry type! */
                        return (EINVAL);
                }
        }

        return (error);
}

/**
 * Return the read position to the start of the EROM table.
 *
 * @param erom EROM read state.
 */
void
bcma_erom_reset(struct bcma_erom *erom)
{
        erom->offset = 0;
}

/**
 * Seek to the requested core entry.
 *
 * @param erom EROM read state.
 * @param core_index Index of the core to seek to.
 *
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached before @p core_index
 * was found.
 * @retval non-zero Reading or parsing failed.
 */
int
bcma_erom_seek_core_index(struct bcma_erom *erom, u_int core_index)
{
        int error;

        /* Start the search at the top of the EROM */
        bcma_erom_reset(erom);

        /* Skip core descriptors until we hit the requested entry */
        for (u_int i = 0; i < core_index; i++) {
                struct bcma_erom_core core;

                /* Read past the core descriptor */
                if ((error = bcma_erom_parse_core(erom, &core)))
                        return (error);

                /* Seek to the next readable core entry */
                error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error)
                        return (error);
        }

        return (0);
}
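
/*
 * Example (sketch): reading the core descriptor of the Nth core, assuming
 * `erom` was previously opened via bcma_erom_open():
 *
 *      struct bcma_erom_core core;
 *
 *      if ((error = bcma_erom_seek_core_index(erom, n)) == 0)
 *              error = bcma_erom_parse_core(erom, &core);
 */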

/**
 * Read the next core descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @param[out] core On success, will be populated with the parsed core
 * descriptor data.
 *
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero Reading or parsing the core descriptor failed.
 */
int
bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
{
        uint32_t entry;
        int error;

        /* Parse CoreDescA */
        if ((error = erom_read32(erom, &entry)))
                return (error);

        /* Handle EOF */
        if (entry == BCMA_EROM_TABLE_EOF)
                return (ENOENT);

        if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
                EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
                    entry, erom_entry_type_name(entry));

                return (EINVAL);
        }

        core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
        core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);

        /* Parse CoreDescB */
        if ((error = erom_read32(erom, &entry)))
                return (error);

        if (!BCMA_EROM_ENTRY_IS(entry, CORE))
                return (EINVAL);

        core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
        core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
        core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
        core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
        core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);

        return (0);
}

/**
 * Read the next master port descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @param[out] mport On success, will be populated with the parsed
 * descriptor data.
 *
 * @retval 0 success
 * @retval non-zero Reading or parsing the descriptor failed.
 */
int
bcma_erom_parse_mport(struct bcma_erom *erom,
    struct bcma_erom_mport *mport)
{
        uint32_t entry;
        int error;

        /* Parse the master port descriptor */
        if ((error = erom_read32(erom, &entry)))
                return (error);

        if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
                return (EINVAL);

        mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
        mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);

        return (0);
}
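
/*
 * Region size encoding, as handled by bcma_erom_parse_sport_region()
 * below. A REGION_SIZE attribute other than BCMA_EROM_REGION_SIZE_OTHER
 * is the binary logarithm of the region's size in 4K pages; for example
 * (sketch):
 *
 *      size_type = 0  =>  4KB << 0 = 4KB
 *      size_type = 5  =>  4KB << 5 = 128KB
 *
 * BCMA_EROM_REGION_SIZE_OTHER instead indicates that a literal 32-bit (or,
 * with RSIZE_64BIT set, 64-bit) size value follows the region entry.
 */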

/**
 * Read the next slave port region descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @param[out] region On success, will be populated with the parsed
 * descriptor data.
 *
 * @retval 0 success
 * @retval ENOENT The end of the region descriptor table was reached.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
int
bcma_erom_parse_sport_region(struct bcma_erom *erom,
    struct bcma_erom_sport_region *region)
{
        uint32_t entry;
        uint8_t size_type;
        int error;

        /* Peek at the region descriptor */
        if ((error = bcma_erom_peek32(erom, &entry)))
                return (error);

        /* A non-region entry signals the end of the region table */
        if (!BCMA_EROM_ENTRY_IS(entry, REGION))
                return (ENOENT);

        erom_skip32(erom);

        region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
        region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
        region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
        size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

        /* If the region address is 64-bit, fetch the high bits. */
        if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
                if ((error = erom_read32(erom, &entry)))
                        return (error);

                region->base_addr |= ((bhnd_addr_t)entry << 32);
        }

        /*
         * Parse the region size; it's either encoded as the binary
         * logarithm of the number of 4K pages (i.e. log2 n), or it's
         * encoded as a 32-bit/64-bit literal value directly following
         * the current entry.
         */
        if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
                if ((error = erom_read32(erom, &entry)))
                        return (error);

                region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);

                if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
                        if ((error = erom_read32(erom, &entry)))
                                return (error);
                        region->size |= ((bhnd_size_t)entry << 32);
                }
        } else {
                region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
        }

        /* Verify that base_addr + size does not overflow. */
        if (region->size != 0 &&
            BHND_ADDR_MAX - (region->size - 1) < region->base_addr) {
                EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
                    erom_entry_type_name(region->region_type),
                    region->region_port,
                    (unsigned long long)region->base_addr,
                    (unsigned long long)region->size);

                return (EINVAL);
        }

        return (0);
}

/**
 * Parse all core descriptors from @p erom, returning the array of
 * descriptors in @p cores and the array count in @p num_cores. The current
 * EROM read position is left unmodified.
 *
 * The memory allocated for the table must be freed using
 * `free(*cores, M_BHND)`. @p cores and @p num_cores are not changed
 * when an error is returned.
 *
 * @param erom EROM read state.
 * @param[out] cores The table of parsed core descriptors.
 * @param[out] num_cores The number of core records in @p cores.
 *
 * @retval 0 success
 * @retval non-zero Reading or parsing the core table failed.
 */
int
bcma_erom_get_core_info(struct bcma_erom *erom,
    struct bhnd_core_info **cores,
    u_int *num_cores)
{
        struct bhnd_core_info *buffer;
        bus_size_t initial_offset;
        u_int count;
        int error;

        buffer = NULL;
        initial_offset = bcma_erom_tell(erom);

        /* Determine the core count */
        bcma_erom_reset(erom);
        for (count = 0, error = 0; !error; count++) {
                struct bcma_erom_core core;

                /* Seek to the next readable core entry */
                error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error == ENOENT)
                        break;
                else if (error)
                        goto cleanup;

                /* Read past the core descriptor */
                if ((error = bcma_erom_parse_core(erom, &core)))
                        goto cleanup;
        }

        /* Allocate our output buffer */
        buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND,
            M_NOWAIT);
        if (buffer == NULL) {
                error = ENOMEM;
                goto cleanup;
        }

        /* Parse all core descriptors */
        bcma_erom_reset(erom);
        for (u_int i = 0; i < count; i++) {
                struct bcma_erom_core core;

                /* Seek to and parse the core's descriptor */
                error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
                if (error)
                        goto cleanup;

                error = bcma_erom_parse_core(erom, &core);
                if (error)
                        goto cleanup;

                /* Convert to a bhnd info record */
                buffer[i].vendor = core.vendor;
                buffer[i].device = core.device;
                buffer[i].hwrev = core.rev;
                buffer[i].core_idx = i;
                buffer[i].unit = 0;

                /* Determine the unit number by counting earlier cores
                 * with the same vendor/device pair. */
                for (u_int j = 0; j < i; j++) {
                        if (buffer[i].vendor == buffer[j].vendor &&
                            buffer[i].device == buffer[j].device)
                                buffer[i].unit++;
                }
        }

cleanup:
        if (!error) {
                *cores = buffer;
                *num_cores = count;
        } else {
                if (buffer != NULL)
                        free(buffer, M_BHND);
        }

        /* Restore the initial read position */
        bcma_erom_seek(erom, initial_offset);
        return (error);
}
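
/*
 * Example (sketch): enumerating all cores and releasing the returned table:
 *
 *      struct bhnd_core_info *cores;
 *      u_int ncores;
 *
 *      if ((error = bcma_erom_get_core_info(erom, &cores, &ncores)) == 0) {
 *              ... // use cores[0 .. ncores-1]
 *              free(cores, M_BHND);
 *      }
 */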

/**
 * Register all MMIO region descriptors for the given slave port.
 *
 * @param erom EROM read state.
 * @param corecfg Core info to be populated with the scanned port regions.
 * @param port_num Port index for which regions will be parsed.
 * @param region_type The region type to be parsed.
 *
 * @retval 0 success
 * @retval non-zero Reading or parsing the region descriptors failed.
 */
static int
erom_corecfg_fill_port_regions(struct bcma_erom *erom,
    struct bcma_corecfg *corecfg, bcma_pid_t port_num,
    uint8_t region_type)
{
        struct bcma_sport *sport;
        struct bcma_sport_list *sports;
        bus_size_t entry_offset;
        int error;
        bhnd_port_type port_type;

        error = 0;

        /* Determine the port type for this region type. */
        switch (region_type) {
        case BCMA_EROM_REGION_TYPE_DEVICE:
                port_type = BHND_PORT_DEVICE;
                break;
        case BCMA_EROM_REGION_TYPE_BRIDGE:
                port_type = BHND_PORT_BRIDGE;
                break;
        case BCMA_EROM_REGION_TYPE_MWRAP:
        case BCMA_EROM_REGION_TYPE_SWRAP:
                port_type = BHND_PORT_AGENT;
                break;
        default:
                EROM_LOG(erom, "unsupported region type %hhx\n",
                    region_type);
                return (EINVAL);
        }

        /* Fetch the list to be populated */
        sports = bcma_corecfg_get_port_list(corecfg, port_type);

        /* Allocate a new port descriptor */
        sport = bcma_alloc_sport(port_num, port_type);
        if (sport == NULL)
                return (ENOMEM);

        /* Read all address regions defined for this port */
        for (bcma_rmid_t region_num = 0;; region_num++) {
                struct bcma_map *map;
                struct bcma_erom_sport_region spr;

                /* No valid port definition should come anywhere near
                 * BCMA_RMID_MAX. */
                if (region_num == BCMA_RMID_MAX) {
                        EROM_LOG(erom, "core%u %s%u: region count reached "
                            "upper limit of %u\n",
                            corecfg->core_info.core_idx,
                            bhnd_port_type_name(port_type),
                            port_num, BCMA_RMID_MAX);

                        error = EINVAL;
                        goto cleanup;
                }

                /* Parse the next region entry. */
                entry_offset = bcma_erom_tell(erom);
                error = bcma_erom_parse_sport_region(erom, &spr);
                if (error && error != ENOENT) {
                        EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
                            "address region\n",
                            corecfg->core_info.core_idx,
                            bhnd_port_type_name(port_type),
                            port_num, region_num);
                        goto cleanup;
                }

                /* ENOENT signals no further region entries */
                if (error == ENOENT) {
                        error = 0;
                        break;
                }

                /* A port or type mismatch also signals no further region
                 * entries for this port */
                if (spr.region_port != port_num ||
                    spr.region_type != region_type) {
                        /* We don't want to consume this entry */
                        bcma_erom_seek(erom, entry_offset);

                        error = 0;
                        goto cleanup;
                }

                /* Create the map entry. */
                map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
                if (map == NULL) {
                        error = ENOMEM;
                        goto cleanup;
                }

                map->m_region_num = region_num;
                map->m_base = spr.base_addr;
                map->m_size = spr.size;
                map->m_rid = -1;

                /* Add the region map to the port */
                STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
                sport->sp_num_maps++;
        }

cleanup:
        /* Append the new port descriptor on success, or deallocate the
         * partially parsed descriptor on failure. */
        if (error == 0) {
                STAILQ_INSERT_TAIL(sports, sport, sp_link);
        } else if (sport != NULL) {
                bcma_free_sport(sport);
        }

        return (error);
}

/**
 * Parse the next core entry from the EROM table and produce a bcma_corecfg
 * to be owned by the caller.
 *
 * @param erom EROM read state.
 * @param[out] result On success, the core's device info. The caller inherits
 * ownership of this allocation.
 *
 * @return If successful, returns 0. If the end of the EROM table is hit,
 * ENOENT will be returned. On error, returns a non-zero error value.
 */
int
bcma_erom_parse_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
{
        struct bcma_corecfg *cfg;
        struct bcma_erom_core core;
        uint8_t first_region_type;
        bus_size_t initial_offset;
        u_int core_index;
        int core_unit;
        int error;

        cfg = NULL;
        initial_offset = bcma_erom_tell(erom);

        /* Parse the next core entry */
        if ((error = bcma_erom_parse_core(erom, &core)))
                return (error);

        /* Determine the core's index and unit numbers by replaying the
         * table from the start up to our entry. */
        bcma_erom_reset(erom);
        core_unit = 0;
        core_index = 0;
        for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
                struct bcma_erom_core prev_core;

                /* Parse the next core */
                if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
                        return (error);

                if ((error = bcma_erom_parse_core(erom, &prev_core)))
                        return (error);

                /* Is this an earlier unit of the same core type? */
                if (core.vendor == prev_core.vendor &&
                    core.device == prev_core.device)
                        core_unit++;

                /* Seek to the next core */
                if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
                        return (error);
        }

        /* We already parsed the core descriptor above; skip past it */
        if ((error = erom_skip_core(erom)))
                return (error);

        /* Allocate our corecfg */
        cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
            core.device, core.rev);
        if (cfg == NULL)
                return (ENOMEM);

        /* These are 5-bit values in the EROM table, and should never be
         * able to overflow BCMA_PID_MAX. */
        KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
        KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
        KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
            ("unsupported wport count"));

        if (bootverbose) {
                EROM_LOG(erom,
                    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
                    core_index,
                    bhnd_vendor_name(core.vendor),
                    bhnd_find_core_name(core.vendor, core.device),
                    core.device, core.rev, core_unit);
        }

        cfg->num_master_ports = core.num_mport;
        cfg->num_dev_ports = 0;         /* determined below */
        cfg->num_bridge_ports = 0;      /* determined below */
        cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;

        /* Parse master port descriptors */
        for (uint8_t i = 0; i < core.num_mport; i++) {
                struct bcma_mport *mport;
                struct bcma_erom_mport mpd;

                /* Parse the master port descriptor */
                error = bcma_erom_parse_mport(erom, &mpd);
                if (error)
                        goto failed;

                /* Initialize a new bus mport structure */
                mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
                if (mport == NULL) {
                        error = ENOMEM;
                        goto failed;
                }

                mport->mp_vid = mpd.port_vid;
                mport->mp_num = mpd.port_num;

                /* Add to the corecfg's master port list */
                STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
        }

        /*
         * Determine whether this is a bridge device; if so, we can
         * expect the first sequence of address region descriptors to
         * be of BCMA_EROM_REGION_TYPE_BRIDGE instead of
         * BCMA_EROM_REGION_TYPE_DEVICE.
         *
         * It's unclear whether this is the correct mechanism by which we
         * should detect/handle bridge devices, but this approach matches
         * that of (some of) Broadcom's published drivers.
         */
        if (core.num_dport > 0) {
                uint32_t entry;

                if ((error = bcma_erom_peek32(erom, &entry)))
                        goto failed;

                if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
                    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) ==
                    BCMA_EROM_REGION_TYPE_BRIDGE) {
                        first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
                        cfg->num_dev_ports = 0;
                        cfg->num_bridge_ports = core.num_dport;
                } else {
                        first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
                        cfg->num_dev_ports = core.num_dport;
                        cfg->num_bridge_ports = 0;
                }
        }

        /* Device/bridge port descriptors */
        for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
                error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
                    first_region_type);
                if (error)
                        goto failed;
        }

        /* Wrapper (aka device management) descriptors (for master ports). */
        for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
                error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
                    BCMA_EROM_REGION_TYPE_MWRAP);
                if (error)
                        goto failed;
        }

        /* Wrapper (aka device management) descriptors (for slave ports). */
        for (uint8_t i = 0; i < core.num_swrap; i++) {
                /* Slave wrapper ports are not numbered distinctly from
                 * master wrapper ports. */
                uint8_t sp_num = core.num_mwrap + i;

                error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
                    BCMA_EROM_REGION_TYPE_SWRAP);
                if (error)
                        goto failed;
        }

        *result = cfg;
        return (0);

failed:
        if (cfg != NULL)
                bcma_free_corecfg(cfg);

        return (error);
}
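
/*
 * Example (sketch): full enumeration of a device's EROM table, assuming a
 * mapped resource `r` and the EROM core offset `erom_offset`:
 *
 *      struct bcma_erom erom;
 *      struct bcma_corecfg *cfg;
 *
 *      bcma_erom_open(&erom, r, erom_offset);
 *      while ((error = bcma_erom_parse_corecfg(&erom, &cfg)) == 0) {
 *              ... // the caller now owns cfg
 *      }
 *      if (error != ENOENT)
 *              ... // handle the parse error; ENOENT is normal EOF
 */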