1 /*- 2 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org> 3 * Copyright (c) 2017 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Landon Fuller 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 16 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 17 * redistribution must be conditioned upon including a substantially 18 * similar Disclaimer requirement for further binary redistribution. 19 * 20 * NO WARRANTY 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 24 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 25 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 26 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 29 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGES. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include <sys/param.h> 38 #include <sys/kernel.h> 39 #include <sys/limits.h> 40 41 #include "bhndb_private.h" 42 #include "bhndbvar.h" 43 44 static int bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat, 45 const struct bhnd_dma_translation *translation, 46 bus_dma_tag_t *dmat); 47 48 /** 49 * Attach a BHND bridge device to @p parent. 50 * 51 * @param parent A parent PCI device. 52 * @param[out] bhndb On success, the probed and attached bhndb bridge device. 53 * @param unit The device unit number, or -1 to select the next available unit 54 * number. 55 * 56 * @retval 0 success 57 * @retval non-zero Failed to attach the bhndb device. 58 */ 59 int 60 bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit) 61 { 62 int error; 63 64 *bhndb = device_add_child(parent, "bhndb", unit); 65 if (*bhndb == NULL) 66 return (ENXIO); 67 68 if (!(error = device_probe_and_attach(*bhndb))) 69 return (0); 70 71 if ((device_delete_child(parent, *bhndb))) 72 device_printf(parent, "failed to detach bhndb child\n"); 73 74 return (error); 75 } 76 77 /* 78 * Call BHNDB_SUSPEND_RESOURCE() for all resources in @p rl. 79 */ 80 static void 81 bhndb_do_suspend_resources(device_t dev, struct resource_list *rl) 82 { 83 struct resource_list_entry *rle; 84 85 /* Suspend all child resources. */ 86 STAILQ_FOREACH(rle, rl, link) { 87 /* Skip non-allocated resources */ 88 if (rle->res == NULL) 89 continue; 90 91 BHNDB_SUSPEND_RESOURCE(device_get_parent(dev), dev, rle->type, 92 rle->res); 93 } 94 } 95 96 /** 97 * Helper function for implementing BUS_RESUME_CHILD() on bridged 98 * bhnd(4) buses. 99 * 100 * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() 101 * to find the child's resources and call BHNDB_SUSPEND_RESOURCE() for all 102 * child resources, ensuring that the device's allocated bridge resources 103 * will be available to other devices during bus resumption. 104 * 105 * Before suspending any resources, @p child is suspended by 106 * calling bhnd_generic_suspend_child(). 107 * 108 * If @p child is not a direct child of @p dev, suspension is delegated to 109 * the @p dev parent. 110 */ 111 int 112 bhnd_generic_br_suspend_child(device_t dev, device_t child) 113 { 114 struct resource_list *rl; 115 int error; 116 117 if (device_get_parent(child) != dev) 118 BUS_SUSPEND_CHILD(device_get_parent(dev), child); 119 120 if (device_is_suspended(child)) 121 return (EBUSY); 122 123 /* Suspend the child device */ 124 if ((error = bhnd_generic_suspend_child(dev, child))) 125 return (error); 126 127 /* Fetch the resource list. If none, there's nothing else to do */ 128 rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); 129 if (rl == NULL) 130 return (0); 131 132 /* Suspend all child resources. */ 133 bhndb_do_suspend_resources(dev, rl); 134 135 return (0); 136 } 137 138 /** 139 * Helper function for implementing BUS_RESUME_CHILD() on bridged 140 * bhnd(4) bus devices. 141 * 142 * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() 143 * to find the child's resources and call BHNDB_RESUME_RESOURCE() for all 144 * child resources, before delegating to bhnd_generic_resume_child(). 145 * 146 * If resource resumption fails, @p child will not be resumed. 147 * 148 * If @p child is not a direct child of @p dev, suspension is delegated to 149 * the @p dev parent. 150 */ 151 int 152 bhnd_generic_br_resume_child(device_t dev, device_t child) 153 { 154 struct resource_list *rl; 155 struct resource_list_entry *rle; 156 int error; 157 158 if (device_get_parent(child) != dev) 159 BUS_RESUME_CHILD(device_get_parent(dev), child); 160 161 if (!device_is_suspended(child)) 162 return (EBUSY); 163 164 /* Fetch the resource list. If none, there's nothing else to do */ 165 rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); 166 if (rl == NULL) 167 return (bhnd_generic_resume_child(dev, child)); 168 169 /* Resume all resources */ 170 STAILQ_FOREACH(rle, rl, link) { 171 /* Skip non-allocated resources */ 172 if (rle->res == NULL) 173 continue; 174 175 error = BHNDB_RESUME_RESOURCE(device_get_parent(dev), dev, 176 rle->type, rle->res); 177 if (error) { 178 /* Put all resources back into a suspend state */ 179 bhndb_do_suspend_resources(dev, rl); 180 return (error); 181 } 182 } 183 184 /* Now that all resources are resumed, resume child */ 185 if ((error = bhnd_generic_resume_child(dev, child))) { 186 /* Put all resources back into a suspend state */ 187 bhndb_do_suspend_resources(dev, rl); 188 } 189 190 return (error); 191 } 192 193 /** 194 * Find a host resource of @p type that maps the given range. 195 * 196 * @param hr The resource state to search. 197 * @param type The resource type to search for (see SYS_RES_*). 198 * @param start The start address of the range to search for. 199 * @param count The size of the range to search for. 200 * 201 * @retval resource the host resource containing the requested range. 202 * @retval NULL if no resource containing the requested range can be found. 203 */ 204 struct resource * 205 bhndb_host_resource_for_range(struct bhndb_host_resources *hr, int type, 206 rman_res_t start, rman_res_t count) 207 { 208 for (u_int i = 0; hr->resource_specs[i].type != -1; i++) { 209 struct resource *r = hr->resources[i]; 210 211 if (hr->resource_specs[i].type != type) 212 continue; 213 214 /* Verify range */ 215 if (rman_get_start(r) > start) 216 continue; 217 218 if (rman_get_end(r) < (start + count - 1)) 219 continue; 220 221 return (r); 222 } 223 224 return (NULL); 225 } 226 227 /** 228 * Find a host resource of that matches the given register window definition. 229 * 230 * @param hr The resource state to search. 231 * @param win A register window definition. 232 * 233 * @retval resource the host resource corresponding to @p win. 234 * @retval NULL if no resource corresponding to @p win can be found. 235 */ 236 struct resource * 237 bhndb_host_resource_for_regwin(struct bhndb_host_resources *hr, 238 const struct bhndb_regwin *win) 239 { 240 const struct resource_spec *rspecs; 241 242 rspecs = hr->resource_specs; 243 for (u_int i = 0; rspecs[i].type != -1; i++) { 244 if (win->res.type != rspecs[i].type) 245 continue; 246 247 if (win->res.rid != rspecs[i].rid) 248 continue; 249 250 /* Found declared resource */ 251 return (hr->resources[i]); 252 } 253 254 device_printf(hr->owner, "missing regwin resource spec " 255 "(type=%d, rid=%d)\n", win->res.type, win->res.rid); 256 257 return (NULL); 258 } 259 260 /** 261 * Allocate and initialize a new resource state structure. 262 * 263 * @param dev The bridge device. 264 * @param parent_dev The parent device from which host resources should be 265 * allocated. 266 * @param cfg The hardware configuration to be used. 267 */ 268 struct bhndb_resources * 269 bhndb_alloc_resources(device_t dev, device_t parent_dev, 270 const struct bhndb_hwcfg *cfg) 271 { 272 struct bhndb_resources *r; 273 const struct bhndb_regwin *win; 274 bus_size_t last_window_size; 275 int rnid; 276 int error; 277 bool free_ht_mem, free_br_mem, free_br_irq; 278 279 free_ht_mem = false; 280 free_br_mem = false; 281 free_br_irq = false; 282 283 r = malloc(sizeof(*r), M_BHND, M_NOWAIT|M_ZERO); 284 if (r == NULL) 285 return (NULL); 286 287 /* Basic initialization */ 288 r->dev = dev; 289 r->cfg = cfg; 290 r->res = NULL; 291 r->min_prio = BHNDB_PRIORITY_NONE; 292 STAILQ_INIT(&r->bus_regions); 293 STAILQ_INIT(&r->bus_intrs); 294 295 mtx_init(&r->dw_steal_mtx, device_get_nameunit(dev), 296 "bhndb dwa_steal lock", MTX_SPIN); 297 298 /* Initialize host address space resource manager. */ 299 r->ht_mem_rman.rm_start = 0; 300 r->ht_mem_rman.rm_end = ~0; 301 r->ht_mem_rman.rm_type = RMAN_ARRAY; 302 r->ht_mem_rman.rm_descr = "BHNDB host memory"; 303 if ((error = rman_init(&r->ht_mem_rman))) { 304 device_printf(r->dev, "could not initialize ht_mem_rman\n"); 305 goto failed; 306 } 307 free_ht_mem = true; 308 309 310 /* Initialize resource manager for the bridged address space. */ 311 r->br_mem_rman.rm_start = 0; 312 r->br_mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT; 313 r->br_mem_rman.rm_type = RMAN_ARRAY; 314 r->br_mem_rman.rm_descr = "BHNDB bridged memory"; 315 316 if ((error = rman_init(&r->br_mem_rman))) { 317 device_printf(r->dev, "could not initialize br_mem_rman\n"); 318 goto failed; 319 } 320 free_br_mem = true; 321 322 error = rman_manage_region(&r->br_mem_rman, 0, BUS_SPACE_MAXADDR_32BIT); 323 if (error) { 324 device_printf(r->dev, "could not configure br_mem_rman\n"); 325 goto failed; 326 } 327 328 329 /* Initialize resource manager for the bridged interrupt controller. */ 330 r->br_irq_rman.rm_start = 0; 331 r->br_irq_rman.rm_end = RM_MAX_END; 332 r->br_irq_rman.rm_type = RMAN_ARRAY; 333 r->br_irq_rman.rm_descr = "BHNDB bridged interrupts"; 334 335 if ((error = rman_init(&r->br_irq_rman))) { 336 device_printf(r->dev, "could not initialize br_irq_rman\n"); 337 goto failed; 338 } 339 free_br_irq = true; 340 341 error = rman_manage_region(&r->br_irq_rman, 0, RM_MAX_END); 342 if (error) { 343 device_printf(r->dev, "could not configure br_irq_rman\n"); 344 goto failed; 345 } 346 347 /* Fetch the dynamic regwin count and verify that it does not exceed 348 * what is representable via our freelist bitstring. */ 349 r->dwa_count = bhndb_regwin_count(cfg->register_windows, 350 BHNDB_REGWIN_T_DYN); 351 if (r->dwa_count >= INT_MAX) { 352 device_printf(r->dev, "max dynamic regwin count exceeded\n"); 353 goto failed; 354 } 355 356 /* Allocate the dynamic window allocation table. */ 357 r->dw_alloc = malloc(sizeof(r->dw_alloc[0]) * r->dwa_count, M_BHND, 358 M_NOWAIT); 359 if (r->dw_alloc == NULL) 360 goto failed; 361 362 /* Allocate the dynamic window allocation freelist */ 363 r->dwa_freelist = bit_alloc(r->dwa_count, M_BHND, M_NOWAIT); 364 if (r->dwa_freelist == NULL) 365 goto failed; 366 367 /* Initialize the dynamic window table */ 368 rnid = 0; 369 last_window_size = 0; 370 for (win = cfg->register_windows; 371 win->win_type != BHNDB_REGWIN_T_INVALID; win++) 372 { 373 struct bhndb_dw_alloc *dwa; 374 375 /* Skip non-DYN windows */ 376 if (win->win_type != BHNDB_REGWIN_T_DYN) 377 continue; 378 379 /* Validate the window size */ 380 if (win->win_size == 0) { 381 device_printf(r->dev, "ignoring zero-length dynamic " 382 "register window\n"); 383 continue; 384 } else if (last_window_size == 0) { 385 last_window_size = win->win_size; 386 } else if (last_window_size != win->win_size) { 387 /* 388 * No existing hardware should trigger this. 389 * 390 * If you run into this in the future, the dynamic 391 * window allocator and the resource priority system 392 * will need to be extended to support multiple register 393 * window allocation pools. 394 */ 395 device_printf(r->dev, "devices that vend multiple " 396 "dynamic register window sizes are not currently " 397 "supported\n"); 398 goto failed; 399 } 400 401 dwa = &r->dw_alloc[rnid]; 402 dwa->win = win; 403 dwa->parent_res = NULL; 404 dwa->rnid = rnid; 405 dwa->target = 0x0; 406 407 LIST_INIT(&dwa->refs); 408 rnid++; 409 } 410 411 /* Allocate host resources */ 412 error = bhndb_alloc_host_resources(&r->res, dev, parent_dev, r->cfg); 413 if (error) { 414 device_printf(r->dev, 415 "could not allocate host resources on %s: %d\n", 416 device_get_nameunit(parent_dev), error); 417 goto failed; 418 } 419 420 /* Populate (and validate) parent resource references for all 421 * dynamic windows */ 422 for (size_t i = 0; i < r->dwa_count; i++) { 423 struct bhndb_dw_alloc *dwa; 424 const struct bhndb_regwin *win; 425 426 dwa = &r->dw_alloc[i]; 427 win = dwa->win; 428 429 /* Find and validate corresponding resource. */ 430 dwa->parent_res = bhndb_host_resource_for_regwin(r->res, win); 431 if (dwa->parent_res == NULL) { 432 device_printf(r->dev, "no host resource found for %u " 433 "register window with offset %#jx and " 434 "size %#jx\n", 435 win->win_type, 436 (uintmax_t)win->win_offset, 437 (uintmax_t)win->win_size); 438 439 error = ENXIO; 440 goto failed; 441 } 442 443 if (rman_get_size(dwa->parent_res) < win->win_offset + 444 win->win_size) 445 { 446 device_printf(r->dev, "resource %d too small for " 447 "register window with offset %llx and size %llx\n", 448 rman_get_rid(dwa->parent_res), 449 (unsigned long long) win->win_offset, 450 (unsigned long long) win->win_size); 451 452 error = EINVAL; 453 goto failed; 454 } 455 } 456 457 /* Add allocated memory resources to our host memory resource manager */ 458 for (u_int i = 0; r->res->resource_specs[i].type != -1; i++) { 459 struct resource *res; 460 461 /* skip non-memory resources */ 462 if (r->res->resource_specs[i].type != SYS_RES_MEMORY) 463 continue; 464 465 /* add host resource to set of managed regions */ 466 res = r->res->resources[i]; 467 error = rman_manage_region(&r->ht_mem_rman, 468 rman_get_start(res), rman_get_end(res)); 469 if (error) { 470 device_printf(r->dev, 471 "could not register host memory region with " 472 "ht_mem_rman: %d\n", error); 473 goto failed; 474 } 475 } 476 477 return (r); 478 479 failed: 480 if (free_ht_mem) 481 rman_fini(&r->ht_mem_rman); 482 483 if (free_br_mem) 484 rman_fini(&r->br_mem_rman); 485 486 if (free_br_irq) 487 rman_fini(&r->br_irq_rman); 488 489 if (r->dw_alloc != NULL) 490 free(r->dw_alloc, M_BHND); 491 492 if (r->dwa_freelist != NULL) 493 free(r->dwa_freelist, M_BHND); 494 495 if (r->res != NULL) 496 bhndb_release_host_resources(r->res); 497 498 mtx_destroy(&r->dw_steal_mtx); 499 500 free(r, M_BHND); 501 502 return (NULL); 503 } 504 505 /** 506 * Create a new DMA tag for the given @p translation. 507 * 508 * @param dev The bridge device. 509 * @param parent_dmat The parent DMA tag, or NULL if none. 510 * @param translation The DMA translation for which a DMA tag will 511 * be created. 512 * @param[out] dmat On success, the newly created DMA tag. 513 * 514 * @retval 0 success 515 * @retval non-zero if creating the new DMA tag otherwise fails, a regular 516 * unix error code will be returned. 517 */ 518 static int 519 bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat, 520 const struct bhnd_dma_translation *translation, bus_dma_tag_t *dmat) 521 { 522 bus_dma_tag_t translation_tag; 523 bhnd_addr_t dt_mask; 524 bus_addr_t lowaddr, highaddr; 525 bus_size_t maxsegsz; 526 int error; 527 528 highaddr = BUS_SPACE_MAXADDR; 529 maxsegsz = BUS_SPACE_MAXSIZE; 530 531 /* Determine full addressable mask */ 532 dt_mask = (translation->addr_mask | translation->addrext_mask); 533 KASSERT(dt_mask != 0, ("DMA addr_mask invalid: %#jx", 534 (uintmax_t)dt_mask)); 535 536 /* (addr_mask|addrext_mask) is our maximum supported address */ 537 lowaddr = MIN(dt_mask, BUS_SPACE_MAXADDR); 538 539 /* Constrain to translation window size */ 540 if (translation->addr_mask < maxsegsz) 541 maxsegsz = translation->addr_mask; 542 543 /* Create our DMA tag */ 544 error = bus_dma_tag_create(parent_dmat, 545 1, 0, /* alignment, boundary */ 546 lowaddr, highaddr, 547 NULL, NULL, /* filter, filterarg */ 548 BUS_SPACE_MAXSIZE, 0, /* maxsize, nsegments */ 549 maxsegsz, 0, /* maxsegsize, flags */ 550 NULL, NULL, /* lockfunc, lockarg */ 551 &translation_tag); 552 if (error) { 553 device_printf(dev, "failed to create bridge DMA tag: %d\n", 554 error); 555 return (error); 556 } 557 558 *dmat = translation_tag; 559 return (0); 560 } 561 562 /** 563 * Deallocate the given bridge resource structure and any associated resources. 564 * 565 * @param br Resource state to be deallocated. 566 */ 567 void 568 bhndb_free_resources(struct bhndb_resources *br) 569 { 570 struct bhndb_region *region, *r_next; 571 struct bhndb_dw_alloc *dwa; 572 struct bhndb_dw_rentry *dwr, *dwr_next; 573 struct bhndb_intr_handler *ih; 574 bool leaked_regions, leaked_intrs; 575 576 leaked_regions = false; 577 leaked_intrs = false; 578 579 /* No window regions may still be held */ 580 if (!bhndb_dw_all_free(br)) { 581 for (int i = 0; i < br->dwa_count; i++) { 582 dwa = &br->dw_alloc[i]; 583 584 /* Skip free dynamic windows */ 585 if (bhndb_dw_is_free(br, dwa)) 586 continue; 587 588 device_printf(br->dev, 589 "leaked dynamic register window %d\n", dwa->rnid); 590 leaked_regions = true; 591 } 592 } 593 594 /* There should be no interrupt handlers still registered */ 595 STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) { 596 device_printf(br->dev, "interrupt handler leaked %p\n", 597 ih->ih_cookiep); 598 } 599 600 if (leaked_intrs || leaked_regions) { 601 panic("leaked%s%s", leaked_intrs ? " active interrupts" : "", 602 leaked_regions ? " active register windows" : ""); 603 } 604 605 /* Release host resources allocated through our parent. */ 606 if (br->res != NULL) 607 bhndb_release_host_resources(br->res); 608 609 /* Clean up resource reservations */ 610 for (size_t i = 0; i < br->dwa_count; i++) { 611 dwa = &br->dw_alloc[i]; 612 613 LIST_FOREACH_SAFE(dwr, &dwa->refs, dw_link, dwr_next) { 614 LIST_REMOVE(dwr, dw_link); 615 free(dwr, M_BHND); 616 } 617 } 618 619 /* Release bus regions */ 620 STAILQ_FOREACH_SAFE(region, &br->bus_regions, link, r_next) { 621 STAILQ_REMOVE(&br->bus_regions, region, bhndb_region, link); 622 free(region, M_BHND); 623 } 624 625 /* Release our resource managers */ 626 rman_fini(&br->ht_mem_rman); 627 rman_fini(&br->br_mem_rman); 628 rman_fini(&br->br_irq_rman); 629 630 free(br->dw_alloc, M_BHND); 631 free(br->dwa_freelist, M_BHND); 632 633 mtx_destroy(&br->dw_steal_mtx); 634 635 free(br, M_BHND); 636 } 637 638 /** 639 * Allocate host bus resources defined by @p hwcfg. 640 * 641 * On success, the caller assumes ownership of the allocated host resources, 642 * which must be freed via bhndb_release_host_resources(). 643 * 644 * @param[out] resources On success, the allocated host resources. 645 * @param dev The bridge device. 646 * @param parent_dev The parent device from which host resources 647 * should be allocated (e.g. via 648 * bus_alloc_resources()). 649 * @param hwcfg The hardware configuration defining the host 650 * resources to be allocated 651 */ 652 int 653 bhndb_alloc_host_resources(struct bhndb_host_resources **resources, 654 device_t dev, device_t parent_dev, const struct bhndb_hwcfg *hwcfg) 655 { 656 struct bhndb_host_resources *hr; 657 const struct bhnd_dma_translation *dt; 658 bus_dma_tag_t parent_dmat; 659 size_t nres, ndt; 660 int error; 661 662 parent_dmat = bus_get_dma_tag(parent_dev); 663 664 hr = malloc(sizeof(*hr), M_BHND, M_WAITOK); 665 hr->owner = parent_dev; 666 hr->cfg = hwcfg; 667 hr->resource_specs = NULL; 668 hr->resources = NULL; 669 hr->dma_tags = NULL; 670 hr->num_dma_tags = 0; 671 672 /* Determine our bridge resource count from the hardware config. */ 673 nres = 0; 674 for (size_t i = 0; hwcfg->resource_specs[i].type != -1; i++) 675 nres++; 676 677 /* Determine the total count and validate our DMA translation table. */ 678 ndt = 0; 679 for (dt = hwcfg->dma_translations; dt != NULL && 680 !BHND_DMA_IS_TRANSLATION_TABLE_END(dt); dt++) 681 { 682 /* Validate the defined translation */ 683 if ((dt->base_addr & dt->addr_mask) != 0) { 684 device_printf(dev, "invalid DMA translation; base " 685 "address %#jx overlaps address mask %#jx", 686 (uintmax_t)dt->base_addr, (uintmax_t)dt->addr_mask); 687 688 error = EINVAL; 689 goto failed; 690 } 691 692 if ((dt->addrext_mask & dt->addr_mask) != 0) { 693 device_printf(dev, "invalid DMA translation; addrext " 694 "mask %#jx overlaps address mask %#jx", 695 (uintmax_t)dt->addrext_mask, 696 (uintmax_t)dt->addr_mask); 697 698 error = EINVAL; 699 goto failed; 700 } 701 702 /* Increment our entry count */ 703 ndt++; 704 } 705 706 /* Allocate our DMA tags */ 707 hr->dma_tags = malloc(sizeof(*hr->dma_tags) * ndt, M_BHND, 708 M_WAITOK|M_ZERO); 709 for (size_t i = 0; i < ndt; i++) { 710 error = bhndb_dma_tag_create(dev, parent_dmat, 711 &hwcfg->dma_translations[i], &hr->dma_tags[i]); 712 if (error) 713 goto failed; 714 715 hr->num_dma_tags++; 716 } 717 718 /* Allocate space for a non-const copy of our resource_spec 719 * table; this will be updated with the RIDs assigned by 720 * bus_alloc_resources. */ 721 hr->resource_specs = malloc(sizeof(hr->resource_specs[0]) * (nres + 1), 722 M_BHND, M_WAITOK); 723 724 /* Initialize and terminate the table */ 725 for (size_t i = 0; i < nres; i++) 726 hr->resource_specs[i] = hwcfg->resource_specs[i]; 727 728 hr->resource_specs[nres].type = -1; 729 730 /* Allocate space for our resource references */ 731 hr->resources = malloc(sizeof(hr->resources[0]) * nres, M_BHND, 732 M_WAITOK); 733 734 /* Allocate host resources */ 735 error = bus_alloc_resources(hr->owner, hr->resource_specs, 736 hr->resources); 737 if (error) { 738 device_printf(dev, "could not allocate bridge resources via " 739 "%s: %d\n", device_get_nameunit(parent_dev), error); 740 goto failed; 741 } 742 743 *resources = hr; 744 return (0); 745 746 failed: 747 if (hr->resource_specs != NULL) 748 free(hr->resource_specs, M_BHND); 749 750 if (hr->resources != NULL) 751 free(hr->resources, M_BHND); 752 753 for (size_t i = 0; i < hr->num_dma_tags; i++) 754 bus_dma_tag_destroy(hr->dma_tags[i]); 755 756 if (hr->dma_tags != NULL) 757 free(hr->dma_tags, M_BHND); 758 759 free(hr, M_BHND); 760 761 return (error); 762 } 763 764 /** 765 * Deallocate a set of bridge host resources. 766 * 767 * @param hr The resources to be freed. 768 */ 769 void 770 bhndb_release_host_resources(struct bhndb_host_resources *hr) 771 { 772 bus_release_resources(hr->owner, hr->resource_specs, hr->resources); 773 774 for (size_t i = 0; i < hr->num_dma_tags; i++) 775 bus_dma_tag_destroy(hr->dma_tags[i]); 776 777 free(hr->resources, M_BHND); 778 free(hr->resource_specs, M_BHND); 779 free(hr->dma_tags, M_BHND); 780 free(hr, M_BHND); 781 } 782 783 784 /** 785 * Search @p cores for the core serving as the bhnd host bridge. 786 * 787 * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged 788 * bhnd(4) devices to determine the hostb core: 789 * 790 * - The core must have a Broadcom vendor ID. 791 * - The core devclass must match the bridge type. 792 * - The core must be the first device on the bus with the bridged device 793 * class. 794 * 795 * @param cores The core table to search. 796 * @param ncores The number of cores in @p cores. 797 * @param bridge_devclass The expected device class of the bridge core. 798 * @param[out] core If found, the matching host bridge core info. 799 * 800 * @retval 0 success 801 * @retval ENOENT not found 802 */ 803 int 804 bhndb_find_hostb_core(struct bhnd_core_info *cores, u_int ncores, 805 bhnd_devclass_t bridge_devclass, struct bhnd_core_info *core) 806 { 807 struct bhnd_core_match md; 808 struct bhnd_core_info *match; 809 u_int match_core_idx; 810 811 /* Set up a match descriptor for the required device class. */ 812 md = (struct bhnd_core_match) { 813 BHND_MATCH_CORE_CLASS(bridge_devclass), 814 BHND_MATCH_CORE_UNIT(0) 815 }; 816 817 /* Find the matching core with the lowest core index */ 818 match = NULL; 819 match_core_idx = UINT_MAX; 820 821 for (u_int i = 0; i < ncores; i++) { 822 if (!bhnd_core_matches(&cores[i], &md)) 823 continue; 824 825 /* Lower core indices take precedence */ 826 if (match != NULL && match_core_idx < match->core_idx) 827 continue; 828 829 match = &cores[i]; 830 match_core_idx = match->core_idx; 831 } 832 833 if (match == NULL) 834 return (ENOENT); 835 836 *core = *match; 837 return (0); 838 } 839 840 /** 841 * Allocate a host interrupt source and its backing SYS_RES_IRQ host resource. 842 * 843 * @param owner The device to be used to allocate a SYS_RES_IRQ 844 * resource with @p rid. 845 * @param rid The resource ID of the IRQ to be allocated. 846 * @param start The start value to be passed to bus_alloc_resource(). 847 * @param end The end value to be passed to bus_alloc_resource(). 848 * @param count The count to be passed to bus_alloc_resource(). 849 * @param flags The flags to be passed to bus_alloc_resource(). 850 * 851 * @retval non-NULL success 852 * @retval NULL if allocation fails. 853 */ 854 struct bhndb_intr_isrc * 855 bhndb_alloc_intr_isrc(device_t owner, int rid, rman_res_t start, rman_res_t end, 856 rman_res_t count, u_int flags) 857 { 858 struct bhndb_intr_isrc *isrc; 859 860 isrc = malloc(sizeof(*isrc), M_BHND, M_NOWAIT); 861 if (isrc == NULL) 862 return (NULL); 863 864 isrc->is_owner = owner; 865 isrc->is_rid = rid; 866 isrc->is_res = bus_alloc_resource(owner, SYS_RES_IRQ, &isrc->is_rid, 867 start, end, count, flags); 868 if (isrc->is_res == NULL) { 869 free(isrc, M_BHND); 870 return (NULL); 871 } 872 873 return (isrc); 874 } 875 876 /** 877 * Free a host interrupt source and its backing host resource. 878 * 879 * @param isrc The interrupt source to be freed. 880 */ 881 void 882 bhndb_free_intr_isrc(struct bhndb_intr_isrc *isrc) 883 { 884 bus_release_resource(isrc->is_owner, SYS_RES_IRQ, isrc->is_rid, 885 isrc->is_res); 886 free(isrc, M_BHND); 887 } 888 889 /** 890 * Allocate and initialize a new interrupt handler entry. 891 * 892 * @param owner The child device that owns this entry. 893 * @param r The child's interrupt resource. 894 * @param isrc The isrc mapped for this entry. 895 * 896 * @retval non-NULL success 897 * @retval NULL if allocation fails. 898 */ 899 struct bhndb_intr_handler * 900 bhndb_alloc_intr_handler(device_t owner, struct resource *r, 901 struct bhndb_intr_isrc *isrc) 902 { 903 struct bhndb_intr_handler *ih; 904 905 ih = malloc(sizeof(*ih), M_BHND, M_NOWAIT | M_ZERO); 906 ih->ih_owner = owner; 907 ih->ih_res = r; 908 ih->ih_isrc = isrc; 909 ih->ih_cookiep = NULL; 910 ih->ih_active = false; 911 912 return (ih); 913 } 914 915 /** 916 * Free an interrupt handler entry. 917 * 918 * @param br The resource state owning @p ih. 919 * @param ih The interrupt handler entry to be removed. 920 */ 921 void 922 bhndb_free_intr_handler(struct bhndb_intr_handler *ih) 923 { 924 KASSERT(!ih->ih_active, ("free of active interrupt handler %p", 925 ih->ih_cookiep)); 926 927 free(ih, M_BHND); 928 } 929 930 /** 931 * Add an active interrupt handler to the given resource state. 932 * 933 * @param br The resource state to be modified. 934 * @param ih The interrupt handler entry to be added. 935 */ 936 void 937 bhndb_register_intr_handler(struct bhndb_resources *br, 938 struct bhndb_intr_handler *ih) 939 { 940 KASSERT(!ih->ih_active, ("duplicate registration of interrupt " 941 "handler %p", ih->ih_cookiep)); 942 KASSERT(ih->ih_cookiep != NULL, ("missing cookiep")); 943 944 ih->ih_active = true; 945 STAILQ_INSERT_HEAD(&br->bus_intrs, ih, ih_link); 946 } 947 948 /** 949 * Remove an interrupt handler from the given resource state. 950 * 951 * @param br The resource state containing @p ih. 952 * @param ih The interrupt handler entry to be removed. 953 */ 954 void 955 bhndb_deregister_intr_handler(struct bhndb_resources *br, 956 struct bhndb_intr_handler *ih) 957 { 958 KASSERT(ih->ih_active, ("duplicate deregistration of interrupt " 959 "handler %p", ih->ih_cookiep)); 960 961 KASSERT(bhndb_find_intr_handler(br, ih) == ih, 962 ("unknown interrupt handler %p", ih)); 963 964 STAILQ_REMOVE(&br->bus_intrs, ih, bhndb_intr_handler, ih_link); 965 ih->ih_active = false; 966 } 967 968 /** 969 * Return the interrupt handler entry corresponding to @p cookiep, or NULL 970 * if no entry is found. 971 * 972 * @param br The resource state to search for the given @p cookiep. 973 * @param cookiep The interrupt handler's bus-assigned cookiep value. 974 */ 975 struct bhndb_intr_handler * 976 bhndb_find_intr_handler(struct bhndb_resources *br, void *cookiep) 977 { 978 struct bhndb_intr_handler *ih; 979 980 STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) { 981 if (ih == cookiep) 982 return (ih); 983 } 984 985 /* Not found */ 986 return (NULL); 987 } 988 989 /** 990 * Find the maximum start and end limits of the bridged resource @p r. 991 * 992 * If the resource is not currently mapped by the bridge, ENOENT will be 993 * returned. 994 * 995 * @param br The resource state to search. 996 * @param type The resource type (see SYS_RES_*). 997 * @param r The resource to search for in @p br. 998 * @param[out] start On success, the minimum supported start address. 999 * @param[out] end On success, the maximum supported end address. 1000 * 1001 * @retval 0 success 1002 * @retval ENOENT no active mapping found for @p r of @p type 1003 */ 1004 int 1005 bhndb_find_resource_limits(struct bhndb_resources *br, int type, 1006 struct resource *r, rman_res_t *start, rman_res_t *end) 1007 { 1008 struct bhndb_dw_alloc *dynamic; 1009 struct bhndb_region *sregion; 1010 struct bhndb_intr_handler *ih; 1011 1012 switch (type) { 1013 case SYS_RES_IRQ: 1014 /* Is this one of ours? */ 1015 STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) { 1016 if (ih->ih_res == r) 1017 continue; 1018 1019 /* We don't support adjusting IRQ resource limits */ 1020 *start = rman_get_start(r); 1021 *end = rman_get_end(r); 1022 return (0); 1023 } 1024 1025 /* Not found */ 1026 return (ENOENT); 1027 1028 case SYS_RES_MEMORY: { 1029 /* Check for an enclosing dynamic register window */ 1030 if ((dynamic = bhndb_dw_find_resource(br, r))) { 1031 *start = dynamic->target; 1032 *end = dynamic->target + dynamic->win->win_size - 1; 1033 return (0); 1034 } 1035 1036 /* Check for a static region */ 1037 sregion = bhndb_find_resource_region(br, rman_get_start(r), 1038 rman_get_size(r)); 1039 if (sregion != NULL && sregion->static_regwin != NULL) { 1040 *start = sregion->addr; 1041 *end = sregion->addr + sregion->size - 1; 1042 1043 return (0); 1044 } 1045 1046 /* Not found */ 1047 return (ENOENT); 1048 } 1049 1050 default: 1051 device_printf(br->dev, "unknown resource type: %d\n", type); 1052 return (ENOENT); 1053 } 1054 } 1055 1056 /** 1057 * Add a bus region entry to @p r for the given base @p addr and @p size. 1058 * 1059 * @param br The resource state to which the bus region entry will be added. 1060 * @param addr The base address of this region. 1061 * @param size The size of this region. 1062 * @param priority The resource priority to be assigned to allocations 1063 * made within this bus region. 1064 * @param alloc_flags resource allocation flags (@see bhndb_alloc_flags) 1065 * @param static_regwin If available, a static register window mapping this 1066 * bus region entry. If not available, NULL. 1067 * 1068 * @retval 0 success 1069 * @retval non-zero if adding the bus region fails. 1070 */ 1071 int 1072 bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, 1073 bhnd_size_t size, bhndb_priority_t priority, uint32_t alloc_flags, 1074 const struct bhndb_regwin *static_regwin) 1075 { 1076 struct bhndb_region *reg; 1077 1078 /* Insert in the bus resource list */ 1079 reg = malloc(sizeof(*reg), M_BHND, M_NOWAIT); 1080 if (reg == NULL) 1081 return (ENOMEM); 1082 1083 *reg = (struct bhndb_region) { 1084 .addr = addr, 1085 .size = size, 1086 .priority = priority, 1087 .alloc_flags = alloc_flags, 1088 .static_regwin = static_regwin 1089 }; 1090 1091 STAILQ_INSERT_HEAD(&br->bus_regions, reg, link); 1092 1093 return (0); 1094 } 1095 1096 /** 1097 * Return true if a mapping of @p size bytes at @p addr is provided by either 1098 * one contiguous bus region, or by multiple discontiguous regions. 1099 * 1100 * @param br The resource state to query. 1101 * @param addr The requested starting address. 1102 * @param size The requested size. 1103 */ 1104 bool 1105 bhndb_has_static_region_mapping(struct bhndb_resources *br, 1106 bhnd_addr_t addr, bhnd_size_t size) 1107 { 1108 struct bhndb_region *region; 1109 bhnd_addr_t r_addr; 1110 1111 r_addr = addr; 1112 while ((region = bhndb_find_resource_region(br, r_addr, 1)) != NULL) { 1113 /* Must be backed by a static register window */ 1114 if (region->static_regwin == NULL) 1115 return (false); 1116 1117 /* Adjust the search offset */ 1118 r_addr += region->size; 1119 1120 /* Have we traversed a complete (if discontiguous) mapping? */ 1121 if (r_addr == addr + size) 1122 return (true); 1123 1124 } 1125 1126 /* No complete mapping found */ 1127 return (false); 1128 } 1129 1130 /** 1131 * Find the bus region that maps @p size bytes at @p addr. 1132 * 1133 * @param br The resource state to search. 1134 * @param addr The requested starting address. 1135 * @param size The requested size. 1136 * 1137 * @retval bhndb_region A region that fully contains the requested range. 1138 * @retval NULL If no mapping region can be found. 1139 */ 1140 struct bhndb_region * 1141 bhndb_find_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, 1142 bhnd_size_t size) 1143 { 1144 struct bhndb_region *region; 1145 1146 STAILQ_FOREACH(region, &br->bus_regions, link) { 1147 /* Request must fit within the region's mapping */ 1148 if (addr < region->addr) 1149 continue; 1150 1151 if (addr + size > region->addr + region->size) 1152 continue; 1153 1154 return (region); 1155 } 1156 1157 /* Not found */ 1158 return (NULL); 1159 } 1160 1161 /** 1162 * Find the entry matching @p r in @p dwa's references, if any. 1163 * 1164 * @param dwa The dynamic window allocation to search 1165 * @param r The resource to search for in @p dwa. 1166 */ 1167 static struct bhndb_dw_rentry * 1168 bhndb_dw_find_resource_entry(struct bhndb_dw_alloc *dwa, struct resource *r) 1169 { 1170 struct bhndb_dw_rentry *rentry; 1171 1172 LIST_FOREACH(rentry, &dwa->refs, dw_link) { 1173 struct resource *dw_res = rentry->dw_res; 1174 1175 /* Match dev/rid/addr/size */ 1176 if (rman_get_device(dw_res) != rman_get_device(r) || 1177 rman_get_rid(dw_res) != rman_get_rid(r) || 1178 rman_get_start(dw_res) != rman_get_start(r) || 1179 rman_get_size(dw_res) != rman_get_size(r)) 1180 { 1181 continue; 1182 } 1183 1184 /* Matching allocation found */ 1185 return (rentry); 1186 } 1187 1188 return (NULL); 1189 } 1190 1191 /** 1192 * Find the dynamic region allocated for @p r, if any. 1193 * 1194 * @param br The resource state to search. 1195 * @param r The resource to search for. 1196 * 1197 * @retval bhndb_dw_alloc The allocation record for @p r. 1198 * @retval NULL if no dynamic window is allocated for @p r. 1199 */ 1200 struct bhndb_dw_alloc * 1201 bhndb_dw_find_resource(struct bhndb_resources *br, struct resource *r) 1202 { 1203 struct bhndb_dw_alloc *dwa; 1204 1205 for (size_t i = 0; i < br->dwa_count; i++) { 1206 dwa = &br->dw_alloc[i]; 1207 1208 /* Skip free dynamic windows */ 1209 if (bhndb_dw_is_free(br, dwa)) 1210 continue; 1211 1212 /* Matching allocation found? */ 1213 if (bhndb_dw_find_resource_entry(dwa, r) != NULL) 1214 return (dwa); 1215 } 1216 1217 return (NULL); 1218 } 1219 1220 /** 1221 * Find an existing dynamic window mapping @p size bytes 1222 * at @p addr. The window may or may not be free. 1223 * 1224 * @param br The resource state to search. 1225 * @param addr The requested starting address. 1226 * @param size The requested size. 1227 * 1228 * @retval bhndb_dw_alloc A window allocation that fully contains the requested 1229 * range. 1230 * @retval NULL If no mapping region can be found. 1231 */ 1232 struct bhndb_dw_alloc * 1233 bhndb_dw_find_mapping(struct bhndb_resources *br, bhnd_addr_t addr, 1234 bhnd_size_t size) 1235 { 1236 struct bhndb_dw_alloc *dwr; 1237 const struct bhndb_regwin *win; 1238 1239 /* Search for an existing dynamic mapping of this address range. */ 1240 for (size_t i = 0; i < br->dwa_count; i++) { 1241 dwr = &br->dw_alloc[i]; 1242 win = dwr->win; 1243 1244 /* Verify the range */ 1245 if (addr < dwr->target) 1246 continue; 1247 1248 if (addr + size > dwr->target + win->win_size) 1249 continue; 1250 1251 /* Found a usable mapping */ 1252 return (dwr); 1253 } 1254 1255 /* not found */ 1256 return (NULL); 1257 } 1258 1259 /** 1260 * Retain a reference to @p dwa for use by @p res. 1261 * 1262 * @param br The resource state owning @p dwa. 1263 * @param dwa The allocation record to be retained. 1264 * @param res The resource that will own a reference to @p dwa. 1265 * 1266 * @retval 0 success 1267 * @retval ENOMEM Failed to allocate a new reference structure. 1268 */ 1269 int 1270 bhndb_dw_retain(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, 1271 struct resource *res) 1272 { 1273 struct bhndb_dw_rentry *rentry; 1274 1275 KASSERT(bhndb_dw_find_resource_entry(dwa, res) == NULL, 1276 ("double-retain of dynamic window for same resource")); 1277 1278 /* Insert a reference entry; we use M_NOWAIT to allow use from 1279 * within a non-sleepable lock */ 1280 rentry = malloc(sizeof(*rentry), M_BHND, M_NOWAIT); 1281 if (rentry == NULL) 1282 return (ENOMEM); 1283 1284 rentry->dw_res = res; 1285 LIST_INSERT_HEAD(&dwa->refs, rentry, dw_link); 1286 1287 /* Update the free list */ 1288 bit_set(br->dwa_freelist, dwa->rnid); 1289 1290 return (0); 1291 } 1292 1293 /** 1294 * Release a reference to @p dwa previously retained by @p res. If the 1295 * reference count of @p dwa reaches zero, it will be added to the 1296 * free list. 1297 * 1298 * @param br The resource state owning @p dwa. 1299 * @param dwa The allocation record to be released. 1300 * @param res The resource that currently owns a reference to @p dwa. 1301 */ 1302 void 1303 bhndb_dw_release(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, 1304 struct resource *r) 1305 { 1306 struct bhndb_dw_rentry *rentry; 1307 1308 /* Find the rentry */ 1309 rentry = bhndb_dw_find_resource_entry(dwa, r); 1310 KASSERT(rentry != NULL, ("over release of resource entry")); 1311 1312 LIST_REMOVE(rentry, dw_link); 1313 free(rentry, M_BHND); 1314 1315 /* If this was the last reference, update the free list */ 1316 if (LIST_EMPTY(&dwa->refs)) 1317 bit_clear(br->dwa_freelist, dwa->rnid); 1318 } 1319 1320 /** 1321 * Attempt to set (or reset) the target address of @p dwa to map @p size bytes 1322 * at @p addr. 1323 * 1324 * This will apply any necessary window alignment and verify that 1325 * the window is capable of mapping the requested range prior to modifying 1326 * therecord. 1327 * 1328 * @param dev The device on which to issue the BHNDB_SET_WINDOW_ADDR() request. 1329 * @param br The resource state owning @p dwa. 1330 * @param dwa The allocation record to be configured. 1331 * @param addr The address to be mapped via @p dwa. 1332 * @param size The number of bytes to be mapped at @p addr. 1333 * 1334 * @retval 0 success 1335 * @retval non-zero no usable register window available. 1336 */ 1337 int 1338 bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br, 1339 struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size) 1340 { 1341 const struct bhndb_regwin *rw; 1342 bus_addr_t offset; 1343 int error; 1344 1345 rw = dwa->win; 1346 1347 KASSERT(bhndb_dw_is_free(br, dwa) || mtx_owned(&br->dw_steal_mtx), 1348 ("attempting to set the target address on an in-use window")); 1349 1350 /* Page-align the target address */ 1351 offset = addr % rw->win_size; 1352 dwa->target = addr - offset; 1353 1354 /* Verify that the window is large enough for the full target */ 1355 if (rw->win_size - offset < size) 1356 return (ENOMEM); 1357 1358 /* Update the window target */ 1359 error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); 1360 if (error) { 1361 dwa->target = 0x0; 1362 return (error); 1363 } 1364 1365 return (0); 1366 } 1367 1368 /** 1369 * Steal an in-use allocation record from @p br, returning the record's current 1370 * target in @p saved on success. 1371 * 1372 * This function acquires a mutex and disables interrupts; callers should 1373 * avoid holding a stolen window longer than required to issue an I/O 1374 * request. 1375 * 1376 * A successful call to bhndb_dw_steal() must be balanced with a call to 1377 * bhndb_dw_return_stolen(). 1378 * 1379 * @param br The resource state from which a window should be stolen. 1380 * @param saved The stolen window's saved target address. 1381 * 1382 * @retval non-NULL success 1383 * @retval NULL no dynamic window regions are defined. 1384 */ 1385 struct bhndb_dw_alloc * 1386 bhndb_dw_steal(struct bhndb_resources *br, bus_addr_t *saved) 1387 { 1388 struct bhndb_dw_alloc *dw_stolen; 1389 1390 KASSERT(bhndb_dw_next_free(br) == NULL, 1391 ("attempting to steal an in-use window while free windows remain")); 1392 1393 /* Nothing to steal from? */ 1394 if (br->dwa_count == 0) 1395 return (NULL); 1396 1397 /* 1398 * Acquire our steal spinlock; this will be released in 1399 * bhndb_dw_return_stolen(). 1400 * 1401 * Acquiring also disables interrupts, which is required when one is 1402 * stealing an in-use existing register window. 1403 */ 1404 mtx_lock_spin(&br->dw_steal_mtx); 1405 1406 dw_stolen = &br->dw_alloc[0]; 1407 *saved = dw_stolen->target; 1408 return (dw_stolen); 1409 } 1410 1411 /** 1412 * Return an allocation record previously stolen using bhndb_dw_steal(). 1413 * 1414 * @param dev The device on which to issue a BHNDB_SET_WINDOW_ADDR() request. 1415 * @param br The resource state owning @p dwa. 1416 * @param dwa The allocation record to be returned. 1417 * @param saved The original target address provided by bhndb_dw_steal(). 1418 */ 1419 void 1420 bhndb_dw_return_stolen(device_t dev, struct bhndb_resources *br, 1421 struct bhndb_dw_alloc *dwa, bus_addr_t saved) 1422 { 1423 int error; 1424 1425 mtx_assert(&br->dw_steal_mtx, MA_OWNED); 1426 1427 error = bhndb_dw_set_addr(dev, br, dwa, saved, 0); 1428 if (error) { 1429 panic("failed to restore register window target %#jx: %d\n", 1430 (uintmax_t)saved, error); 1431 } 1432 1433 mtx_unlock_spin(&br->dw_steal_mtx); 1434 } 1435 1436 /** 1437 * Return the count of @p type register windows in @p table. 1438 * 1439 * @param table The table to search. 1440 * @param type The required window type, or BHNDB_REGWIN_T_INVALID to 1441 * count all register window types. 1442 */ 1443 size_t 1444 bhndb_regwin_count(const struct bhndb_regwin *table, 1445 bhndb_regwin_type_t type) 1446 { 1447 const struct bhndb_regwin *rw; 1448 size_t count; 1449 1450 count = 0; 1451 for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { 1452 if (type == BHNDB_REGWIN_T_INVALID || rw->win_type == type) 1453 count++; 1454 } 1455 1456 return (count); 1457 } 1458 1459 /** 1460 * Search @p table for the first window with the given @p type. 1461 * 1462 * @param table The table to search. 1463 * @param type The required window type. 1464 * @param min_size The minimum window size. 1465 * 1466 * @retval bhndb_regwin The first matching window. 1467 * @retval NULL If no window of the requested type could be found. 1468 */ 1469 const struct bhndb_regwin * 1470 bhndb_regwin_find_type(const struct bhndb_regwin *table, 1471 bhndb_regwin_type_t type, bus_size_t min_size) 1472 { 1473 const struct bhndb_regwin *rw; 1474 1475 for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) 1476 { 1477 if (rw->win_type == type && rw->win_size >= min_size) 1478 return (rw); 1479 } 1480 1481 return (NULL); 1482 } 1483 1484 /** 1485 * Search @p windows for the first matching core window. 1486 * 1487 * @param table The table to search. 1488 * @param class The required core class. 1489 * @param unit The required core unit, or -1. 1490 * @param port_type The required port type. 1491 * @param port The required port. 1492 * @param region The required region. 1493 * @param offset The required readable core register block offset. 1494 * @param min_size The required minimum readable size at @p offset. 1495 * 1496 * @retval bhndb_regwin The first matching window. 1497 * @retval NULL If no matching window was found. 1498 */ 1499 const struct bhndb_regwin * 1500 bhndb_regwin_find_core(const struct bhndb_regwin *table, bhnd_devclass_t class, 1501 int unit, bhnd_port_type port_type, u_int port, u_int region, 1502 bus_size_t offset, bus_size_t min_size) 1503 { 1504 const struct bhndb_regwin *rw; 1505 1506 for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) 1507 { 1508 bus_size_t rw_offset; 1509 1510 /* Match on core, port, and region attributes */ 1511 if (rw->win_type != BHNDB_REGWIN_T_CORE) 1512 continue; 1513 1514 if (rw->d.core.class != class) 1515 continue; 1516 1517 if (unit != -1 && rw->d.core.unit != unit) 1518 continue; 1519 1520 if (rw->d.core.port_type != port_type) 1521 continue; 1522 1523 if (rw->d.core.port != port) 1524 continue; 1525 1526 if (rw->d.core.region != region) 1527 continue; 1528 1529 /* Verify that the requested range is mapped within 1530 * this register window */ 1531 if (rw->d.core.offset > offset) 1532 continue; 1533 1534 rw_offset = offset - rw->d.core.offset; 1535 1536 if (rw->win_size < rw_offset) 1537 continue; 1538 1539 if (rw->win_size - rw_offset < min_size) 1540 continue; 1541 1542 return (rw); 1543 } 1544 1545 return (NULL); 1546 } 1547 1548 /** 1549 * Search @p windows for the best available window of at least @p min_size. 1550 * 1551 * Search order: 1552 * - BHND_REGWIN_T_CORE 1553 * - BHND_REGWIN_T_DYN 1554 * 1555 * @param table The table to search. 1556 * @param class The required core class. 1557 * @param unit The required core unit, or -1. 1558 * @param port_type The required port type. 1559 * @param port The required port. 1560 * @param region The required region. 1561 * @param offset The required readable core register block offset. 1562 * @param min_size The required minimum readable size at @p offset. 1563 * 1564 * @retval bhndb_regwin The first matching window. 1565 * @retval NULL If no matching window was found. 1566 */ 1567 const struct bhndb_regwin * 1568 bhndb_regwin_find_best(const struct bhndb_regwin *table, 1569 bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, 1570 u_int region, bus_size_t offset, bus_size_t min_size) 1571 { 1572 const struct bhndb_regwin *rw; 1573 1574 /* Prefer a fixed core mapping */ 1575 rw = bhndb_regwin_find_core(table, class, unit, port_type, 1576 port, region, offset, min_size); 1577 if (rw != NULL) 1578 return (rw); 1579 1580 /* Fall back on a generic dynamic window */ 1581 return (bhndb_regwin_find_type(table, BHNDB_REGWIN_T_DYN, min_size)); 1582 } 1583 1584 /** 1585 * Return true if @p regw defines a BHNDB_REGWIN_T_CORE register window 1586 * that matches against @p core. 1587 * 1588 * @param regw A register window to match against. 1589 * @param core The bhnd(4) core info to match against @p regw. 1590 */ 1591 bool 1592 bhndb_regwin_match_core(const struct bhndb_regwin *regw, 1593 struct bhnd_core_info *core) 1594 { 1595 /* Only core windows are supported */ 1596 if (regw->win_type != BHNDB_REGWIN_T_CORE) 1597 return (false); 1598 1599 /* Device class must match */ 1600 if (bhnd_core_class(core) != regw->d.core.class) 1601 return (false); 1602 1603 /* Device unit must match */ 1604 if (core->unit != regw->d.core.unit) 1605 return (false); 1606 1607 /* Matches */ 1608 return (true); 1609 } 1610 1611 /** 1612 * Search for a core resource priority descriptor in @p table that matches 1613 * @p core. 1614 * 1615 * @param table The table to search. 1616 * @param core The core to match against @p table. 1617 */ 1618 const struct bhndb_hw_priority * 1619 bhndb_hw_priority_find_core(const struct bhndb_hw_priority *table, 1620 struct bhnd_core_info *core) 1621 { 1622 const struct bhndb_hw_priority *hp; 1623 1624 for (hp = table; hp->ports != NULL; hp++) { 1625 if (bhnd_core_matches(core, &hp->match)) 1626 return (hp); 1627 } 1628 1629 /* not found */ 1630 return (NULL); 1631 } 1632 1633 1634 /** 1635 * Search for a port resource priority descriptor in @p table. 1636 * 1637 * @param table The table to search. 1638 * @param core The core to match against @p table. 1639 * @param port_type The required port type. 1640 * @param port The required port. 1641 * @param region The required region. 1642 */ 1643 const struct bhndb_port_priority * 1644 bhndb_hw_priorty_find_port(const struct bhndb_hw_priority *table, 1645 struct bhnd_core_info *core, bhnd_port_type port_type, u_int port, 1646 u_int region) 1647 { 1648 const struct bhndb_hw_priority *hp; 1649 1650 if ((hp = bhndb_hw_priority_find_core(table, core)) == NULL) 1651 return (NULL); 1652 1653 for (u_int i = 0; i < hp->num_ports; i++) { 1654 const struct bhndb_port_priority *pp = &hp->ports[i]; 1655 1656 if (pp->type != port_type) 1657 continue; 1658 1659 if (pp->port != port) 1660 continue; 1661 1662 if (pp->region != region) 1663 continue; 1664 1665 return (pp); 1666 } 1667 1668 /* not found */ 1669 return (NULL); 1670 } 1671