1 /*- 2 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org> 3 * Copyright (c) 2017 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Landon Fuller 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 16 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 17 * redistribution must be conditioned upon including a substantially 18 * similar Disclaimer requirement for further binary redistribution. 19 * 20 * NO WARRANTY 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 24 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 25 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 26 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 29 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGES. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include <sys/param.h> 38 #include <sys/kernel.h> 39 #include <sys/limits.h> 40 41 #include "bhndb_private.h" 42 #include "bhndbvar.h" 43 44 static int bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat, 45 const struct bhnd_dma_translation *translation, 46 bus_dma_tag_t *dmat); 47 48 /** 49 * Attach a BHND bridge device to @p parent. 50 * 51 * @param parent A parent PCI device. 52 * @param[out] bhndb On success, the probed and attached bhndb bridge device. 53 * @param unit The device unit number, or -1 to select the next available unit 54 * number. 55 * 56 * @retval 0 success 57 * @retval non-zero Failed to attach the bhndb device. 58 */ 59 int 60 bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit) 61 { 62 int error; 63 64 *bhndb = device_add_child(parent, "bhndb", unit); 65 if (*bhndb == NULL) 66 return (ENXIO); 67 68 if (!(error = device_probe_and_attach(*bhndb))) 69 return (0); 70 71 if ((device_delete_child(parent, *bhndb))) 72 device_printf(parent, "failed to detach bhndb child\n"); 73 74 return (error); 75 } 76 77 /* 78 * Call BHNDB_SUSPEND_RESOURCE() for all resources in @p rl. 79 */ 80 static void 81 bhndb_do_suspend_resources(device_t dev, struct resource_list *rl) 82 { 83 struct resource_list_entry *rle; 84 85 /* Suspend all child resources. */ 86 STAILQ_FOREACH(rle, rl, link) { 87 /* Skip non-allocated resources */ 88 if (rle->res == NULL) 89 continue; 90 91 BHNDB_SUSPEND_RESOURCE(device_get_parent(dev), dev, rle->type, 92 rle->res); 93 } 94 } 95 96 /** 97 * Helper function for implementing BUS_RESUME_CHILD() on bridged 98 * bhnd(4) buses. 99 * 100 * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() 101 * to find the child's resources and call BHNDB_SUSPEND_RESOURCE() for all 102 * child resources, ensuring that the device's allocated bridge resources 103 * will be available to other devices during bus resumption. 104 * 105 * Before suspending any resources, @p child is suspended by 106 * calling bhnd_generic_suspend_child(). 107 * 108 * If @p child is not a direct child of @p dev, suspension is delegated to 109 * the @p dev parent. 110 */ 111 int 112 bhnd_generic_br_suspend_child(device_t dev, device_t child) 113 { 114 struct resource_list *rl; 115 int error; 116 117 if (device_get_parent(child) != dev) 118 BUS_SUSPEND_CHILD(device_get_parent(dev), child); 119 120 if (device_is_suspended(child)) 121 return (EBUSY); 122 123 /* Suspend the child device */ 124 if ((error = bhnd_generic_suspend_child(dev, child))) 125 return (error); 126 127 /* Fetch the resource list. If none, there's nothing else to do */ 128 rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); 129 if (rl == NULL) 130 return (0); 131 132 /* Suspend all child resources. */ 133 bhndb_do_suspend_resources(dev, rl); 134 135 return (0); 136 } 137 138 /** 139 * Helper function for implementing BUS_RESUME_CHILD() on bridged 140 * bhnd(4) bus devices. 141 * 142 * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() 143 * to find the child's resources and call BHNDB_RESUME_RESOURCE() for all 144 * child resources, before delegating to bhnd_generic_resume_child(). 145 * 146 * If resource resumption fails, @p child will not be resumed. 147 * 148 * If @p child is not a direct child of @p dev, suspension is delegated to 149 * the @p dev parent. 150 */ 151 int 152 bhnd_generic_br_resume_child(device_t dev, device_t child) 153 { 154 struct resource_list *rl; 155 struct resource_list_entry *rle; 156 int error; 157 158 if (device_get_parent(child) != dev) 159 BUS_RESUME_CHILD(device_get_parent(dev), child); 160 161 if (!device_is_suspended(child)) 162 return (EBUSY); 163 164 /* Fetch the resource list. If none, there's nothing else to do */ 165 rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); 166 if (rl == NULL) 167 return (bhnd_generic_resume_child(dev, child)); 168 169 /* Resume all resources */ 170 STAILQ_FOREACH(rle, rl, link) { 171 /* Skip non-allocated resources */ 172 if (rle->res == NULL) 173 continue; 174 175 error = BHNDB_RESUME_RESOURCE(device_get_parent(dev), dev, 176 rle->type, rle->res); 177 if (error) { 178 /* Put all resources back into a suspend state */ 179 bhndb_do_suspend_resources(dev, rl); 180 return (error); 181 } 182 } 183 184 /* Now that all resources are resumed, resume child */ 185 if ((error = bhnd_generic_resume_child(dev, child))) { 186 /* Put all resources back into a suspend state */ 187 bhndb_do_suspend_resources(dev, rl); 188 } 189 190 return (error); 191 } 192 193 /** 194 * Find a host resource of @p type that maps the given range. 195 * 196 * @param hr The resource state to search. 197 * @param type The resource type to search for (see SYS_RES_*). 198 * @param start The start address of the range to search for. 199 * @param count The size of the range to search for. 200 * 201 * @retval resource the host resource containing the requested range. 202 * @retval NULL if no resource containing the requested range can be found. 203 */ 204 struct resource * 205 bhndb_host_resource_for_range(struct bhndb_host_resources *hr, int type, 206 rman_res_t start, rman_res_t count) 207 { 208 for (u_int i = 0; hr->resource_specs[i].type != -1; i++) { 209 struct resource *r = hr->resources[i]; 210 211 if (hr->resource_specs[i].type != type) 212 continue; 213 214 /* Verify range */ 215 if (rman_get_start(r) > start) 216 continue; 217 218 if (rman_get_end(r) < (start + count - 1)) 219 continue; 220 221 return (r); 222 } 223 224 return (NULL); 225 } 226 227 /** 228 * Find a host resource of that matches the given register window definition. 229 * 230 * @param hr The resource state to search. 231 * @param win A register window definition. 232 * 233 * @retval resource the host resource corresponding to @p win. 234 * @retval NULL if no resource corresponding to @p win can be found. 235 */ 236 struct resource * 237 bhndb_host_resource_for_regwin(struct bhndb_host_resources *hr, 238 const struct bhndb_regwin *win) 239 { 240 const struct resource_spec *rspecs; 241 242 rspecs = hr->resource_specs; 243 for (u_int i = 0; rspecs[i].type != -1; i++) { 244 if (win->res.type != rspecs[i].type) 245 continue; 246 247 if (win->res.rid != rspecs[i].rid) 248 continue; 249 250 /* Found declared resource */ 251 return (hr->resources[i]); 252 } 253 254 device_printf(hr->owner, "missing regwin resource spec " 255 "(type=%d, rid=%d)\n", win->res.type, win->res.rid); 256 257 return (NULL); 258 } 259 260 /** 261 * Allocate and initialize a new resource state structure. 262 * 263 * @param dev The bridge device. 264 * @param parent_dev The parent device from which host resources should be 265 * allocated. 266 * @param cfg The hardware configuration to be used. 267 */ 268 struct bhndb_resources * 269 bhndb_alloc_resources(device_t dev, device_t parent_dev, 270 const struct bhndb_hwcfg *cfg) 271 { 272 struct bhndb_resources *r; 273 const struct bhndb_regwin *win; 274 bus_size_t last_window_size; 275 int rnid; 276 int error; 277 bool free_ht_mem, free_br_mem, free_br_irq; 278 279 free_ht_mem = false; 280 free_br_mem = false; 281 free_br_irq = false; 282 283 r = malloc(sizeof(*r), M_BHND, M_NOWAIT|M_ZERO); 284 if (r == NULL) 285 return (NULL); 286 287 /* Basic initialization */ 288 r->dev = dev; 289 r->cfg = cfg; 290 r->res = NULL; 291 r->min_prio = BHNDB_PRIORITY_NONE; 292 STAILQ_INIT(&r->bus_regions); 293 STAILQ_INIT(&r->bus_intrs); 294 295 mtx_init(&r->dw_steal_mtx, device_get_nameunit(dev), 296 "bhndb dwa_steal lock", MTX_SPIN); 297 298 /* Initialize host address space resource manager. */ 299 r->ht_mem_rman.rm_start = 0; 300 r->ht_mem_rman.rm_end = ~0; 301 r->ht_mem_rman.rm_type = RMAN_ARRAY; 302 r->ht_mem_rman.rm_descr = "BHNDB host memory"; 303 if ((error = rman_init(&r->ht_mem_rman))) { 304 device_printf(r->dev, "could not initialize ht_mem_rman\n"); 305 goto failed; 306 } 307 free_ht_mem = true; 308 309 /* Initialize resource manager for the bridged address space. */ 310 r->br_mem_rman.rm_start = 0; 311 r->br_mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT; 312 r->br_mem_rman.rm_type = RMAN_ARRAY; 313 r->br_mem_rman.rm_descr = "BHNDB bridged memory"; 314 315 if ((error = rman_init(&r->br_mem_rman))) { 316 device_printf(r->dev, "could not initialize br_mem_rman\n"); 317 goto failed; 318 } 319 free_br_mem = true; 320 321 error = rman_manage_region(&r->br_mem_rman, 0, BUS_SPACE_MAXADDR_32BIT); 322 if (error) { 323 device_printf(r->dev, "could not configure br_mem_rman\n"); 324 goto failed; 325 } 326 327 /* Initialize resource manager for the bridged interrupt controller. */ 328 r->br_irq_rman.rm_start = 0; 329 r->br_irq_rman.rm_end = RM_MAX_END; 330 r->br_irq_rman.rm_type = RMAN_ARRAY; 331 r->br_irq_rman.rm_descr = "BHNDB bridged interrupts"; 332 333 if ((error = rman_init(&r->br_irq_rman))) { 334 device_printf(r->dev, "could not initialize br_irq_rman\n"); 335 goto failed; 336 } 337 free_br_irq = true; 338 339 error = rman_manage_region(&r->br_irq_rman, 0, RM_MAX_END); 340 if (error) { 341 device_printf(r->dev, "could not configure br_irq_rman\n"); 342 goto failed; 343 } 344 345 /* Fetch the dynamic regwin count and verify that it does not exceed 346 * what is representable via our freelist bitstring. */ 347 r->dwa_count = bhndb_regwin_count(cfg->register_windows, 348 BHNDB_REGWIN_T_DYN); 349 if (r->dwa_count >= INT_MAX) { 350 device_printf(r->dev, "max dynamic regwin count exceeded\n"); 351 goto failed; 352 } 353 354 /* Allocate the dynamic window allocation table. */ 355 r->dw_alloc = malloc(sizeof(r->dw_alloc[0]) * r->dwa_count, M_BHND, 356 M_NOWAIT); 357 if (r->dw_alloc == NULL) 358 goto failed; 359 360 /* Allocate the dynamic window allocation freelist */ 361 r->dwa_freelist = bit_alloc(r->dwa_count, M_BHND, M_NOWAIT); 362 if (r->dwa_freelist == NULL) 363 goto failed; 364 365 /* Initialize the dynamic window table */ 366 rnid = 0; 367 last_window_size = 0; 368 for (win = cfg->register_windows; 369 win->win_type != BHNDB_REGWIN_T_INVALID; win++) 370 { 371 struct bhndb_dw_alloc *dwa; 372 373 /* Skip non-DYN windows */ 374 if (win->win_type != BHNDB_REGWIN_T_DYN) 375 continue; 376 377 /* Validate the window size */ 378 if (win->win_size == 0) { 379 device_printf(r->dev, "ignoring zero-length dynamic " 380 "register window\n"); 381 continue; 382 } else if (last_window_size == 0) { 383 last_window_size = win->win_size; 384 } else if (last_window_size != win->win_size) { 385 /* 386 * No existing hardware should trigger this. 387 * 388 * If you run into this in the future, the dynamic 389 * window allocator and the resource priority system 390 * will need to be extended to support multiple register 391 * window allocation pools. 392 */ 393 device_printf(r->dev, "devices that vend multiple " 394 "dynamic register window sizes are not currently " 395 "supported\n"); 396 goto failed; 397 } 398 399 dwa = &r->dw_alloc[rnid]; 400 dwa->win = win; 401 dwa->parent_res = NULL; 402 dwa->rnid = rnid; 403 dwa->target = 0x0; 404 405 LIST_INIT(&dwa->refs); 406 rnid++; 407 } 408 409 /* Allocate host resources */ 410 error = bhndb_alloc_host_resources(&r->res, dev, parent_dev, r->cfg); 411 if (error) { 412 device_printf(r->dev, 413 "could not allocate host resources on %s: %d\n", 414 device_get_nameunit(parent_dev), error); 415 goto failed; 416 } 417 418 /* Populate (and validate) parent resource references for all 419 * dynamic windows */ 420 for (size_t i = 0; i < r->dwa_count; i++) { 421 struct bhndb_dw_alloc *dwa; 422 const struct bhndb_regwin *win; 423 424 dwa = &r->dw_alloc[i]; 425 win = dwa->win; 426 427 /* Find and validate corresponding resource. */ 428 dwa->parent_res = bhndb_host_resource_for_regwin(r->res, win); 429 if (dwa->parent_res == NULL) { 430 device_printf(r->dev, "no host resource found for %u " 431 "register window with offset %#jx and " 432 "size %#jx\n", 433 win->win_type, 434 (uintmax_t)win->win_offset, 435 (uintmax_t)win->win_size); 436 437 error = ENXIO; 438 goto failed; 439 } 440 441 if (rman_get_size(dwa->parent_res) < win->win_offset + 442 win->win_size) 443 { 444 device_printf(r->dev, "resource %d too small for " 445 "register window with offset %llx and size %llx\n", 446 rman_get_rid(dwa->parent_res), 447 (unsigned long long) win->win_offset, 448 (unsigned long long) win->win_size); 449 450 error = EINVAL; 451 goto failed; 452 } 453 } 454 455 /* Add allocated memory resources to our host memory resource manager */ 456 for (u_int i = 0; r->res->resource_specs[i].type != -1; i++) { 457 struct resource *res; 458 459 /* skip non-memory resources */ 460 if (r->res->resource_specs[i].type != SYS_RES_MEMORY) 461 continue; 462 463 /* add host resource to set of managed regions */ 464 res = r->res->resources[i]; 465 error = rman_manage_region(&r->ht_mem_rman, 466 rman_get_start(res), rman_get_end(res)); 467 if (error) { 468 device_printf(r->dev, 469 "could not register host memory region with " 470 "ht_mem_rman: %d\n", error); 471 goto failed; 472 } 473 } 474 475 return (r); 476 477 failed: 478 if (free_ht_mem) 479 rman_fini(&r->ht_mem_rman); 480 481 if (free_br_mem) 482 rman_fini(&r->br_mem_rman); 483 484 if (free_br_irq) 485 rman_fini(&r->br_irq_rman); 486 487 if (r->dw_alloc != NULL) 488 free(r->dw_alloc, M_BHND); 489 490 if (r->dwa_freelist != NULL) 491 free(r->dwa_freelist, M_BHND); 492 493 if (r->res != NULL) 494 bhndb_release_host_resources(r->res); 495 496 mtx_destroy(&r->dw_steal_mtx); 497 498 free(r, M_BHND); 499 500 return (NULL); 501 } 502 503 /** 504 * Create a new DMA tag for the given @p translation. 505 * 506 * @param dev The bridge device. 507 * @param parent_dmat The parent DMA tag, or NULL if none. 508 * @param translation The DMA translation for which a DMA tag will 509 * be created. 510 * @param[out] dmat On success, the newly created DMA tag. 511 * 512 * @retval 0 success 513 * @retval non-zero if creating the new DMA tag otherwise fails, a regular 514 * unix error code will be returned. 515 */ 516 static int 517 bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat, 518 const struct bhnd_dma_translation *translation, bus_dma_tag_t *dmat) 519 { 520 bus_dma_tag_t translation_tag; 521 bhnd_addr_t dt_mask; 522 bus_addr_t lowaddr, highaddr; 523 bus_size_t maxsegsz; 524 int error; 525 526 highaddr = BUS_SPACE_MAXADDR; 527 maxsegsz = BUS_SPACE_MAXSIZE; 528 529 /* Determine full addressable mask */ 530 dt_mask = (translation->addr_mask | translation->addrext_mask); 531 KASSERT(dt_mask != 0, ("DMA addr_mask invalid: %#jx", 532 (uintmax_t)dt_mask)); 533 534 /* (addr_mask|addrext_mask) is our maximum supported address */ 535 lowaddr = MIN(dt_mask, BUS_SPACE_MAXADDR); 536 537 /* Constrain to translation window size */ 538 if (translation->addr_mask < maxsegsz) 539 maxsegsz = translation->addr_mask; 540 541 /* Create our DMA tag */ 542 error = bus_dma_tag_create(parent_dmat, 543 1, 0, /* alignment, boundary */ 544 lowaddr, highaddr, 545 NULL, NULL, /* filter, filterarg */ 546 BUS_SPACE_MAXSIZE, 0, /* maxsize, nsegments */ 547 maxsegsz, 0, /* maxsegsize, flags */ 548 NULL, NULL, /* lockfunc, lockarg */ 549 &translation_tag); 550 if (error) { 551 device_printf(dev, "failed to create bridge DMA tag: %d\n", 552 error); 553 return (error); 554 } 555 556 *dmat = translation_tag; 557 return (0); 558 } 559 560 /** 561 * Deallocate the given bridge resource structure and any associated resources. 562 * 563 * @param br Resource state to be deallocated. 564 */ 565 void 566 bhndb_free_resources(struct bhndb_resources *br) 567 { 568 struct bhndb_region *region, *r_next; 569 struct bhndb_dw_alloc *dwa; 570 struct bhndb_dw_rentry *dwr, *dwr_next; 571 struct bhndb_intr_handler *ih; 572 bool leaked_regions, leaked_intrs; 573 574 leaked_regions = false; 575 leaked_intrs = false; 576 577 /* No window regions may still be held */ 578 if (!bhndb_dw_all_free(br)) { 579 for (int i = 0; i < br->dwa_count; i++) { 580 dwa = &br->dw_alloc[i]; 581 582 /* Skip free dynamic windows */ 583 if (bhndb_dw_is_free(br, dwa)) 584 continue; 585 586 device_printf(br->dev, 587 "leaked dynamic register window %d\n", dwa->rnid); 588 leaked_regions = true; 589 } 590 } 591 592 /* There should be no interrupt handlers still registered */ 593 STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) { 594 device_printf(br->dev, "interrupt handler leaked %p\n", 595 ih->ih_cookiep); 596 } 597 598 if (leaked_intrs || leaked_regions) { 599 panic("leaked%s%s", leaked_intrs ? " active interrupts" : "", 600 leaked_regions ? " active register windows" : ""); 601 } 602 603 /* Release host resources allocated through our parent. */ 604 if (br->res != NULL) 605 bhndb_release_host_resources(br->res); 606 607 /* Clean up resource reservations */ 608 for (size_t i = 0; i < br->dwa_count; i++) { 609 dwa = &br->dw_alloc[i]; 610 611 LIST_FOREACH_SAFE(dwr, &dwa->refs, dw_link, dwr_next) { 612 LIST_REMOVE(dwr, dw_link); 613 free(dwr, M_BHND); 614 } 615 } 616 617 /* Release bus regions */ 618 STAILQ_FOREACH_SAFE(region, &br->bus_regions, link, r_next) { 619 STAILQ_REMOVE(&br->bus_regions, region, bhndb_region, link); 620 free(region, M_BHND); 621 } 622 623 /* Release our resource managers */ 624 rman_fini(&br->ht_mem_rman); 625 rman_fini(&br->br_mem_rman); 626 rman_fini(&br->br_irq_rman); 627 628 free(br->dw_alloc, M_BHND); 629 free(br->dwa_freelist, M_BHND); 630 631 mtx_destroy(&br->dw_steal_mtx); 632 633 free(br, M_BHND); 634 } 635 636 /** 637 * Allocate host bus resources defined by @p hwcfg. 638 * 639 * On success, the caller assumes ownership of the allocated host resources, 640 * which must be freed via bhndb_release_host_resources(). 641 * 642 * @param[out] resources On success, the allocated host resources. 643 * @param dev The bridge device. 644 * @param parent_dev The parent device from which host resources 645 * should be allocated (e.g. via 646 * bus_alloc_resources()). 647 * @param hwcfg The hardware configuration defining the host 648 * resources to be allocated 649 */ 650 int 651 bhndb_alloc_host_resources(struct bhndb_host_resources **resources, 652 device_t dev, device_t parent_dev, const struct bhndb_hwcfg *hwcfg) 653 { 654 struct bhndb_host_resources *hr; 655 const struct bhnd_dma_translation *dt; 656 bus_dma_tag_t parent_dmat; 657 size_t nres, ndt; 658 int error; 659 660 parent_dmat = bus_get_dma_tag(parent_dev); 661 662 hr = malloc(sizeof(*hr), M_BHND, M_WAITOK); 663 hr->owner = parent_dev; 664 hr->cfg = hwcfg; 665 hr->resource_specs = NULL; 666 hr->resources = NULL; 667 hr->dma_tags = NULL; 668 hr->num_dma_tags = 0; 669 670 /* Determine our bridge resource count from the hardware config. */ 671 nres = 0; 672 for (size_t i = 0; hwcfg->resource_specs[i].type != -1; i++) 673 nres++; 674 675 /* Determine the total count and validate our DMA translation table. */ 676 ndt = 0; 677 for (dt = hwcfg->dma_translations; dt != NULL && 678 !BHND_DMA_IS_TRANSLATION_TABLE_END(dt); dt++) 679 { 680 /* Validate the defined translation */ 681 if ((dt->base_addr & dt->addr_mask) != 0) { 682 device_printf(dev, "invalid DMA translation; base " 683 "address %#jx overlaps address mask %#jx", 684 (uintmax_t)dt->base_addr, (uintmax_t)dt->addr_mask); 685 686 error = EINVAL; 687 goto failed; 688 } 689 690 if ((dt->addrext_mask & dt->addr_mask) != 0) { 691 device_printf(dev, "invalid DMA translation; addrext " 692 "mask %#jx overlaps address mask %#jx", 693 (uintmax_t)dt->addrext_mask, 694 (uintmax_t)dt->addr_mask); 695 696 error = EINVAL; 697 goto failed; 698 } 699 700 /* Increment our entry count */ 701 ndt++; 702 } 703 704 /* Allocate our DMA tags */ 705 hr->dma_tags = malloc(sizeof(*hr->dma_tags) * ndt, M_BHND, 706 M_WAITOK|M_ZERO); 707 for (size_t i = 0; i < ndt; i++) { 708 error = bhndb_dma_tag_create(dev, parent_dmat, 709 &hwcfg->dma_translations[i], &hr->dma_tags[i]); 710 if (error) 711 goto failed; 712 713 hr->num_dma_tags++; 714 } 715 716 /* Allocate space for a non-const copy of our resource_spec 717 * table; this will be updated with the RIDs assigned by 718 * bus_alloc_resources. */ 719 hr->resource_specs = malloc(sizeof(hr->resource_specs[0]) * (nres + 1), 720 M_BHND, M_WAITOK); 721 722 /* Initialize and terminate the table */ 723 for (size_t i = 0; i < nres; i++) 724 hr->resource_specs[i] = hwcfg->resource_specs[i]; 725 726 hr->resource_specs[nres].type = -1; 727 728 /* Allocate space for our resource references */ 729 hr->resources = malloc(sizeof(hr->resources[0]) * nres, M_BHND, 730 M_WAITOK); 731 732 /* Allocate host resources */ 733 error = bus_alloc_resources(hr->owner, hr->resource_specs, 734 hr->resources); 735 if (error) { 736 device_printf(dev, "could not allocate bridge resources via " 737 "%s: %d\n", device_get_nameunit(parent_dev), error); 738 goto failed; 739 } 740 741 *resources = hr; 742 return (0); 743 744 failed: 745 if (hr->resource_specs != NULL) 746 free(hr->resource_specs, M_BHND); 747 748 if (hr->resources != NULL) 749 free(hr->resources, M_BHND); 750 751 for (size_t i = 0; i < hr->num_dma_tags; i++) 752 bus_dma_tag_destroy(hr->dma_tags[i]); 753 754 if (hr->dma_tags != NULL) 755 free(hr->dma_tags, M_BHND); 756 757 free(hr, M_BHND); 758 759 return (error); 760 } 761 762 /** 763 * Deallocate a set of bridge host resources. 764 * 765 * @param hr The resources to be freed. 766 */ 767 void 768 bhndb_release_host_resources(struct bhndb_host_resources *hr) 769 { 770 bus_release_resources(hr->owner, hr->resource_specs, hr->resources); 771 772 for (size_t i = 0; i < hr->num_dma_tags; i++) 773 bus_dma_tag_destroy(hr->dma_tags[i]); 774 775 free(hr->resources, M_BHND); 776 free(hr->resource_specs, M_BHND); 777 free(hr->dma_tags, M_BHND); 778 free(hr, M_BHND); 779 } 780 781 /** 782 * Search @p cores for the core serving as the bhnd host bridge. 783 * 784 * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged 785 * bhnd(4) devices to determine the hostb core: 786 * 787 * - The core must have a Broadcom vendor ID. 788 * - The core devclass must match the bridge type. 789 * - The core must be the first device on the bus with the bridged device 790 * class. 791 * 792 * @param cores The core table to search. 793 * @param ncores The number of cores in @p cores. 794 * @param bridge_devclass The expected device class of the bridge core. 795 * @param[out] core If found, the matching host bridge core info. 796 * 797 * @retval 0 success 798 * @retval ENOENT not found 799 */ 800 int 801 bhndb_find_hostb_core(struct bhnd_core_info *cores, u_int ncores, 802 bhnd_devclass_t bridge_devclass, struct bhnd_core_info *core) 803 { 804 struct bhnd_core_match md; 805 struct bhnd_core_info *match; 806 u_int match_core_idx; 807 808 /* Set up a match descriptor for the required device class. */ 809 md = (struct bhnd_core_match) { 810 BHND_MATCH_CORE_CLASS(bridge_devclass), 811 BHND_MATCH_CORE_UNIT(0) 812 }; 813 814 /* Find the matching core with the lowest core index */ 815 match = NULL; 816 match_core_idx = UINT_MAX; 817 818 for (u_int i = 0; i < ncores; i++) { 819 if (!bhnd_core_matches(&cores[i], &md)) 820 continue; 821 822 /* Lower core indices take precedence */ 823 if (match != NULL && match_core_idx < match->core_idx) 824 continue; 825 826 match = &cores[i]; 827 match_core_idx = match->core_idx; 828 } 829 830 if (match == NULL) 831 return (ENOENT); 832 833 *core = *match; 834 return (0); 835 } 836 837 /** 838 * Allocate a host interrupt source and its backing SYS_RES_IRQ host resource. 839 * 840 * @param owner The device to be used to allocate a SYS_RES_IRQ 841 * resource with @p rid. 842 * @param rid The resource ID of the IRQ to be allocated. 843 * @param start The start value to be passed to bus_alloc_resource(). 844 * @param end The end value to be passed to bus_alloc_resource(). 845 * @param count The count to be passed to bus_alloc_resource(). 846 * @param flags The flags to be passed to bus_alloc_resource(). 847 * 848 * @retval non-NULL success 849 * @retval NULL if allocation fails. 850 */ 851 struct bhndb_intr_isrc * 852 bhndb_alloc_intr_isrc(device_t owner, int rid, rman_res_t start, rman_res_t end, 853 rman_res_t count, u_int flags) 854 { 855 struct bhndb_intr_isrc *isrc; 856 857 isrc = malloc(sizeof(*isrc), M_BHND, M_NOWAIT); 858 if (isrc == NULL) 859 return (NULL); 860 861 isrc->is_owner = owner; 862 isrc->is_rid = rid; 863 isrc->is_res = bus_alloc_resource(owner, SYS_RES_IRQ, &isrc->is_rid, 864 start, end, count, flags); 865 if (isrc->is_res == NULL) { 866 free(isrc, M_BHND); 867 return (NULL); 868 } 869 870 return (isrc); 871 } 872 873 /** 874 * Free a host interrupt source and its backing host resource. 875 * 876 * @param isrc The interrupt source to be freed. 877 */ 878 void 879 bhndb_free_intr_isrc(struct bhndb_intr_isrc *isrc) 880 { 881 bus_release_resource(isrc->is_owner, SYS_RES_IRQ, isrc->is_rid, 882 isrc->is_res); 883 free(isrc, M_BHND); 884 } 885 886 /** 887 * Allocate and initialize a new interrupt handler entry. 888 * 889 * @param owner The child device that owns this entry. 890 * @param r The child's interrupt resource. 891 * @param isrc The isrc mapped for this entry. 892 * 893 * @retval non-NULL success 894 * @retval NULL if allocation fails. 895 */ 896 struct bhndb_intr_handler * 897 bhndb_alloc_intr_handler(device_t owner, struct resource *r, 898 struct bhndb_intr_isrc *isrc) 899 { 900 struct bhndb_intr_handler *ih; 901 902 ih = malloc(sizeof(*ih), M_BHND, M_NOWAIT | M_ZERO); 903 ih->ih_owner = owner; 904 ih->ih_res = r; 905 ih->ih_isrc = isrc; 906 ih->ih_cookiep = NULL; 907 ih->ih_active = false; 908 909 return (ih); 910 } 911 912 /** 913 * Free an interrupt handler entry. 914 * 915 * @param br The resource state owning @p ih. 916 * @param ih The interrupt handler entry to be removed. 917 */ 918 void 919 bhndb_free_intr_handler(struct bhndb_intr_handler *ih) 920 { 921 KASSERT(!ih->ih_active, ("free of active interrupt handler %p", 922 ih->ih_cookiep)); 923 924 free(ih, M_BHND); 925 } 926 927 /** 928 * Add an active interrupt handler to the given resource state. 929 * 930 * @param br The resource state to be modified. 931 * @param ih The interrupt handler entry to be added. 932 */ 933 void 934 bhndb_register_intr_handler(struct bhndb_resources *br, 935 struct bhndb_intr_handler *ih) 936 { 937 KASSERT(!ih->ih_active, ("duplicate registration of interrupt " 938 "handler %p", ih->ih_cookiep)); 939 KASSERT(ih->ih_cookiep != NULL, ("missing cookiep")); 940 941 ih->ih_active = true; 942 STAILQ_INSERT_HEAD(&br->bus_intrs, ih, ih_link); 943 } 944 945 /** 946 * Remove an interrupt handler from the given resource state. 947 * 948 * @param br The resource state containing @p ih. 949 * @param ih The interrupt handler entry to be removed. 950 */ 951 void 952 bhndb_deregister_intr_handler(struct bhndb_resources *br, 953 struct bhndb_intr_handler *ih) 954 { 955 KASSERT(ih->ih_active, ("duplicate deregistration of interrupt " 956 "handler %p", ih->ih_cookiep)); 957 958 KASSERT(bhndb_find_intr_handler(br, ih) == ih, 959 ("unknown interrupt handler %p", ih)); 960 961 STAILQ_REMOVE(&br->bus_intrs, ih, bhndb_intr_handler, ih_link); 962 ih->ih_active = false; 963 } 964 965 /** 966 * Return the interrupt handler entry corresponding to @p cookiep, or NULL 967 * if no entry is found. 968 * 969 * @param br The resource state to search for the given @p cookiep. 970 * @param cookiep The interrupt handler's bus-assigned cookiep value. 971 */ 972 struct bhndb_intr_handler * 973 bhndb_find_intr_handler(struct bhndb_resources *br, void *cookiep) 974 { 975 struct bhndb_intr_handler *ih; 976 977 STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) { 978 if (ih == cookiep) 979 return (ih); 980 } 981 982 /* Not found */ 983 return (NULL); 984 } 985 986 /** 987 * Find the maximum start and end limits of the bridged resource @p r. 988 * 989 * If the resource is not currently mapped by the bridge, ENOENT will be 990 * returned. 991 * 992 * @param br The resource state to search. 993 * @param type The resource type (see SYS_RES_*). 994 * @param r The resource to search for in @p br. 995 * @param[out] start On success, the minimum supported start address. 996 * @param[out] end On success, the maximum supported end address. 997 * 998 * @retval 0 success 999 * @retval ENOENT no active mapping found for @p r of @p type 1000 */ 1001 int 1002 bhndb_find_resource_limits(struct bhndb_resources *br, int type, 1003 struct resource *r, rman_res_t *start, rman_res_t *end) 1004 { 1005 struct bhndb_dw_alloc *dynamic; 1006 struct bhndb_region *sregion; 1007 struct bhndb_intr_handler *ih; 1008 1009 switch (type) { 1010 case SYS_RES_IRQ: 1011 /* Is this one of ours? */ 1012 STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) { 1013 if (ih->ih_res == r) 1014 continue; 1015 1016 /* We don't support adjusting IRQ resource limits */ 1017 *start = rman_get_start(r); 1018 *end = rman_get_end(r); 1019 return (0); 1020 } 1021 1022 /* Not found */ 1023 return (ENOENT); 1024 1025 case SYS_RES_MEMORY: { 1026 /* Check for an enclosing dynamic register window */ 1027 if ((dynamic = bhndb_dw_find_resource(br, r))) { 1028 *start = dynamic->target; 1029 *end = dynamic->target + dynamic->win->win_size - 1; 1030 return (0); 1031 } 1032 1033 /* Check for a static region */ 1034 sregion = bhndb_find_resource_region(br, rman_get_start(r), 1035 rman_get_size(r)); 1036 if (sregion != NULL && sregion->static_regwin != NULL) { 1037 *start = sregion->addr; 1038 *end = sregion->addr + sregion->size - 1; 1039 1040 return (0); 1041 } 1042 1043 /* Not found */ 1044 return (ENOENT); 1045 } 1046 1047 default: 1048 device_printf(br->dev, "unknown resource type: %d\n", type); 1049 return (ENOENT); 1050 } 1051 } 1052 1053 /** 1054 * Add a bus region entry to @p r for the given base @p addr and @p size. 1055 * 1056 * @param br The resource state to which the bus region entry will be added. 1057 * @param addr The base address of this region. 1058 * @param size The size of this region. 1059 * @param priority The resource priority to be assigned to allocations 1060 * made within this bus region. 1061 * @param alloc_flags resource allocation flags (@see bhndb_alloc_flags) 1062 * @param static_regwin If available, a static register window mapping this 1063 * bus region entry. If not available, NULL. 1064 * 1065 * @retval 0 success 1066 * @retval non-zero if adding the bus region fails. 1067 */ 1068 int 1069 bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, 1070 bhnd_size_t size, bhndb_priority_t priority, uint32_t alloc_flags, 1071 const struct bhndb_regwin *static_regwin) 1072 { 1073 struct bhndb_region *reg; 1074 1075 /* Insert in the bus resource list */ 1076 reg = malloc(sizeof(*reg), M_BHND, M_NOWAIT); 1077 if (reg == NULL) 1078 return (ENOMEM); 1079 1080 *reg = (struct bhndb_region) { 1081 .addr = addr, 1082 .size = size, 1083 .priority = priority, 1084 .alloc_flags = alloc_flags, 1085 .static_regwin = static_regwin 1086 }; 1087 1088 STAILQ_INSERT_HEAD(&br->bus_regions, reg, link); 1089 1090 return (0); 1091 } 1092 1093 /** 1094 * Return true if a mapping of @p size bytes at @p addr is provided by either 1095 * one contiguous bus region, or by multiple discontiguous regions. 1096 * 1097 * @param br The resource state to query. 1098 * @param addr The requested starting address. 1099 * @param size The requested size. 1100 */ 1101 bool 1102 bhndb_has_static_region_mapping(struct bhndb_resources *br, 1103 bhnd_addr_t addr, bhnd_size_t size) 1104 { 1105 struct bhndb_region *region; 1106 bhnd_addr_t r_addr; 1107 1108 r_addr = addr; 1109 while ((region = bhndb_find_resource_region(br, r_addr, 1)) != NULL) { 1110 /* Must be backed by a static register window */ 1111 if (region->static_regwin == NULL) 1112 return (false); 1113 1114 /* Adjust the search offset */ 1115 r_addr += region->size; 1116 1117 /* Have we traversed a complete (if discontiguous) mapping? */ 1118 if (r_addr == addr + size) 1119 return (true); 1120 } 1121 1122 /* No complete mapping found */ 1123 return (false); 1124 } 1125 1126 /** 1127 * Find the bus region that maps @p size bytes at @p addr. 1128 * 1129 * @param br The resource state to search. 1130 * @param addr The requested starting address. 1131 * @param size The requested size. 1132 * 1133 * @retval bhndb_region A region that fully contains the requested range. 1134 * @retval NULL If no mapping region can be found. 1135 */ 1136 struct bhndb_region * 1137 bhndb_find_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, 1138 bhnd_size_t size) 1139 { 1140 struct bhndb_region *region; 1141 1142 STAILQ_FOREACH(region, &br->bus_regions, link) { 1143 /* Request must fit within the region's mapping */ 1144 if (addr < region->addr) 1145 continue; 1146 1147 if (addr + size > region->addr + region->size) 1148 continue; 1149 1150 return (region); 1151 } 1152 1153 /* Not found */ 1154 return (NULL); 1155 } 1156 1157 /** 1158 * Find the entry matching @p r in @p dwa's references, if any. 1159 * 1160 * @param dwa The dynamic window allocation to search 1161 * @param r The resource to search for in @p dwa. 1162 */ 1163 static struct bhndb_dw_rentry * 1164 bhndb_dw_find_resource_entry(struct bhndb_dw_alloc *dwa, struct resource *r) 1165 { 1166 struct bhndb_dw_rentry *rentry; 1167 1168 LIST_FOREACH(rentry, &dwa->refs, dw_link) { 1169 struct resource *dw_res = rentry->dw_res; 1170 1171 /* Match dev/rid/addr/size */ 1172 if (rman_get_device(dw_res) != rman_get_device(r) || 1173 rman_get_rid(dw_res) != rman_get_rid(r) || 1174 rman_get_start(dw_res) != rman_get_start(r) || 1175 rman_get_size(dw_res) != rman_get_size(r)) 1176 { 1177 continue; 1178 } 1179 1180 /* Matching allocation found */ 1181 return (rentry); 1182 } 1183 1184 return (NULL); 1185 } 1186 1187 /** 1188 * Find the dynamic region allocated for @p r, if any. 1189 * 1190 * @param br The resource state to search. 1191 * @param r The resource to search for. 1192 * 1193 * @retval bhndb_dw_alloc The allocation record for @p r. 1194 * @retval NULL if no dynamic window is allocated for @p r. 1195 */ 1196 struct bhndb_dw_alloc * 1197 bhndb_dw_find_resource(struct bhndb_resources *br, struct resource *r) 1198 { 1199 struct bhndb_dw_alloc *dwa; 1200 1201 for (size_t i = 0; i < br->dwa_count; i++) { 1202 dwa = &br->dw_alloc[i]; 1203 1204 /* Skip free dynamic windows */ 1205 if (bhndb_dw_is_free(br, dwa)) 1206 continue; 1207 1208 /* Matching allocation found? */ 1209 if (bhndb_dw_find_resource_entry(dwa, r) != NULL) 1210 return (dwa); 1211 } 1212 1213 return (NULL); 1214 } 1215 1216 /** 1217 * Find an existing dynamic window mapping @p size bytes 1218 * at @p addr. The window may or may not be free. 1219 * 1220 * @param br The resource state to search. 1221 * @param addr The requested starting address. 1222 * @param size The requested size. 1223 * 1224 * @retval bhndb_dw_alloc A window allocation that fully contains the requested 1225 * range. 1226 * @retval NULL If no mapping region can be found. 1227 */ 1228 struct bhndb_dw_alloc * 1229 bhndb_dw_find_mapping(struct bhndb_resources *br, bhnd_addr_t addr, 1230 bhnd_size_t size) 1231 { 1232 struct bhndb_dw_alloc *dwr; 1233 const struct bhndb_regwin *win; 1234 1235 /* Search for an existing dynamic mapping of this address range. */ 1236 for (size_t i = 0; i < br->dwa_count; i++) { 1237 dwr = &br->dw_alloc[i]; 1238 win = dwr->win; 1239 1240 /* Verify the range */ 1241 if (addr < dwr->target) 1242 continue; 1243 1244 if (addr + size > dwr->target + win->win_size) 1245 continue; 1246 1247 /* Found a usable mapping */ 1248 return (dwr); 1249 } 1250 1251 /* not found */ 1252 return (NULL); 1253 } 1254 1255 /** 1256 * Retain a reference to @p dwa for use by @p res. 1257 * 1258 * @param br The resource state owning @p dwa. 1259 * @param dwa The allocation record to be retained. 1260 * @param res The resource that will own a reference to @p dwa. 1261 * 1262 * @retval 0 success 1263 * @retval ENOMEM Failed to allocate a new reference structure. 1264 */ 1265 int 1266 bhndb_dw_retain(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, 1267 struct resource *res) 1268 { 1269 struct bhndb_dw_rentry *rentry; 1270 1271 KASSERT(bhndb_dw_find_resource_entry(dwa, res) == NULL, 1272 ("double-retain of dynamic window for same resource")); 1273 1274 /* Insert a reference entry; we use M_NOWAIT to allow use from 1275 * within a non-sleepable lock */ 1276 rentry = malloc(sizeof(*rentry), M_BHND, M_NOWAIT); 1277 if (rentry == NULL) 1278 return (ENOMEM); 1279 1280 rentry->dw_res = res; 1281 LIST_INSERT_HEAD(&dwa->refs, rentry, dw_link); 1282 1283 /* Update the free list */ 1284 bit_set(br->dwa_freelist, dwa->rnid); 1285 1286 return (0); 1287 } 1288 1289 /** 1290 * Release a reference to @p dwa previously retained by @p res. If the 1291 * reference count of @p dwa reaches zero, it will be added to the 1292 * free list. 1293 * 1294 * @param br The resource state owning @p dwa. 1295 * @param dwa The allocation record to be released. 1296 * @param res The resource that currently owns a reference to @p dwa. 1297 */ 1298 void 1299 bhndb_dw_release(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, 1300 struct resource *r) 1301 { 1302 struct bhndb_dw_rentry *rentry; 1303 1304 /* Find the rentry */ 1305 rentry = bhndb_dw_find_resource_entry(dwa, r); 1306 KASSERT(rentry != NULL, ("over release of resource entry")); 1307 1308 LIST_REMOVE(rentry, dw_link); 1309 free(rentry, M_BHND); 1310 1311 /* If this was the last reference, update the free list */ 1312 if (LIST_EMPTY(&dwa->refs)) 1313 bit_clear(br->dwa_freelist, dwa->rnid); 1314 } 1315 1316 /** 1317 * Attempt to set (or reset) the target address of @p dwa to map @p size bytes 1318 * at @p addr. 1319 * 1320 * This will apply any necessary window alignment and verify that 1321 * the window is capable of mapping the requested range prior to modifying 1322 * therecord. 1323 * 1324 * @param dev The device on which to issue the BHNDB_SET_WINDOW_ADDR() request. 1325 * @param br The resource state owning @p dwa. 1326 * @param dwa The allocation record to be configured. 1327 * @param addr The address to be mapped via @p dwa. 1328 * @param size The number of bytes to be mapped at @p addr. 1329 * 1330 * @retval 0 success 1331 * @retval non-zero no usable register window available. 1332 */ 1333 int 1334 bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br, 1335 struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size) 1336 { 1337 const struct bhndb_regwin *rw; 1338 bus_addr_t offset; 1339 int error; 1340 1341 rw = dwa->win; 1342 1343 KASSERT(bhndb_dw_is_free(br, dwa) || mtx_owned(&br->dw_steal_mtx), 1344 ("attempting to set the target address on an in-use window")); 1345 1346 /* Page-align the target address */ 1347 offset = addr % rw->win_size; 1348 dwa->target = addr - offset; 1349 1350 /* Verify that the window is large enough for the full target */ 1351 if (rw->win_size - offset < size) 1352 return (ENOMEM); 1353 1354 /* Update the window target */ 1355 error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); 1356 if (error) { 1357 dwa->target = 0x0; 1358 return (error); 1359 } 1360 1361 return (0); 1362 } 1363 1364 /** 1365 * Steal an in-use allocation record from @p br, returning the record's current 1366 * target in @p saved on success. 1367 * 1368 * This function acquires a mutex and disables interrupts; callers should 1369 * avoid holding a stolen window longer than required to issue an I/O 1370 * request. 1371 * 1372 * A successful call to bhndb_dw_steal() must be balanced with a call to 1373 * bhndb_dw_return_stolen(). 1374 * 1375 * @param br The resource state from which a window should be stolen. 1376 * @param saved The stolen window's saved target address. 1377 * 1378 * @retval non-NULL success 1379 * @retval NULL no dynamic window regions are defined. 1380 */ 1381 struct bhndb_dw_alloc * 1382 bhndb_dw_steal(struct bhndb_resources *br, bus_addr_t *saved) 1383 { 1384 struct bhndb_dw_alloc *dw_stolen; 1385 1386 KASSERT(bhndb_dw_next_free(br) == NULL, 1387 ("attempting to steal an in-use window while free windows remain")); 1388 1389 /* Nothing to steal from? */ 1390 if (br->dwa_count == 0) 1391 return (NULL); 1392 1393 /* 1394 * Acquire our steal spinlock; this will be released in 1395 * bhndb_dw_return_stolen(). 1396 * 1397 * Acquiring also disables interrupts, which is required when one is 1398 * stealing an in-use existing register window. 1399 */ 1400 mtx_lock_spin(&br->dw_steal_mtx); 1401 1402 dw_stolen = &br->dw_alloc[0]; 1403 *saved = dw_stolen->target; 1404 return (dw_stolen); 1405 } 1406 1407 /** 1408 * Return an allocation record previously stolen using bhndb_dw_steal(). 1409 * 1410 * @param dev The device on which to issue a BHNDB_SET_WINDOW_ADDR() request. 1411 * @param br The resource state owning @p dwa. 1412 * @param dwa The allocation record to be returned. 1413 * @param saved The original target address provided by bhndb_dw_steal(). 1414 */ 1415 void 1416 bhndb_dw_return_stolen(device_t dev, struct bhndb_resources *br, 1417 struct bhndb_dw_alloc *dwa, bus_addr_t saved) 1418 { 1419 int error; 1420 1421 mtx_assert(&br->dw_steal_mtx, MA_OWNED); 1422 1423 error = bhndb_dw_set_addr(dev, br, dwa, saved, 0); 1424 if (error) { 1425 panic("failed to restore register window target %#jx: %d\n", 1426 (uintmax_t)saved, error); 1427 } 1428 1429 mtx_unlock_spin(&br->dw_steal_mtx); 1430 } 1431 1432 /** 1433 * Return the count of @p type register windows in @p table. 1434 * 1435 * @param table The table to search. 1436 * @param type The required window type, or BHNDB_REGWIN_T_INVALID to 1437 * count all register window types. 1438 */ 1439 size_t 1440 bhndb_regwin_count(const struct bhndb_regwin *table, 1441 bhndb_regwin_type_t type) 1442 { 1443 const struct bhndb_regwin *rw; 1444 size_t count; 1445 1446 count = 0; 1447 for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { 1448 if (type == BHNDB_REGWIN_T_INVALID || rw->win_type == type) 1449 count++; 1450 } 1451 1452 return (count); 1453 } 1454 1455 /** 1456 * Search @p table for the first window with the given @p type. 1457 * 1458 * @param table The table to search. 1459 * @param type The required window type. 1460 * @param min_size The minimum window size. 1461 * 1462 * @retval bhndb_regwin The first matching window. 1463 * @retval NULL If no window of the requested type could be found. 1464 */ 1465 const struct bhndb_regwin * 1466 bhndb_regwin_find_type(const struct bhndb_regwin *table, 1467 bhndb_regwin_type_t type, bus_size_t min_size) 1468 { 1469 const struct bhndb_regwin *rw; 1470 1471 for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) 1472 { 1473 if (rw->win_type == type && rw->win_size >= min_size) 1474 return (rw); 1475 } 1476 1477 return (NULL); 1478 } 1479 1480 /** 1481 * Search @p windows for the first matching core window. 1482 * 1483 * @param table The table to search. 1484 * @param class The required core class. 1485 * @param unit The required core unit, or -1. 1486 * @param port_type The required port type. 1487 * @param port The required port. 1488 * @param region The required region. 1489 * @param offset The required readable core register block offset. 1490 * @param min_size The required minimum readable size at @p offset. 1491 * 1492 * @retval bhndb_regwin The first matching window. 1493 * @retval NULL If no matching window was found. 1494 */ 1495 const struct bhndb_regwin * 1496 bhndb_regwin_find_core(const struct bhndb_regwin *table, bhnd_devclass_t class, 1497 int unit, bhnd_port_type port_type, u_int port, u_int region, 1498 bus_size_t offset, bus_size_t min_size) 1499 { 1500 const struct bhndb_regwin *rw; 1501 1502 for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) 1503 { 1504 bus_size_t rw_offset; 1505 1506 /* Match on core, port, and region attributes */ 1507 if (rw->win_type != BHNDB_REGWIN_T_CORE) 1508 continue; 1509 1510 if (rw->d.core.class != class) 1511 continue; 1512 1513 if (unit != -1 && rw->d.core.unit != unit) 1514 continue; 1515 1516 if (rw->d.core.port_type != port_type) 1517 continue; 1518 1519 if (rw->d.core.port != port) 1520 continue; 1521 1522 if (rw->d.core.region != region) 1523 continue; 1524 1525 /* Verify that the requested range is mapped within 1526 * this register window */ 1527 if (rw->d.core.offset > offset) 1528 continue; 1529 1530 rw_offset = offset - rw->d.core.offset; 1531 1532 if (rw->win_size < rw_offset) 1533 continue; 1534 1535 if (rw->win_size - rw_offset < min_size) 1536 continue; 1537 1538 return (rw); 1539 } 1540 1541 return (NULL); 1542 } 1543 1544 /** 1545 * Search @p windows for the best available window of at least @p min_size. 1546 * 1547 * Search order: 1548 * - BHND_REGWIN_T_CORE 1549 * - BHND_REGWIN_T_DYN 1550 * 1551 * @param table The table to search. 1552 * @param class The required core class. 1553 * @param unit The required core unit, or -1. 1554 * @param port_type The required port type. 1555 * @param port The required port. 1556 * @param region The required region. 1557 * @param offset The required readable core register block offset. 1558 * @param min_size The required minimum readable size at @p offset. 1559 * 1560 * @retval bhndb_regwin The first matching window. 1561 * @retval NULL If no matching window was found. 1562 */ 1563 const struct bhndb_regwin * 1564 bhndb_regwin_find_best(const struct bhndb_regwin *table, 1565 bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, 1566 u_int region, bus_size_t offset, bus_size_t min_size) 1567 { 1568 const struct bhndb_regwin *rw; 1569 1570 /* Prefer a fixed core mapping */ 1571 rw = bhndb_regwin_find_core(table, class, unit, port_type, 1572 port, region, offset, min_size); 1573 if (rw != NULL) 1574 return (rw); 1575 1576 /* Fall back on a generic dynamic window */ 1577 return (bhndb_regwin_find_type(table, BHNDB_REGWIN_T_DYN, min_size)); 1578 } 1579 1580 /** 1581 * Return true if @p regw defines a BHNDB_REGWIN_T_CORE register window 1582 * that matches against @p core. 1583 * 1584 * @param regw A register window to match against. 1585 * @param core The bhnd(4) core info to match against @p regw. 1586 */ 1587 bool 1588 bhndb_regwin_match_core(const struct bhndb_regwin *regw, 1589 struct bhnd_core_info *core) 1590 { 1591 /* Only core windows are supported */ 1592 if (regw->win_type != BHNDB_REGWIN_T_CORE) 1593 return (false); 1594 1595 /* Device class must match */ 1596 if (bhnd_core_class(core) != regw->d.core.class) 1597 return (false); 1598 1599 /* Device unit must match */ 1600 if (core->unit != regw->d.core.unit) 1601 return (false); 1602 1603 /* Matches */ 1604 return (true); 1605 } 1606 1607 /** 1608 * Search for a core resource priority descriptor in @p table that matches 1609 * @p core. 1610 * 1611 * @param table The table to search. 1612 * @param core The core to match against @p table. 1613 */ 1614 const struct bhndb_hw_priority * 1615 bhndb_hw_priority_find_core(const struct bhndb_hw_priority *table, 1616 struct bhnd_core_info *core) 1617 { 1618 const struct bhndb_hw_priority *hp; 1619 1620 for (hp = table; hp->ports != NULL; hp++) { 1621 if (bhnd_core_matches(core, &hp->match)) 1622 return (hp); 1623 } 1624 1625 /* not found */ 1626 return (NULL); 1627 } 1628 1629 /** 1630 * Search for a port resource priority descriptor in @p table. 1631 * 1632 * @param table The table to search. 1633 * @param core The core to match against @p table. 1634 * @param port_type The required port type. 1635 * @param port The required port. 1636 * @param region The required region. 1637 */ 1638 const struct bhndb_port_priority * 1639 bhndb_hw_priorty_find_port(const struct bhndb_hw_priority *table, 1640 struct bhnd_core_info *core, bhnd_port_type port_type, u_int port, 1641 u_int region) 1642 { 1643 const struct bhndb_hw_priority *hp; 1644 1645 if ((hp = bhndb_hw_priority_find_core(table, core)) == NULL) 1646 return (NULL); 1647 1648 for (u_int i = 0; i < hp->num_ports; i++) { 1649 const struct bhndb_port_priority *pp = &hp->ports[i]; 1650 1651 if (pp->type != port_type) 1652 continue; 1653 1654 if (pp->port != port) 1655 continue; 1656 1657 if (pp->region != region) 1658 continue; 1659 1660 return (pp); 1661 } 1662 1663 /* not found */ 1664 return (NULL); 1665 } 1666