/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/chipc/chipc.h>
#include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>

#include "sibareg.h"
#include "sibavar.h"

static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}

int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}
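/*
 * Usage sketch (illustrative only, not part of this driver): bus-specific
 * front-ends such as siba_bhndb(4) subclass this driver, and their
 * DEVICE_ATTACH() implementations are expected to call siba_attach()
 * below before invoking any other bus methods. A hypothetical subclass
 * 'siba_foo' might do:
 *
 *	static int
 *	siba_foo_attach(device_t dev)
 *	{
 *		int error;
 *
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		return (bhnd_generic_attach(dev));
 *	}
 */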
/**
 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal siba(4) state and performs
 * bus enumeration, and must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods.
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	SIBA_LOCK_INIT(sc);

	/* Enumerate children */
	if ((error = siba_add_children(dev))) {
		device_delete_children(dev);
		SIBA_LOCK_DESTROY(sc);
		return (error);
	}

	return (0);
}

int
siba_detach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	if ((error = bhnd_generic_detach(dev)))
		return (error);

	SIBA_LOCK_DESTROY(sc);

	return (0);
}

int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct siba_softc		*sc;
	const struct siba_devinfo	*dinfo;
	const struct bhnd_core_info	*cfg;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_BHND:
			*result = (uintptr_t)dinfo->pmu.bhnd_info;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
		case SIBA_PMU_BHND:
			dinfo->pmu.bhnd_info = (void *)value;
			dinfo->pmu_state = SIBA_PMU_BHND;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			panic("bhnd_set_pmu_info() called with siba PMU state "
			    "%d", dinfo->pmu_state);
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}
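/*
 * Illustrative note (sketch, not part of this driver): children do not
 * normally call BUS_READ_IVAR() directly; they use the bhnd(4) accessors,
 * each of which resolves to a BHND_IVAR_* case in siba_read_ivar() above:
 *
 *	uint16_t vendor = bhnd_get_vendor(child);
 *	uint16_t device = bhnd_get_device(child);
 *	int hwrev = bhnd_get_hwrev(child);
 */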
/* BHND_BUS_ALLOC_PMU() */
static int
siba_alloc_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 chipc;
	device_t		 pwrctl;
	struct chipc_caps	 ccaps;
	siba_pmu_state		 pmu_state;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	pwrctl = NULL;

	/* Fetch ChipCommon capability flags */
	chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC);
	if (chipc != NULL) {
		ccaps = *BHND_CHIPC_GET_CAPS(chipc);
		bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC);
	} else {
		memset(&ccaps, 0, sizeof(ccaps));
	}

	/* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and
	 * advertises PMU support */
	if (ccaps.pmu) {
		if ((error = bhnd_generic_alloc_pmu(dev, child)))
			return (error);

		KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
		    ("unexpected PMU state: %d", dinfo->pmu_state));

		return (0);
	}

	/*
	 * This is either a legacy PWRCTL chipset, or the device does not
	 * support dynamic clock control.
	 *
	 * We need to map all bhnd(4) bus PMU requests to PWRCTL or no-op
	 * operations.
	 */
	if (ccaps.pwr_ctrl) {
		pmu_state = SIBA_PMU_PWRCTL;
		pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);
		if (pwrctl == NULL) {
			device_printf(dev, "PWRCTL not found\n");
			return (ENODEV);
		}
	} else {
		pmu_state = SIBA_PMU_FIXED;
		pwrctl = NULL;
	}

	SIBA_LOCK(sc);

	/* Per-core PMU state already allocated? */
	if (dinfo->pmu_state != SIBA_PMU_NONE) {
		panic("duplicate PMU allocation for %s",
		    device_get_nameunit(child));
	}

	/* Update the child's PMU allocation state, and transfer ownership of
	 * the PWRCTL provider reference (if any) */
	dinfo->pmu_state = pmu_state;
	dinfo->pmu.pwrctl = pwrctl;

	SIBA_UNLOCK(sc);

	return (0);
}

/* BHND_BUS_RELEASE_PMU() */
static int
siba_release_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("pmu over-release for %s", device_get_nameunit(child));
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_pmu(dev, child));

	case SIBA_PMU_PWRCTL:
		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
		 * reservations */
		pwrctl = dinfo->pmu.pwrctl;
		error = bhnd_pwrctl_request_clock(pwrctl, child,
		    BHND_CLOCK_DYN);
		if (error) {
			SIBA_UNLOCK(sc);
			return (error);
		}

		/* Clean up the child's PMU state */
		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* Release the provider reference */
		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
		return (0);

	case SIBA_PMU_FIXED:
		/* Clean up the child's PMU state */
		KASSERT(dinfo->pmu.pwrctl == NULL,
		    ("PWRCTL reference with FIXED state"));

		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
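/*
 * Illustrative usage (sketch): a child core driver brackets its clock
 * requests with PMU allocation and release, which the bhnd(4) wrappers
 * route to siba_alloc_pmu()/siba_release_pmu() above:
 *
 *	if ((error = bhnd_alloc_pmu(dev)))
 *		return (error);
 *	error = bhnd_request_clock(dev, BHND_CLOCK_HT);
 *	...
 *	bhnd_release_pmu(dev);
 */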
/* BHND_BUS_GET_CLOCK_LATENCY() */
static int
siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
    u_int *latency)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_latency(dev, child, clock,
		    latency));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
		    latency);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and incurs no transition
		 * delay. */
		switch (clock) {
		case BHND_CLOCK_HT:
			*latency = 0;
			return (0);

		default:
			return (ENODEV);
		}

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_FREQ() */
static int
siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
    u_int *freq)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
		    freq);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_EXT_RSRC() */
static int
siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
/* BHND_BUS_RELEASE_EXT_RSRC() */
static int
siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_CLOCK() */
static int
siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_clock(dev, child, clock));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    clock);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and fulfills any of the
		 * following clock requests */
		switch (clock) {
		case BHND_CLOCK_DYN:
		case BHND_CLOCK_ILP:
		case BHND_CLOCK_ALP:
		case BHND_CLOCK_HT:
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_ENABLE_CLOCKS() */
static int
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_enable_clocks(dev, child, clocks));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* All (supported) clocks are already enabled by default */
		clocks &= ~(BHND_CLOCK_DYN |
		    BHND_CLOCK_ILP |
		    BHND_CLOCK_ALP |
		    BHND_CLOCK_HT);

		if (clocks != 0) {
			device_printf(dev, "%s requested unknown clocks: %#x\n",
			    device_get_nameunit(child), clocks);
			return (ENODEV);
		}

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}
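/*
 * Illustrative usage (sketch): a child core driver reads and updates its
 * I/O control flags via the bhnd(4) wrappers backed by siba_read_ioctl()
 * above and siba_write_ioctl() below; MY_CORE_FLAG is a hypothetical
 * core-specific flag bit:
 *
 *	uint16_t ioctl;
 *
 *	if ((error = bhnd_read_ioctl(dev, &ioctl)))
 *		return (error);
 *	error = bhnd_write_ioctl(dev, MY_CORE_FLAG, MY_CORE_FLAG);
 */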
static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask);
	return (0);
}

static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t	ts_low;
	uint16_t	ioctl;
	int		error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is target reject enabled? */
	if (ts_low & SIBA_TML_REJ_MASK)
		return (true);

	/* Is core clocked? */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, imstate;
	uint16_t		 clkflags;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
		return (error);

	/* Set RESET, clear REJ, set the caller's IOCTL flags, and
	 * force clocks to ensure the signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
	    (ioctl << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, UINT32_MAX);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
		    0x0, SIBA_TMH_SERR);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_IBE|SIBA_IM_TO);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    SIBA_TML_RESET);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));

	return (0);
}
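/*
 * Illustrative usage (sketch): core drivers drive the reset/suspend
 * sequences implemented here through the bhnd(4) wrappers; e.g. bringing
 * a core out of reset with no additional I/O flags, and suspending it
 * again on detach:
 *
 *	if ((error = bhnd_reset_hw(dev, 0, 0)))
 *		return (error);
 *	...
 *	bhnd_suspend_hw(dev, 0);
 */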
static int
siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 idl, ts_low, ts_mask;
	uint16_t		 cflags, clkflags;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Already in RESET? */
	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
	if (ts_low & SIBA_TML_RESET)
		return (0);

	/* If clocks are already disabled, we can place the core directly
	 * into RESET|REJ while setting the caller's IOCTL flags. */
	cflags = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(cflags & BHND_IOCTL_CLK_EN)) {
		ts_low = SIBA_TML_RESET | SIBA_TML_REJ |
		    (ioctl << SIBA_TML_SICF_SHIFT);
		ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK;

		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		    ts_low, ts_mask);
		return (0);
	}

	/* Reject further transactions reaching this core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    SIBA_TML_REJ, SIBA_TML_REJ);

	/* Wait for transaction busy flag to clear for all transactions
	 * initiated by this core */
	error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
	    0x0, SIBA_TMH_BUSY, 100000);
	if (error)
		return (error);

	/* If this is an initiator core, we need to reject initiator
	 * transactions too. */
	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
	if (idl & SIBA_IDL_INIT) {
		/* Reject further initiator transactions */
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    SIBA_IM_RJ, SIBA_IM_RJ);

		/* Wait for initiator busy flag to clear */
		error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0x0, SIBA_IM_BY, 100000);
		if (error)
			return (error);
	}

	/* Put the core into RESET, set the caller's IOCTL flags, and
	 * force clocks to ensure the RESET signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
	    (ioctl << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
	ts_mask = SIBA_TML_RESET |
	    SIBA_TML_SICF_MASK;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low,
	    ts_mask);

	/* Give RESET ample time */
	DELAY(10);

	/* Clear previously asserted initiator reject */
	if (idl & SIBA_IDL_INIT) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_RJ);
	}

	/* Disable all clocks, leaving RESET and REJ asserted */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT);

	/*
	 * Core is now in RESET.
	 *
	 * If the core holds any PWRCTL clock reservations, we need to release
	 * those now. This emulates the standard bhnd(4) PMU behavior of RESET
	 * automatically clearing clkctl.
	 */
	SIBA_LOCK(sc);
	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    BHND_CLOCK_DYN);
		SIBA_UNLOCK(sc);

		if (error) {
			device_printf(child, "failed to release clock request: "
			    "%d\n", error);
			return (error);
		}

		return (0);
	} else {
		SIBA_UNLOCK(sc);
		return (0);
	}
}

static int
siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct siba_devinfo	*dinfo;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if (dinfo->cfg_res[0] == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(dinfo->cfg_res[0]->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
		    offset);
		return (0);
	default:
		return (EINVAL);
	}
}

static int
siba_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(r->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
		return (0);
	case 2:
		bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
		return (0);
	case 4:
		bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
		return (0);
	default:
		return (EINVAL);
	}
}

static u_int
siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
		    type));

	dinfo = device_get_ivars(child);
	return (siba_port_count(&dinfo->core_id, type));
}

static u_int
siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
		    type, port));

	dinfo = device_get_ivars(child);
	return (siba_port_region_count(&dinfo->core_id, type, port));
}
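/*
 * Illustrative usage (sketch): children access their SIBA_CFG0 block
 * through the bhnd(4) config wrappers backed by siba_read_config() and
 * siba_write_config() above:
 *
 *	uint32_t tmstate;
 *
 *	error = bhnd_read_config(dev, SIBA_CFG0_TMSTATELOW, &tmstate, 4);
 */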
static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
		    port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace entry */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL)
		return (addrspace->sa_rid);

	/* Try the config blocks */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL)
		return (cfg->cb_rid);

	/* Not found */
	return (-1);
}

static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_addrspace; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}
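/*
 * Illustrative usage (sketch): a child can look up the base address and
 * size of its first device port region via the bhnd(4) wrapper backed by
 * siba_get_region_addr() above:
 *
 *	bhnd_addr_t addr;
 *	bhnd_size_t size;
 *
 *	error = bhnd_get_region_addr(dev, BHND_PORT_DEVICE, 0, 0, &addr,
 *	    &size);
 */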
/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);
	if (!dinfo->intr_en) {
		/* No interrupts */
		return (0);
	} else {
		/* One assigned interrupt */
		return (1);
	}
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	KASSERT(intr == 0, ("invalid ivec %u", intr));

	dinfo = device_get_ivars(child);

	KASSERT(dinfo->intr_en, ("core does not have an interrupt assigned"));
	*ivec = dinfo->intr.flag;
	return (0);
}

/**
 * Register all address space mappings for @p di.
 *
 * @param dev The siba bus device.
 * @param di The device info instance on which to register all address
 * space entries.
 * @param r A resource mapping the enumeration table block for @p di.
 */
static int
siba_register_addrspaces(device_t dev, struct siba_devinfo *di,
    struct bhnd_resource *r)
{
	struct siba_core_id	*cid;
	uint32_t		 addr;
	uint32_t		 size;
	int			 error;

	cid = &di->core_id;

	/* Register the device address space entries */
	for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) {
		uint32_t	adm;
		u_int		adm_offset;
		uint32_t	bus_reserved;

		/* Determine the register offset */
		adm_offset = siba_admatch_offset(i);
		if (adm_offset == 0) {
			device_printf(dev, "addrspace %hhu is unsupported\n",
			    i);
			return (ENODEV);
		}

		/* Fetch the address match register value */
		adm = bhnd_bus_read_4(r, adm_offset);

		/* Parse the value */
		if ((error = siba_parse_admatch(adm, &addr, &size))) {
			device_printf(dev, "failed to decode address "
			    "match register value 0x%x\n", adm);
			return (error);
		}

		/* If this is the device's core/enumeration addrspace,
		 * reserve the Sonics configuration register blocks for the
		 * use of our bus. */
		bus_reserved = 0;
		if (i == SIBA_CORE_ADDRSPACE)
			bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE;

		/* Append the region info */
		error = siba_append_dinfo_region(di, i, addr, size,
		    bus_reserved);
		if (error)
			return (error);
	}

	return (0);
}
/**
 * Register all interrupt descriptors for @p dinfo. Must be called after
 * configuration blocks have been mapped.
 *
 * @param dev The siba bus device.
 * @param child The siba child device.
 * @param dinfo The device info instance on which to register all interrupt
 * descriptor entries.
 * @param r A resource mapping the enumeration table block for @p dinfo.
 */
static int
siba_register_interrupts(device_t dev, device_t child,
    struct siba_devinfo *dinfo, struct bhnd_resource *r)
{
	uint32_t	tpsflag;
	int		error;

	/* Is backplane interrupt distribution enabled for this core? */
	tpsflag = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_TPSFLAG));
	if ((tpsflag & SIBA_TPS_F0EN0) == 0) {
		dinfo->intr_en = false;
		return (0);
	}

	/* Have one interrupt */
	dinfo->intr_en = true;
	dinfo->intr.flag = SIBA_REG_GET(tpsflag, TPS_NUM0);
	dinfo->intr.mapped = false;
	dinfo->intr.irq = 0;
	dinfo->intr.rid = -1;

	/* Map the interrupt */
	error = BHND_BUS_MAP_INTR(dev, child, 0 /* single intr is always 0 */,
	    &dinfo->intr.irq);
	if (error) {
		device_printf(dev, "failed mapping interrupt line for core %u: "
		    "%d\n", dinfo->core_id.core_info.core_idx, error);
		return (error);
	}
	dinfo->intr.mapped = true;

	/* Update the resource list */
	dinfo->intr.rid = resource_list_add_next(&dinfo->resources, SYS_RES_IRQ,
	    dinfo->intr.irq, dinfo->intr.irq, 1);

	return (0);
}

/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
	struct siba_addrspace	*addrspace;
	rman_res_t		 r_start, r_count, r_end;
	uint8_t			 num_cfg;
	int			 rid;

	num_cfg = dinfo->core_id.num_cfg_blocks;
	if (num_cfg > SIBA_MAX_CFG) {
		device_printf(dev, "config block count %hhu out of range\n",
		    num_cfg);
		return (ENXIO);
	}

	/* Fetch the core register address space */
	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
	if (addrspace == NULL) {
		device_printf(dev, "missing device registers\n");
		return (ENXIO);
	}

	/*
	 * Map the per-core configuration blocks
	 */
	for (uint8_t i = 0; i < num_cfg; i++) {
		/* Add to child's resource list */
		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
		r_count = SIBA_CFG_SIZE;
		r_end = r_start + r_count - 1;

		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
		    r_start, r_end, r_count);

		/* Initialize config block descriptor */
		dinfo->cfg[i] = ((struct siba_cfg_block) {
			.cb_base = r_start,
			.cb_size = SIBA_CFG_SIZE,
			.cb_rid = rid
		});

		/* Map the config resource for bus-level access */
		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
		    r_count, RF_ACTIVE|RF_SHAREABLE);

		if (dinfo->cfg_res[i] == NULL) {
			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
			    i);
			return (ENXIO);
		}
	}

	return (0);
}

static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct siba_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}
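/*
 * Illustrative note (sketch): once siba_register_interrupts() above has
 * mapped a core's backplane interrupt and added it to the resource list,
 * a child driver allocates it like any other bus resource:
 *
 *	int rid = 0;
 *	struct resource *irq;
 *
 *	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
 */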
static void
siba_child_deleted(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free siba device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		siba_free_dinfo(dev, child, dinfo);

	device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
	const struct bhnd_chipid	*chipid;
	struct siba_core_id		*cores;
	struct bhnd_resource		*r;
	device_t			*children;
	int				 rid;
	int				 error;

	cores = NULL;
	r = NULL;

	chipid = BHND_BUS_GET_CHIPID(dev, dev);

	/* Allocate our temporary core and device table */
	cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK);
	children = malloc(sizeof(*children) * chipid->ncores, M_BHND,
	    M_WAITOK | M_ZERO);

	/*
	 * Add child devices for all discovered cores.
	 *
	 * On bridged devices, we'll exhaust our available register windows if
	 * we map config blocks on unpopulated/disabled cores. To avoid this, we
	 * defer mapping of the per-core siba(4) config blocks until all cores
	 * have been enumerated and otherwise configured.
	 */
	for (u_int i = 0; i < chipid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;
		uint32_t		 idhigh, idlow;
		rman_res_t		 r_count, r_end, r_start;

		/* Map the core's register block */
		rid = 0;
		r_start = SIBA_CORE_ADDR(i);
		r_count = SIBA_CORE_SIZE;
		r_end = r_start + SIBA_CORE_SIZE - 1;
		r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start,
		    r_end, r_count, RF_ACTIVE);
		if (r == NULL) {
			error = ENXIO;
			goto failed;
		}

		/* Read the core info */
		idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH));
		idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW));

		cores[i] = siba_parse_core_id(idhigh, idlow, i, 0);

		/* Determine and set unit number */
		for (u_int j = 0; j < i; j++) {
			struct bhnd_core_info *cur = &cores[i].core_info;
			struct bhnd_core_info *prev = &cores[j].core_info;

			if (prev->vendor == cur->vendor &&
			    prev->device == cur->device)
				cur->unit++;
		}

		/* Add the child device */
		child = BUS_ADD_CHILD(dev, 0, NULL, -1);
		if (child == NULL) {
			error = ENXIO;
			goto failed;
		}

		children[i] = child;

		/* Initialize per-device bus info */
		if ((dinfo = device_get_ivars(child)) == NULL) {
			error = ENXIO;
			goto failed;
		}

		if ((error = siba_init_dinfo(dev, dinfo, &cores[i])))
			goto failed;

		/* Register the core's address space(s). */
		if ((error = siba_register_addrspaces(dev, dinfo, r)))
			goto failed;

		/* Register the core's interrupts */
		if ((error = siba_register_interrupts(dev, child, dinfo, r)))
			goto failed;

		/* Unmap the core's register block */
		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
		r = NULL;

		/* If pins are floating or the hardware is otherwise
		 * unpopulated, the device shouldn't be used. */
		if (bhnd_is_hw_disabled(child))
			device_disable(child);
	}

	/* Map all valid cores' config register blocks and perform interrupt
	 * assignment */
	for (u_int i = 0; i < chipid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		child = children[i];

		/* Skip if core is disabled */
		if (bhnd_is_hw_disabled(child))
			continue;

		dinfo = device_get_ivars(child);

		/* Map the core's config blocks */
		if ((error = siba_map_cfg_resources(dev, dinfo)))
			goto failed;

		/* Issue bus callback for fully initialized child. */
		BHND_BUS_CHILD_ADDED(dev, child);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	return (0);

failed:
	for (u_int i = 0; i < chipid->ncores; i++) {
		if (children[i] == NULL)
			continue;

		device_delete_child(dev, children[i]);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	if (r != NULL)
		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);

	return (error);
}

static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_alloc_pmu,		siba_alloc_pmu),
	DEVMETHOD(bhnd_bus_release_pmu,		siba_release_pmu),
	DEVMETHOD(bhnd_bus_request_clock,	siba_request_clock),
	DEVMETHOD(bhnd_bus_enable_clocks,	siba_enable_clocks),
	DEVMETHOD(bhnd_bus_request_ext_rsrc,	siba_request_ext_rsrc),
	DEVMETHOD(bhnd_bus_release_ext_rsrc,	siba_release_ext_rsrc),
	DEVMETHOD(bhnd_bus_get_clock_freq,	siba_get_clock_freq),
	DEVMETHOD(bhnd_bus_get_clock_latency,	siba_get_clock_latency),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_intr_ivec,	siba_get_intr_ivec),

	DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc),
    bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);