/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/* Base class for all Synopsys DesignWare PCI/PCIe drivers */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/ofw/ofwpci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_dw.h>

#include "pcib_if.h"
#include "pci_dw_if.h"

#ifdef DEBUG
#define	debugf(fmt, args...) do { printf(fmt,##args); } while (0)
#else
#define	debugf(fmt, args...)
#endif

#define	DBI_WR1(sc, reg, val)	pci_dw_dbi_wr1((sc)->dev, reg, val)
#define	DBI_WR2(sc, reg, val)	pci_dw_dbi_wr2((sc)->dev, reg, val)
#define	DBI_WR4(sc, reg, val)	pci_dw_dbi_wr4((sc)->dev, reg, val)
#define	DBI_RD1(sc, reg)	pci_dw_dbi_rd1((sc)->dev, reg)
#define	DBI_RD2(sc, reg)	pci_dw_dbi_rd2((sc)->dev, reg)
#define	DBI_RD4(sc, reg)	pci_dw_dbi_rd4((sc)->dev, reg)

#define	IATU_UR_WR4(sc, reg, val)	\
    bus_write_4((sc)->iatu_ur_res, (sc)->iatu_ur_offset + (reg), (val))
#define	IATU_UR_RD4(sc, reg)		\
    bus_read_4((sc)->iatu_ur_res, (sc)->iatu_ur_offset + (reg))

#define	PCI_BUS_SHIFT	20
#define	PCI_SLOT_SHIFT	15
#define	PCI_FUNC_SHIFT	12
#define	PCI_BUS_MASK	0xFF
#define	PCI_SLOT_MASK	0x1F
#define	PCI_FUNC_MASK	0x07
#define	PCI_REG_MASK	0xFFF

#define	IATU_CFG_BUS(bus)	((uint64_t)((bus) & 0xff) << 24)
#define	IATU_CFG_SLOT(slot)	((uint64_t)((slot) & 0x1f) << 19)
#define	IATU_CFG_FUNC(func)	((uint64_t)((func) & 0x07) << 16)

static uint32_t
pci_dw_dbi_read(device_t dev, u_int reg, int width)
{
	struct pci_dw_softc *sc;

	sc = device_get_softc(dev);
	MPASS(sc->dbi_res != NULL);

	switch (width) {
	case 4:
		return (bus_read_4(sc->dbi_res, reg));
	case 2:
		return (bus_read_2(sc->dbi_res, reg));
	case 1:
		return (bus_read_1(sc->dbi_res, reg));
	default:
		device_printf(sc->dev, "Unsupported width: %d\n", width);
		return (0xFFFFFFFF);
	}
}

static void
pci_dw_dbi_write(device_t dev, u_int reg, uint32_t val, int width)
{
	struct pci_dw_softc *sc;

	sc = device_get_softc(dev);
	MPASS(sc->dbi_res != NULL);

	switch (width) {
	case 4:
		bus_write_4(sc->dbi_res, reg, val);
		break;
	case 2:
		bus_write_2(sc->dbi_res, reg, val);
		break;
	case 1:
		bus_write_1(sc->dbi_res, reg, val);
		break;
	default:
		device_printf(sc->dev, "Unsupported width: %d\n", width);
		break;
	}
}

static void
pci_dw_dbi_protect(struct pci_dw_softc *sc, bool protect)
{
	uint32_t reg;

	reg = DBI_RD4(sc, DW_MISC_CONTROL_1);
	if (protect)
		reg &= ~DBI_RO_WR_EN;
	else
		reg |= DBI_RO_WR_EN;
	DBI_WR4(sc, DW_MISC_CONTROL_1, reg);
}

static bool
pci_dw_check_dev(struct pci_dw_softc *sc, u_int bus, u_int slot, u_int func,
    u_int reg)
{
	bool status;
	int rv;

	if (bus < sc->bus_start || bus > sc->bus_end || slot > PCI_SLOTMAX ||
	    func > PCI_FUNCMAX || reg > PCIE_REGMAX)
		return (false);

	/* The link must be up to access any non-root bus */
	if (bus != sc->root_bus) {
		rv = PCI_DW_GET_LINK(sc->dev, &status);
		if (rv != 0 || !status)
			return (false);
		return (true);
	}

	/* The root bus holds only the root port: device 0, function 0 */
	if (slot > 0 || func > 0)
		return (false);
	return (true);
}

/*
 * Detect whether the iATU uses the "unroll" register layout; in that mode
 * the legacy viewport register is not implemented and reads as all ones.
 */
static bool
pci_dw_detect_atu_unroll(struct pci_dw_softc *sc)
{
	return (DBI_RD4(sc, DW_IATU_VIEWPORT) == 0xFFFFFFFFU);
}

/*
 * Probe the number of usable outbound iATU regions in unroll mode by
 * testing each region's target address register with a dummy value.
 */
static int
pci_dw_detect_out_atu_regions_unroll(struct pci_dw_softc *sc)
{
	int num_regions, i;
	uint32_t reg;

	num_regions = sc->iatu_ur_size / DW_IATU_UR_STEP;

	for (i = 0; i < num_regions; ++i) {
		IATU_UR_WR4(sc, DW_IATU_UR_REG(i, LWR_TARGET_ADDR),
		    0x12340000);
		reg = IATU_UR_RD4(sc, DW_IATU_UR_REG(i, LWR_TARGET_ADDR));
		if (reg != 0x12340000)
			break;
	}

	sc->num_out_regions = i;

	return (0);
}
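
/*
 * Probe the number of usable outbound iATU regions in legacy (viewport
 * indexed) mode.
 */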
static int
pci_dw_detect_out_atu_regions_legacy(struct pci_dw_softc *sc)
{
	int num_viewports, i;
	uint32_t reg;

	/* Find out how many viewports there are in total */
	DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(~0U));
	reg = DBI_RD4(sc, DW_IATU_VIEWPORT);
	if (reg > IATU_REGION_INDEX(~0U)) {
		device_printf(sc->dev,
		    "Cannot detect number of output iATU regions; read %#x\n",
		    reg);
		return (ENXIO);
	}

	num_viewports = reg + 1;

	/*
	 * Find out how many of them are outbound by seeing whether a dummy
	 * page-aligned address sticks.
	 */
	for (i = 0; i < num_viewports; ++i) {
		DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(i));
		DBI_WR4(sc, DW_IATU_LWR_TARGET_ADDR, 0x12340000);
		reg = DBI_RD4(sc, DW_IATU_LWR_TARGET_ADDR);
		if (reg != 0x12340000)
			break;
	}

	sc->num_out_regions = i;

	return (0);
}

static int
pci_dw_detect_out_atu_regions(struct pci_dw_softc *sc)
{
	if (sc->iatu_ur_res)
		return (pci_dw_detect_out_atu_regions_unroll(sc));
	else
		return (pci_dw_detect_out_atu_regions_legacy(sc));
}

static int
pci_dw_map_out_atu_unroll(struct pci_dw_softc *sc, int idx, int type,
    uint64_t pa, uint64_t pci_addr, uint32_t size)
{
	uint32_t reg;
	int i;

	if (size == 0)
		return (0);

	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LWR_BASE_ADDR),
	    pa & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, UPPER_BASE_ADDR),
	    (pa >> 32) & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LIMIT_ADDR),
	    (pa + size - 1) & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LWR_TARGET_ADDR),
	    pci_addr & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, UPPER_TARGET_ADDR),
	    (pci_addr >> 32) & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, CTRL1),
	    IATU_CTRL1_TYPE(type));
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, CTRL2),
	    IATU_CTRL2_REGION_EN);

	/* Wait until setup becomes valid */
	for (i = 10; i > 0; i--) {
		reg = IATU_UR_RD4(sc, DW_IATU_UR_REG(idx, CTRL2));
		if (reg & IATU_CTRL2_REGION_EN)
			return (0);
		DELAY(5);
	}

	device_printf(sc->dev,
	    "Cannot map outbound region %d in unroll mode iATU\n", idx);
	return (ETIMEDOUT);
}

static int
pci_dw_map_out_atu_legacy(struct pci_dw_softc *sc, int idx, int type,
    uint64_t pa, uint64_t pci_addr, uint32_t size)
{
	uint32_t reg;
	int i;

	if (size == 0)
		return (0);

	DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(idx));
	DBI_WR4(sc, DW_IATU_LWR_BASE_ADDR, pa & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_UPPER_BASE_ADDR, (pa >> 32) & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_LIMIT_ADDR, (pa + size - 1) & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_LWR_TARGET_ADDR, pci_addr & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_UPPER_TARGET_ADDR, (pci_addr >> 32) & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_CTRL1, IATU_CTRL1_TYPE(type));
	DBI_WR4(sc, DW_IATU_CTRL2, IATU_CTRL2_REGION_EN);

	/* Wait until setup becomes valid */
	for (i = 10; i > 0; i--) {
		reg = DBI_RD4(sc, DW_IATU_CTRL2);
		if (reg & IATU_CTRL2_REGION_EN)
			return (0);
		DELAY(5);
	}

	device_printf(sc->dev,
	    "Cannot map outbound region %d in legacy mode iATU\n", idx);
	return (ETIMEDOUT);
}

/* Map one outbound ATU region */
static int
pci_dw_map_out_atu(struct pci_dw_softc *sc, int idx, int type,
    uint64_t pa, uint64_t pci_addr, uint32_t size)
{
	if (sc->iatu_ur_res)
		return (pci_dw_map_out_atu_unroll(sc, idx, type, pa,
		    pci_addr, size));
	else
		return (pci_dw_map_out_atu_legacy(sc, idx, type, pa,
		    pci_addr, size));
}
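
/*
 * One-time controller setup: program the root port configuration header,
 * map the outbound iATU windows for memory and (if a region is left) I/O,
 * and configure the link for the number of lanes found in the DT.
 */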
static int
pci_dw_setup_hw(struct pci_dw_softc *sc)
{
	uint32_t reg;
	int rv, i;

	pci_dw_dbi_protect(sc, false);

	/* Setup config registers */
	DBI_WR1(sc, PCIR_CLASS, PCIC_BRIDGE);
	DBI_WR1(sc, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);
	DBI_WR4(sc, PCIR_BAR(0), 4);
	DBI_WR4(sc, PCIR_BAR(1), 0);
	DBI_WR1(sc, PCIR_INTPIN, 1);
	DBI_WR1(sc, PCIR_PRIBUS_1, sc->root_bus);
	DBI_WR1(sc, PCIR_SECBUS_1, sc->sub_bus);
	DBI_WR1(sc, PCIR_SUBBUS_1, sc->bus_end);
	DBI_WR2(sc, PCIR_COMMAND,
	    PCIM_CMD_PORTEN | PCIM_CMD_MEMEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN);
	pci_dw_dbi_protect(sc, true);

	/*
	 * Setup outbound memory windows; iATU region 0 is reserved for
	 * config space accesses.
	 */
	for (i = 0; i < min(sc->num_mem_ranges, sc->num_out_regions - 1); ++i) {
		rv = pci_dw_map_out_atu(sc, i + 1, IATU_CTRL1_TYPE_MEM,
		    sc->mem_ranges[i].host, sc->mem_ranges[i].pci,
		    sc->mem_ranges[i].size);
		if (rv != 0)
			return (rv);
	}

	/* If we have enough regions ... */
	if (sc->num_mem_ranges + 1 < sc->num_out_regions &&
	    sc->io_range.size != 0) {
		/* Setup outbound I/O window */
		rv = pci_dw_map_out_atu(sc, sc->num_mem_ranges + 1,
		    IATU_CTRL1_TYPE_IO, sc->io_range.host, sc->io_range.pci,
		    sc->io_range.size);
		if (rv != 0)
			return (rv);
	}

	/* Adjust number of lanes */
	reg = DBI_RD4(sc, DW_PORT_LINK_CTRL);
	reg &= ~PORT_LINK_CAPABLE(~0);
	switch (sc->num_lanes) {
	case 1:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_1);
		break;
	case 2:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_2);
		break;
	case 4:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_4);
		break;
	case 8:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_8);
		break;
	case 16:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_16);
		break;
	case 32:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_32);
		break;
	default:
		device_printf(sc->dev,
		    "'num-lanes' property has an invalid value: %d\n",
		    sc->num_lanes);
		return (EINVAL);
	}
	DBI_WR4(sc, DW_PORT_LINK_CTRL, reg);

	/* And link width */
	reg = DBI_RD4(sc, DW_GEN2_CTRL);
	reg &= ~GEN2_CTRL_NUM_OF_LANES(~0);
	switch (sc->num_lanes) {
	case 1:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_1);
		break;
	case 2:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_2);
		break;
	case 4:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_4);
		break;
	case 8:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_8);
		break;
	case 16:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_16);
		break;
	case 32:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_32);
		break;
	}
	DBI_WR4(sc, DW_GEN2_CTRL, reg);

	reg = DBI_RD4(sc, DW_GEN2_CTRL);
	reg |= DIRECT_SPEED_CHANGE;
	DBI_WR4(sc, DW_GEN2_CTRL, reg);

	return (0);
}
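
/*
 * Parse the controller's OFW "ranges" into the I/O range and MEM32 range
 * list consumed by pci_dw_setup_hw().  The iATU can only map windows of
 * up to 4GB, so larger ranges are trimmed with a warning.
 */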
static int
pci_dw_decode_ranges(struct pci_dw_softc *sc, struct ofw_pci_range *ranges,
    int nranges)
{
	int i, nmem, rv;

	nmem = 0;
	for (i = 0; i < nranges; i++) {
		if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
		    OFW_PCI_PHYS_HI_SPACE_MEM32)
			++nmem;
	}

	sc->mem_ranges = malloc(nmem * sizeof(*sc->mem_ranges), M_DEVBUF,
	    M_WAITOK);
	sc->num_mem_ranges = nmem;

	nmem = 0;
	for (i = 0; i < nranges; i++) {
		if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
		    OFW_PCI_PHYS_HI_SPACE_IO) {
			if (sc->io_range.size != 0) {
				device_printf(sc->dev,
				    "Duplicated IO range found in DT\n");
				rv = ENXIO;
				goto out;
			}

			sc->io_range = ranges[i];
			if (sc->io_range.size > UINT32_MAX) {
				device_printf(sc->dev,
				    "ATU IO window size is too large. "
				    "Up to 4GB windows are supported, "
				    "trimming window size to 4GB\n");
				sc->io_range.size = UINT32_MAX;
			}
		}
		if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
		    OFW_PCI_PHYS_HI_SPACE_MEM32) {
			MPASS(nmem < sc->num_mem_ranges);
			sc->mem_ranges[nmem] = ranges[i];
			if (sc->mem_ranges[nmem].size > UINT32_MAX) {
				device_printf(sc->dev,
				    "ATU MEM window size is too large. "
				    "Up to 4GB windows are supported, "
				    "trimming window size to 4GB\n");
				sc->mem_ranges[nmem].size = UINT32_MAX;
			}
			++nmem;
		}
	}

	MPASS(nmem == sc->num_mem_ranges);

	if (nmem == 0) {
		device_printf(sc->dev,
		    "Missing required memory range in DT\n");
		return (ENXIO);
	}

	return (0);

out:
	free(sc->mem_ranges, M_DEVBUF);
	return (rv);
}

/*-----------------------------------------------------------------------------
 *
 * P C I B   I N T E R F A C E
 */

/*
 * Config space accesses on the root bus go straight to the DBI registers.
 * Accesses to downstream buses are routed through outbound iATU region 0,
 * which is re-targeted at the selected bus/slot/function before every
 * access (type CFG0 for the secondary bus, CFG1 for buses behind it).
 */
static uint32_t
pci_dw_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct pci_dw_softc *sc;
	struct resource *res;
	uint32_t data;
	uint64_t addr;
	int type, rv;

	sc = device_get_softc(dev);

	if (!pci_dw_check_dev(sc, bus, slot, func, reg))
		return (0xFFFFFFFFU);

	if (bus == sc->root_bus) {
		res = sc->dbi_res;
	} else {
		addr = IATU_CFG_BUS(bus) | IATU_CFG_SLOT(slot) |
		    IATU_CFG_FUNC(func);
		if (bus == sc->sub_bus)
			type = IATU_CTRL1_TYPE_CFG0;
		else
			type = IATU_CTRL1_TYPE_CFG1;
		rv = pci_dw_map_out_atu(sc, 0, type,
		    sc->cfg_pa, addr, sc->cfg_size);
		if (rv != 0)
			return (0xFFFFFFFFU);
		res = sc->cfg_res;
	}

	switch (bytes) {
	case 1:
		data = bus_read_1(res, reg);
		break;
	case 2:
		data = bus_read_2(res, reg);
		break;
	case 4:
		data = bus_read_4(res, reg);
		break;
	default:
		data = 0xFFFFFFFFU;
	}

	return (data);
}

static void
pci_dw_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct pci_dw_softc *sc;
	struct resource *res;
	uint64_t addr;
	int type, rv;

	sc = device_get_softc(dev);
	if (!pci_dw_check_dev(sc, bus, slot, func, reg))
		return;

	if (bus == sc->root_bus) {
		res = sc->dbi_res;
	} else {
		addr = IATU_CFG_BUS(bus) | IATU_CFG_SLOT(slot) |
		    IATU_CFG_FUNC(func);
		if (bus == sc->sub_bus)
			type = IATU_CTRL1_TYPE_CFG0;
		else
			type = IATU_CTRL1_TYPE_CFG1;
		rv = pci_dw_map_out_atu(sc, 0, type,
		    sc->cfg_pa, addr, sc->cfg_size);
		if (rv != 0)
			return;
		res = sc->cfg_res;
	}

	switch (bytes) {
	case 1:
		bus_write_1(res, reg, val);
		break;
	case 2:
		bus_write_2(res, reg, val);
		break;
	case 4:
		bus_write_4(res, reg, val);
		break;
	default:
		break;
	}
}
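
/*
 * MSI/MSI-X interrupts are not handled by the controller itself; all
 * requests are forwarded to the MSI controller resolved from the
 * "msi-map"/"msi-parent" DT properties via ofw_bus_msimap().
 */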
static int
pci_dw_alloc_msi(device_t pci, device_t child, int count,
    int maxcount, int *irqs)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);

	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
	    irqs));
}

static int
pci_dw_release_msi(device_t pci, device_t child, int count, int *irqs)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);
	return (intr_release_msi(pci, child, msi_parent, count, irqs));
}

static int
pci_dw_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);

	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
}

static int
pci_dw_alloc_msix(device_t pci, device_t child, int *irq)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);
	return (intr_alloc_msix(pci, child, msi_parent, irq));
}

static int
pci_dw_release_msix(device_t pci, device_t child, int irq)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);
	return (intr_release_msix(pci, child, msi_parent, irq));
}

static int
pci_dw_get_id(device_t pci, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	phandle_t node;
	int rv;
	uint32_t rid;
	uint16_t pci_rid;

	if (type != PCI_ID_MSI)
		return (pcib_get_id(pci, child, type, id));

	node = ofw_bus_get_node(pci);
	pci_rid = pci_get_rid(child);

	rv = ofw_bus_msimap(node, pci_rid, NULL, &rid);
	if (rv != 0)
		return (rv);
	*id = rid;

	return (0);
}

/*-----------------------------------------------------------------------------
 *
 * B U S / D E V I C E   I N T E R F A C E
 */
static bus_dma_tag_t
pci_dw_get_dma_tag(device_t dev, device_t child)
{
	struct pci_dw_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}
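
/*
 * Shared attach-time initialization.  SoC-specific front-end drivers are
 * expected to map the DBI register window into sc->dbi_res and fill in any
 * controller-specific softc fields before calling this function.
 */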
int
pci_dw_init(device_t dev)
{
	struct pci_dw_softc *sc;
	int rv, rid;
	bool unroll_mode;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	mtx_init(&sc->mtx, "pci_dw_mtx", NULL, MTX_DEF);

	/* XXX: Shouldn't these be configurable? */
	sc->bus_start = 0;
	sc->bus_end = 255;
	sc->root_bus = 0;
	sc->sub_bus = 1;

	/* Read FDT properties */
	if (!sc->coherent)
		sc->coherent = OF_hasprop(sc->node, "dma-coherent");

	rv = OF_getencprop(sc->node, "num-lanes", &sc->num_lanes,
	    sizeof(sc->num_lanes));
	if (rv != sizeof(sc->num_lanes))
		sc->num_lanes = 1;
	if (sc->num_lanes != 1 && sc->num_lanes != 2 &&
	    sc->num_lanes != 4 && sc->num_lanes != 8) {
		device_printf(dev,
		    "Invalid number of lanes: %d\n", sc->num_lanes);
		sc->num_lanes = 0;
		rv = ENXIO;
		goto out;
	}

	rid = 0;
	rv = ofw_bus_find_string_index(sc->node, "reg-names", "config", &rid);
	if (rv != 0) {
		device_printf(dev, "Cannot get config space memory\n");
		rv = ENXIO;
		goto out;
	}
	sc->cfg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->cfg_res == NULL) {
		device_printf(dev, "Cannot allocate config space (rid: %d)\n",
		    rid);
		rv = ENXIO;
		goto out;
	}

	/* Fill up config region related variables */
	sc->cfg_size = rman_get_size(sc->cfg_res);
	sc->cfg_pa = rman_get_start(sc->cfg_res);

	if (bootverbose)
		device_printf(dev, "Bus is%s cache-coherent\n",
		    sc->coherent ? "" : " not");
	rv = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (rv != 0)
		goto out;

	rv = ofw_pcib_init(dev);
	if (rv != 0)
		goto out;
	rv = pci_dw_decode_ranges(sc, sc->ofw_pci.sc_range,
	    sc->ofw_pci.sc_nrange);
	if (rv != 0)
		goto out;

	unroll_mode = pci_dw_detect_atu_unroll(sc);
	if (bootverbose)
		device_printf(dev, "Using iATU %s mode\n",
		    unroll_mode ? "unroll" : "legacy");
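
	/*
	 * In unroll mode the iATU registers live either in a dedicated "atu"
	 * register window or, by default, at a fixed offset inside the DBI
	 * window.
	 */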
"unroll" : "legacy"); 785 if (unroll_mode) { 786 rid = 0; 787 rv = ofw_bus_find_string_index(sc->node, "reg-names", "atu", &rid); 788 if (rv == 0) { 789 sc->iatu_ur_res = bus_alloc_resource_any(dev, 790 SYS_RES_MEMORY, &rid, RF_ACTIVE); 791 if (sc->iatu_ur_res == NULL) { 792 device_printf(dev, 793 "Cannot allocate iATU space (rid: %d)\n", 794 rid); 795 rv = ENXIO; 796 goto out; 797 } 798 sc->iatu_ur_offset = 0; 799 sc->iatu_ur_size = rman_get_size(sc->iatu_ur_res); 800 } else if (rv == ENOENT) { 801 sc->iatu_ur_res = sc->dbi_res; 802 sc->iatu_ur_offset = DW_DEFAULT_IATU_UR_DBI_OFFSET; 803 sc->iatu_ur_size = DW_DEFAULT_IATU_UR_DBI_SIZE; 804 } else { 805 device_printf(dev, "Cannot get iATU space memory\n"); 806 rv = ENXIO; 807 goto out; 808 } 809 } 810 811 rv = pci_dw_detect_out_atu_regions(sc); 812 if (rv != 0) 813 goto out; 814 815 if (bootverbose) 816 device_printf(sc->dev, "Detected outbound iATU regions: %d\n", 817 sc->num_out_regions); 818 819 rv = pci_dw_setup_hw(sc); 820 if (rv != 0) 821 goto out; 822 823 device_add_child(dev, "pci", DEVICE_UNIT_ANY); 824 825 return (0); 826 out: 827 /* XXX Cleanup */ 828 return (rv); 829 } 830 831 static device_method_t pci_dw_methods[] = { 832 /* Bus interface */ 833 DEVMETHOD(bus_get_dma_tag, pci_dw_get_dma_tag), 834 835 /* pcib interface */ 836 DEVMETHOD(pcib_read_config, pci_dw_read_config), 837 DEVMETHOD(pcib_write_config, pci_dw_write_config), 838 DEVMETHOD(pcib_alloc_msi, pci_dw_alloc_msi), 839 DEVMETHOD(pcib_release_msi, pci_dw_release_msi), 840 DEVMETHOD(pcib_alloc_msix, pci_dw_alloc_msix), 841 DEVMETHOD(pcib_release_msix, pci_dw_release_msix), 842 DEVMETHOD(pcib_map_msi, pci_dw_map_msi), 843 DEVMETHOD(pcib_get_id, pci_dw_get_id), 844 845 /* OFW bus interface */ 846 DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), 847 DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), 848 DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), 849 DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), 850 DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), 851 852 /* PCI DW interface */ 853 DEVMETHOD(pci_dw_dbi_read, pci_dw_dbi_read), 854 DEVMETHOD(pci_dw_dbi_write, pci_dw_dbi_write), 855 DEVMETHOD_END 856 }; 857 858 DEFINE_CLASS_1(pcib, pci_dw_driver, pci_dw_methods, 859 sizeof(struct pci_dw_softc), ofw_pcib_driver); 860