/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/* Base class for all Synopsys DesignWare PCI/PCIe drivers */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/devmap.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/ofw/ofwpci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_dw.h>

#include "pcib_if.h"
#include "pci_dw_if.h"

#ifdef DEBUG
#define	debugf(fmt, args...)	do { printf(fmt, ##args); } while (0)
#else
#define	debugf(fmt, args...)
#endif

#define	DBI_WR1(sc, reg, val)	pci_dw_dbi_wr1((sc)->dev, reg, val)
#define	DBI_WR2(sc, reg, val)	pci_dw_dbi_wr2((sc)->dev, reg, val)
#define	DBI_WR4(sc, reg, val)	pci_dw_dbi_wr4((sc)->dev, reg, val)
#define	DBI_RD1(sc, reg)	pci_dw_dbi_rd1((sc)->dev, reg)
#define	DBI_RD2(sc, reg)	pci_dw_dbi_rd2((sc)->dev, reg)
#define	DBI_RD4(sc, reg)	pci_dw_dbi_rd4((sc)->dev, reg)

#define	IATU_UR_WR4(sc, reg, val)	\
    bus_write_4((sc)->iatu_ur_res, (sc)->iatu_ur_offset + (reg), (val))
#define	IATU_UR_RD4(sc, reg)		\
    bus_read_4((sc)->iatu_ur_res, (sc)->iatu_ur_offset + (reg))

#define	PCI_BUS_SHIFT		20
#define	PCI_SLOT_SHIFT		15
#define	PCI_FUNC_SHIFT		12
#define	PCI_BUS_MASK		0xFF
#define	PCI_SLOT_MASK		0x1F
#define	PCI_FUNC_MASK		0x07
#define	PCI_REG_MASK		0xFFF

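/*
 * Encoding of the iATU target address used for type 0/1 configuration
 * requests: bus number in bits 31:24, device (slot) in bits 23:19 and
 * function in bits 18:16.
 */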
#define	IATU_CFG_BUS(bus)	((uint64_t)((bus)  & 0xff) << 24)
#define	IATU_CFG_SLOT(slot)	((uint64_t)((slot) & 0x1f) << 19)
#define	IATU_CFG_FUNC(func)	((uint64_t)((func) & 0x07) << 16)

static uint32_t
pci_dw_dbi_read(device_t dev, u_int reg, int width)
{
	struct pci_dw_softc *sc;

	sc = device_get_softc(dev);
	MPASS(sc->dbi_res != NULL);

	switch (width) {
	case 4:
		return (bus_read_4(sc->dbi_res, reg));
	case 2:
		return (bus_read_2(sc->dbi_res, reg));
	case 1:
		return (bus_read_1(sc->dbi_res, reg));
	default:
		device_printf(sc->dev, "Unsupported width: %d\n", width);
		return (0xFFFFFFFF);
	}
}

static void
pci_dw_dbi_write(device_t dev, u_int reg, uint32_t val, int width)
{
	struct pci_dw_softc *sc;

	sc = device_get_softc(dev);
	MPASS(sc->dbi_res != NULL);

	switch (width) {
	case 4:
		bus_write_4(sc->dbi_res, reg, val);
		break;
	case 2:
		bus_write_2(sc->dbi_res, reg, val);
		break;
	case 1:
		bus_write_1(sc->dbi_res, reg, val);
		break;
	default:
		device_printf(sc->dev, "Unsupported width: %d\n", width);
		break;
	}
}

static void
pci_dw_dbi_protect(struct pci_dw_softc *sc, bool protect)
{
	uint32_t reg;

	reg = DBI_RD4(sc, DW_MISC_CONTROL_1);
	if (protect)
		reg &= ~DBI_RO_WR_EN;
	else
		reg |= DBI_RO_WR_EN;
	DBI_WR4(sc, DW_MISC_CONTROL_1, reg);
}

static bool
pci_dw_check_dev(struct pci_dw_softc *sc, u_int bus, u_int slot, u_int func,
    u_int reg)
{
	bool status;
	int rv;

	if (bus < sc->bus_start || bus > sc->bus_end || slot > PCI_SLOTMAX ||
	    func > PCI_FUNCMAX || reg > PCIE_REGMAX)
		return (false);

	/* The link must be up to access any bus other than the root bus. */
	if (bus != sc->root_bus) {
		rv = PCI_DW_GET_LINK(sc->dev, &status);
		if (rv != 0 || !status)
			return (false);
		return (true);
	}

	/* The root bus has a single root port with a single function. */
	if (slot > 0 || func > 0)
		return (false);
	return (true);
}

static bool
pci_dw_detect_atu_unroll(struct pci_dw_softc *sc)
{
	return (DBI_RD4(sc, DW_IATU_VIEWPORT) == 0xFFFFFFFFU);
}

static int
pci_dw_detect_out_atu_regions_unroll(struct pci_dw_softc *sc)
{
	int num_regions, i;
	uint32_t reg;

	num_regions = sc->iatu_ur_size / DW_IATU_UR_STEP;

	for (i = 0; i < num_regions; ++i) {
		IATU_UR_WR4(sc, DW_IATU_UR_REG(i, LWR_TARGET_ADDR),
		    0x12340000);
		reg = IATU_UR_RD4(sc, DW_IATU_UR_REG(i, LWR_TARGET_ADDR));
		if (reg != 0x12340000)
			break;
	}

	sc->num_out_regions = i;

	return (0);
}

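/*
 * Probe the number of usable outbound iATU regions in legacy (viewport)
 * mode.  Writing all-ones to the region index field of the viewport
 * register and reading it back yields the highest supported region
 * index; each viewport is then tested by writing a scratch target
 * address and checking whether it sticks.
 */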
static int
pci_dw_detect_out_atu_regions_legacy(struct pci_dw_softc *sc)
{
	int num_viewports, i;
	uint32_t reg;

	/* Find out how many viewports there are in total */
	DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(~0U));
	reg = DBI_RD4(sc, DW_IATU_VIEWPORT);
	if (reg > IATU_REGION_INDEX(~0U)) {
		device_printf(sc->dev,
		    "Cannot detect number of output iATU regions; read %#x\n",
		    reg);
		return (ENXIO);
	}

	num_viewports = reg + 1;

	/*
	 * Find out how many of them are outbound by seeing whether a dummy
	 * page-aligned address sticks.
	 */
	for (i = 0; i < num_viewports; ++i) {
		DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(i));
		DBI_WR4(sc, DW_IATU_LWR_TARGET_ADDR, 0x12340000);
		reg = DBI_RD4(sc, DW_IATU_LWR_TARGET_ADDR);
		if (reg != 0x12340000)
			break;
	}

	sc->num_out_regions = i;

	return (0);
}

static int
pci_dw_detect_out_atu_regions(struct pci_dw_softc *sc)
{
	if (sc->iatu_ur_res)
		return (pci_dw_detect_out_atu_regions_unroll(sc));
	else
		return (pci_dw_detect_out_atu_regions_legacy(sc));
}

static int
pci_dw_map_out_atu_unroll(struct pci_dw_softc *sc, int idx, int type,
    uint64_t pa, uint64_t pci_addr, uint32_t size)
{
	uint32_t reg;
	int i;

	if (size == 0)
		return (0);

	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LWR_BASE_ADDR),
	    pa & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, UPPER_BASE_ADDR),
	    (pa >> 32) & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LIMIT_ADDR),
	    (pa + size - 1) & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LWR_TARGET_ADDR),
	    pci_addr & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, UPPER_TARGET_ADDR),
	    (pci_addr >> 32) & 0xFFFFFFFF);
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, CTRL1),
	    IATU_CTRL1_TYPE(type));
	IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, CTRL2),
	    IATU_CTRL2_REGION_EN);

	/* Wait until setup becomes valid */
	for (i = 10; i > 0; i--) {
		reg = IATU_UR_RD4(sc, DW_IATU_UR_REG(idx, CTRL2));
		if (reg & IATU_CTRL2_REGION_EN)
			return (0);
		DELAY(5);
	}

	device_printf(sc->dev,
	    "Cannot map outbound region %d in unroll mode iATU\n", idx);
	return (ETIMEDOUT);
}

static int
pci_dw_map_out_atu_legacy(struct pci_dw_softc *sc, int idx, int type,
    uint64_t pa, uint64_t pci_addr, uint32_t size)
{
	uint32_t reg;
	int i;

	if (size == 0)
		return (0);

	DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(idx));
	DBI_WR4(sc, DW_IATU_LWR_BASE_ADDR, pa & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_UPPER_BASE_ADDR, (pa >> 32) & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_LIMIT_ADDR, (pa + size - 1) & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_LWR_TARGET_ADDR, pci_addr & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_UPPER_TARGET_ADDR, (pci_addr >> 32) & 0xFFFFFFFF);
	DBI_WR4(sc, DW_IATU_CTRL1, IATU_CTRL1_TYPE(type));
	DBI_WR4(sc, DW_IATU_CTRL2, IATU_CTRL2_REGION_EN);

	/* Wait until setup becomes valid */
	for (i = 10; i > 0; i--) {
		reg = DBI_RD4(sc, DW_IATU_CTRL2);
		if (reg & IATU_CTRL2_REGION_EN)
			return (0);
		DELAY(5);
	}

	device_printf(sc->dev,
	    "Cannot map outbound region %d in legacy mode iATU\n", idx);
	return (ETIMEDOUT);
}

/* Map one outbound ATU region */
static int
pci_dw_map_out_atu(struct pci_dw_softc *sc, int idx, int type,
    uint64_t pa, uint64_t pci_addr, uint32_t size)
{
	if (sc->iatu_ur_res)
		return (pci_dw_map_out_atu_unroll(sc, idx, type, pa,
		    pci_addr, size));
	else
		return (pci_dw_map_out_atu_legacy(sc, idx, type, pa,
		    pci_addr, size));
}

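/*
 * One-time bridge setup: program the root port configuration header,
 * set up the outbound iATU windows for the memory and (optional) I/O
 * ranges taken from the DT, and program the requested link width.
 * Outbound region 0 is left unused here; configuration-space accesses
 * remap it on demand.
 */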
static int
pci_dw_setup_hw(struct pci_dw_softc *sc)
{
	uint32_t reg;
	int rv, i;

	pci_dw_dbi_protect(sc, false);

	/* Setup config registers */
	DBI_WR1(sc, PCIR_CLASS, PCIC_BRIDGE);
	DBI_WR1(sc, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);
	DBI_WR4(sc, PCIR_BAR(0), 4);
	DBI_WR4(sc, PCIR_BAR(1), 0);
	DBI_WR1(sc, PCIR_INTPIN, 1);
	DBI_WR1(sc, PCIR_PRIBUS_1, sc->root_bus);
	DBI_WR1(sc, PCIR_SECBUS_1, sc->sub_bus);
	DBI_WR1(sc, PCIR_SUBBUS_1, sc->bus_end);
	DBI_WR2(sc, PCIR_COMMAND,
	    PCIM_CMD_PORTEN | PCIM_CMD_MEMEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN);
	pci_dw_dbi_protect(sc, true);

	/* Setup outbound memory windows */
	for (i = 0; i < min(sc->num_mem_ranges, sc->num_out_regions - 1);
	    ++i) {
		rv = pci_dw_map_out_atu(sc, i + 1, IATU_CTRL1_TYPE_MEM,
		    sc->mem_ranges[i].host, sc->mem_ranges[i].pci,
		    sc->mem_ranges[i].size);
		if (rv != 0)
			return (rv);
	}

	/* If we have enough regions ... */
	if (sc->num_mem_ranges + 1 < sc->num_out_regions &&
	    sc->io_range.size != 0) {
		/* Setup outbound I/O window */
		rv = pci_dw_map_out_atu(sc, sc->num_mem_ranges + 1,
		    IATU_CTRL1_TYPE_IO, sc->io_range.host, sc->io_range.pci,
		    sc->io_range.size);
		if (rv != 0)
			return (rv);
	}

	/* Adjust number of lanes */
	reg = DBI_RD4(sc, DW_PORT_LINK_CTRL);
	reg &= ~PORT_LINK_CAPABLE(~0);
	switch (sc->num_lanes) {
	case 1:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_1);
		break;
	case 2:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_2);
		break;
	case 4:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_4);
		break;
	case 8:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_8);
		break;
	case 16:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_16);
		break;
	case 32:
		reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_32);
		break;
	default:
		device_printf(sc->dev,
		    "'num-lanes' property has an invalid value: %d\n",
		    sc->num_lanes);
		return (EINVAL);
	}
	DBI_WR4(sc, DW_PORT_LINK_CTRL, reg);

	/* And link width */
	reg = DBI_RD4(sc, DW_GEN2_CTRL);
	reg &= ~GEN2_CTRL_NUM_OF_LANES(~0);
	switch (sc->num_lanes) {
	case 1:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_1);
		break;
	case 2:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_2);
		break;
	case 4:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_4);
		break;
	case 8:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_8);
		break;
	case 16:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_16);
		break;
	case 32:
		reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_32);
		break;
	}
	DBI_WR4(sc, DW_GEN2_CTRL, reg);

	reg = DBI_RD4(sc, DW_GEN2_CTRL);
	reg |= DIRECT_SPEED_CHANGE;
	DBI_WR4(sc, DW_GEN2_CTRL, reg);

	return (0);
}

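/*
 * Parse the DT "ranges" property: record the single I/O range and all
 * 32-bit memory ranges.  Window sizes are clamped to 4GB because the
 * outbound iATU limit register programmed above is only 32 bits wide.
 */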
static int
pci_dw_decode_ranges(struct pci_dw_softc *sc, struct ofw_pci_range *ranges,
    int nranges)
{
	int i, nmem, rv;

	nmem = 0;
	for (i = 0; i < nranges; i++) {
		if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
		    OFW_PCI_PHYS_HI_SPACE_MEM32)
			++nmem;
	}

	sc->mem_ranges = malloc(nmem * sizeof(*sc->mem_ranges), M_DEVBUF,
	    M_WAITOK);
	sc->num_mem_ranges = nmem;

	nmem = 0;
	for (i = 0; i < nranges; i++) {
		if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
		    OFW_PCI_PHYS_HI_SPACE_IO) {
			if (sc->io_range.size != 0) {
				device_printf(sc->dev,
				    "Duplicated IO range found in DT\n");
				rv = ENXIO;
				goto out;
			}

			sc->io_range = ranges[i];
			if (sc->io_range.size > UINT32_MAX) {
				device_printf(sc->dev,
				    "ATU IO window size is too large. "
				    "Up to 4GB windows are supported, "
				    "trimming window size to 4GB\n");
				sc->io_range.size = UINT32_MAX;
			}
		}
		if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
		    OFW_PCI_PHYS_HI_SPACE_MEM32) {
			MPASS(nmem < sc->num_mem_ranges);
			sc->mem_ranges[nmem] = ranges[i];
			if (sc->mem_ranges[nmem].size > UINT32_MAX) {
				device_printf(sc->dev,
				    "ATU MEM window size is too large. "
				    "Up to 4GB windows are supported, "
				    "trimming window size to 4GB\n");
				sc->mem_ranges[nmem].size = UINT32_MAX;
			}
			++nmem;
		}
	}

	MPASS(nmem == sc->num_mem_ranges);

	if (nmem == 0) {
		device_printf(sc->dev,
		    "Missing required memory range in DT\n");
		rv = ENXIO;
		goto out;
	}

	return (0);

out:
	free(sc->mem_ranges, M_DEVBUF);
	return (rv);
}

/*-----------------------------------------------------------------------------
 *
 *  P C I B   I N T E R F A C E
 */

static uint32_t
pci_dw_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct pci_dw_softc *sc;
	struct resource	*res;
	uint32_t data;
	uint64_t addr;
	int type, rv;

	sc = device_get_softc(dev);

	if (!pci_dw_check_dev(sc, bus, slot, func, reg))
		return (0xFFFFFFFFU);

	if (bus == sc->root_bus) {
		res = sc->dbi_res;
	} else {
		addr = IATU_CFG_BUS(bus) | IATU_CFG_SLOT(slot) |
		    IATU_CFG_FUNC(func);
		if (bus == sc->sub_bus)
			type = IATU_CTRL1_TYPE_CFG0;
		else
			type = IATU_CTRL1_TYPE_CFG1;
		rv = pci_dw_map_out_atu(sc, 0, type,
		    sc->cfg_pa, addr, sc->cfg_size);
		if (rv != 0)
			return (0xFFFFFFFFU);
		res = sc->cfg_res;
	}

	switch (bytes) {
	case 1:
		data = bus_read_1(res, reg);
		break;
	case 2:
		data = bus_read_2(res, reg);
		break;
	case 4:
		data = bus_read_4(res, reg);
		break;
	default:
		data = 0xFFFFFFFFU;
		break;
	}

	return (data);
}

static void
pci_dw_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct pci_dw_softc *sc;
	struct resource	*res;
	uint64_t addr;
	int type, rv;

	sc = device_get_softc(dev);
	if (!pci_dw_check_dev(sc, bus, slot, func, reg))
		return;

	if (bus == sc->root_bus) {
		res = sc->dbi_res;
	} else {
		addr = IATU_CFG_BUS(bus) | IATU_CFG_SLOT(slot) |
		    IATU_CFG_FUNC(func);
		if (bus == sc->sub_bus)
			type = IATU_CTRL1_TYPE_CFG0;
		else
			type = IATU_CTRL1_TYPE_CFG1;
		rv = pci_dw_map_out_atu(sc, 0, type,
		    sc->cfg_pa, addr, sc->cfg_size);
		if (rv != 0)
			return;
		res = sc->cfg_res;
	}

	switch (bytes) {
	case 1:
		bus_write_1(res, reg, val);
		break;
	case 2:
		bus_write_2(res, reg, val);
		break;
	case 4:
		bus_write_4(res, reg, val);
		break;
	default:
		break;
	}
}

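/*
 * MSI/MSI-X requests are not handled by the controller itself; they are
 * forwarded to the platform MSI controller resolved from the bridge's DT
 * node via ofw_bus_msimap().
 */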
static int
pci_dw_alloc_msi(device_t pci, device_t child, int count,
    int maxcount, int *irqs)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);

	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
	    irqs));
}

static int
pci_dw_release_msi(device_t pci, device_t child, int count, int *irqs)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);
	return (intr_release_msi(pci, child, msi_parent, count, irqs));
}

static int
pci_dw_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);

	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
}

static int
pci_dw_alloc_msix(device_t pci, device_t child, int *irq)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);
	return (intr_alloc_msix(pci, child, msi_parent, irq));
}

static int
pci_dw_release_msix(device_t pci, device_t child, int irq)
{
	phandle_t msi_parent;
	int rv;

	rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
	    &msi_parent, NULL);
	if (rv != 0)
		return (rv);
	return (intr_release_msix(pci, child, msi_parent, irq));
}

static int
pci_dw_get_id(device_t pci, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	phandle_t node;
	int rv;
	uint32_t rid;
	uint16_t pci_rid;

	if (type != PCI_ID_MSI)
		return (pcib_get_id(pci, child, type, id));

	node = ofw_bus_get_node(pci);
	pci_rid = pci_get_rid(child);

	rv = ofw_bus_msimap(node, pci_rid, NULL, &rid);
	if (rv != 0)
		return (rv);
	*id = rid;

	return (0);
}

/*-----------------------------------------------------------------------------
 *
 *  B U S  /  D E V I C E   I N T E R F A C E
 */
static bus_dma_tag_t
pci_dw_get_dma_tag(device_t dev, device_t child)
{
	struct pci_dw_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

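/*
 * Common attach-time initialization.  Subclass drivers are expected to
 * map the DBI register window (sc->dbi_res) before calling this; the
 * routine then reads the FDT properties, maps the "config" window,
 * detects the iATU mode, programs the bridge and attaches the "pci"
 * child bus.
 */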
int
pci_dw_init(device_t dev)
{
	struct pci_dw_softc *sc;
	int rv, rid;
	bool unroll_mode;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	mtx_init(&sc->mtx, "pci_dw_mtx", NULL, MTX_DEF);

	/* XXX Should this not be configurable? */
	sc->bus_start = 0;
	sc->bus_end = 255;
	sc->root_bus = 0;
	sc->sub_bus = 1;

	/* Read FDT properties */
	if (!sc->coherent)
		sc->coherent = OF_hasprop(sc->node, "dma-coherent");

	rv = OF_getencprop(sc->node, "num-lanes", &sc->num_lanes,
	    sizeof(sc->num_lanes));
	if (rv != sizeof(sc->num_lanes))
		sc->num_lanes = 1;
	if (sc->num_lanes != 1 && sc->num_lanes != 2 &&
	    sc->num_lanes != 4 && sc->num_lanes != 8) {
		device_printf(dev,
		    "invalid number of lanes: %d\n", sc->num_lanes);
		sc->num_lanes = 0;
		rv = ENXIO;
		goto out;
	}

	rid = 0;
	rv = ofw_bus_find_string_index(sc->node, "reg-names", "config", &rid);
	if (rv != 0) {
		device_printf(dev, "Cannot get config space memory\n");
		rv = ENXIO;
		goto out;
	}
	sc->cfg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->cfg_res == NULL) {
		device_printf(dev, "Cannot allocate config space (rid: %d)\n",
		    rid);
		rv = ENXIO;
		goto out;
	}

	/* Fill up config region related variables */
	sc->cfg_size = rman_get_size(sc->cfg_res);
	sc->cfg_pa = rman_get_start(sc->cfg_res);

	if (bootverbose)
		device_printf(dev, "Bus is%s cache-coherent\n",
		    sc->coherent ? "" : " not");
	rv = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (rv != 0)
		goto out;

	rv = ofw_pcib_init(dev);
	if (rv != 0)
		goto out;
	rv = pci_dw_decode_ranges(sc, sc->ofw_pci.sc_range,
	    sc->ofw_pci.sc_nrange);
	if (rv != 0)
		goto out;

	unroll_mode = pci_dw_detect_atu_unroll(sc);
	if (bootverbose)
		device_printf(dev, "Using iATU %s mode\n",
		    unroll_mode ? "unroll" : "legacy");

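	/*
	 * In unroll mode the iATU registers live either in a dedicated
	 * "atu" reg window or, when that entry is absent, at a fixed
	 * offset within the DBI window.
	 */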
"unroll" : "legacy"); 786 if (unroll_mode) { 787 rid = 0; 788 rv = ofw_bus_find_string_index(sc->node, "reg-names", "atu", &rid); 789 if (rv == 0) { 790 sc->iatu_ur_res = bus_alloc_resource_any(dev, 791 SYS_RES_MEMORY, &rid, RF_ACTIVE); 792 if (sc->iatu_ur_res == NULL) { 793 device_printf(dev, 794 "Cannot allocate iATU space (rid: %d)\n", 795 rid); 796 rv = ENXIO; 797 goto out; 798 } 799 sc->iatu_ur_offset = 0; 800 sc->iatu_ur_size = rman_get_size(sc->iatu_ur_res); 801 } else if (rv == ENOENT) { 802 sc->iatu_ur_res = sc->dbi_res; 803 sc->iatu_ur_offset = DW_DEFAULT_IATU_UR_DBI_OFFSET; 804 sc->iatu_ur_size = DW_DEFAULT_IATU_UR_DBI_SIZE; 805 } else { 806 device_printf(dev, "Cannot get iATU space memory\n"); 807 rv = ENXIO; 808 goto out; 809 } 810 } 811 812 rv = pci_dw_detect_out_atu_regions(sc); 813 if (rv != 0) 814 goto out; 815 816 if (bootverbose) 817 device_printf(sc->dev, "Detected outbound iATU regions: %d\n", 818 sc->num_out_regions); 819 820 rv = pci_dw_setup_hw(sc); 821 if (rv != 0) 822 goto out; 823 824 device_add_child(dev, "pci", -1); 825 826 return (0); 827 out: 828 /* XXX Cleanup */ 829 return (rv); 830 } 831 832 static device_method_t pci_dw_methods[] = { 833 /* Bus interface */ 834 DEVMETHOD(bus_get_dma_tag, pci_dw_get_dma_tag), 835 836 /* pcib interface */ 837 DEVMETHOD(pcib_read_config, pci_dw_read_config), 838 DEVMETHOD(pcib_write_config, pci_dw_write_config), 839 DEVMETHOD(pcib_alloc_msi, pci_dw_alloc_msi), 840 DEVMETHOD(pcib_release_msi, pci_dw_release_msi), 841 DEVMETHOD(pcib_alloc_msix, pci_dw_alloc_msix), 842 DEVMETHOD(pcib_release_msix, pci_dw_release_msix), 843 DEVMETHOD(pcib_map_msi, pci_dw_map_msi), 844 DEVMETHOD(pcib_get_id, pci_dw_get_id), 845 846 /* OFW bus interface */ 847 DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), 848 DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), 849 DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), 850 DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), 851 DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), 852 853 /* PCI DW interface */ 854 DEVMETHOD(pci_dw_dbi_read, pci_dw_dbi_read), 855 DEVMETHOD(pci_dw_dbi_write, pci_dw_dbi_write), 856 DEVMETHOD_END 857 }; 858 859 DEFINE_CLASS_1(pcib, pci_dw_driver, pci_dw_methods, 860 sizeof(struct pci_dw_softc), ofw_pcib_driver); 861