1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 29 /* Rockchip PCIe controller driver */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/gpio.h> 35 #include <sys/proc.h> 36 #include <sys/kernel.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/module.h> 40 #include <sys/mutex.h> 41 #include <sys/rman.h> 42 43 #include <machine/bus.h> 44 #include <machine/intr.h> 45 #include <machine/resource.h> 46 47 #include <dev/clk/clk.h> 48 #include <dev/hwreset/hwreset.h> 49 #include <dev/extres/phy/phy.h> 50 #include <dev/extres/regulator/regulator.h> 51 #include <dev/gpio/gpiobusvar.h> 52 #include <dev/ofw/ofw_bus.h> 53 #include <dev/ofw/ofw_bus_subr.h> 54 #include <dev/ofw/ofw_pci.h> 55 #include <dev/ofw/ofwpci.h> 56 #include <dev/pci/pcivar.h> 57 #include <dev/pci/pcireg.h> 58 #include <dev/pci/pcib_private.h> 59 60 #include <dev/ofw/ofw_bus.h> 61 62 #include "pcib_if.h" 63 64 #define ATU_CFG_BUS(x) (((x) & 0x0ff) << 20) 65 #define ATU_CFG_SLOT(x) (((x) & 0x01f) << 15) 66 #define ATU_CFG_FUNC(x) (((x) & 0x007) << 12) 67 #define ATU_CFG_REG(x) (((x) & 0xfff) << 0) 68 69 #define ATU_TYPE_MEM 0x2 70 #define ATU_TYPE_IO 0x6 71 #define ATU_TYPE_CFG0 0xA 72 #define ATU_TYPE_CFG1 0xB 73 #define ATY_TYPE_NOR_MSG 0xC 74 75 #define ATU_OB_REGIONS 33 76 #define ATU_OB_REGION_SHIFT 20 77 #define ATU_OB_REGION_SIZE (1 << ATU_OB_REGION_SHIFT) 78 #define ATU_OB_REGION_0_SIZE (( ATU_OB_REGIONS - 1) * ATU_OB_REGION_SIZE) 79 80 #define ATU_IB_REGIONS 3 81 82 #define PCIE_CLIENT_BASIC_STRAP_CONF 0x000000 83 #define STRAP_CONF_GEN_2 (1 << 7) 84 #define STRAP_CONF_MODE_RC (1 << 6) 85 #define STRAP_CONF_LANES(n) ((((n) / 2) & 0x3) << 4) 86 #define STRAP_CONF_ARI_EN (1 << 3) 87 #define STRAP_CONF_SR_IOV_EN (1 << 2) 88 #define STRAP_CONF_LINK_TRAIN_EN (1 << 1) 89 #define STRAP_CONF_CONF_EN (1 << 0) 90 #define PCIE_CLIENT_HOT_RESET_CTRL 0x000018 91 #define HOT_RESET_CTRL_LINK_DOWN_RESET (1 << 1) 92 #define HOT_RESET_CTRL_HOT_RESET_IN (1 << 0) 93 #define PCIE_CLIENT_BASIC_STATUS0 0x000044 94 #define PCIE_CLIENT_BASIC_STATUS1 0x000048 95 #define STATUS1_LINK_ST_GET(x) (((x) >> 20) & 0x3) 96 #define STATUS1_LINK_ST_UP 3 97 #define PCIE_CLIENT_INT_MASK 0x00004C 98 #define PCIE_CLIENT_INT_STATUS 0x000050 99 #define PCIE_CLIENT_INT_LEGACY_DONE (1 << 15) 100 #define PCIE_CLIENT_INT_MSG (1 << 14) 101 #define PCIE_CLIENT_INT_HOT_RST (1 << 13) 102 #define PCIE_CLIENT_INT_DPA (1 << 12) 103 #define PCIE_CLIENT_INT_FATAL_ERR (1 << 11) 104 #define PCIE_CLIENT_INT_NFATAL_ERR (1 << 10) 105 #define PCIE_CLIENT_INT_CORR_ERR (1 << 9) 106 #define PCIE_CLIENT_INT_INTD (1 << 8) 107 #define PCIE_CLIENT_INT_INTC (1 << 7) 108 #define PCIE_CLIENT_INT_INTB (1 << 6) 109 #define PCIE_CLIENT_INT_INTA (1 << 5) 110 #define PCIE_CLIENT_INT_LOCAL (1 << 4) 111 #define PCIE_CLIENT_INT_UDMA (1 << 3) 112 #define PCIE_CLIENT_INT_PHY (1 << 2) 113 #define PCIE_CLIENT_INT_HOT_PLUG (1 << 1) 114 #define PCIE_CLIENT_INT_PWR_STCG (1 << 0) 115 #define PCIE_CLIENT_INT_LEGACY (PCIE_CLIENT_INT_INTA | \ 116 PCIE_CLIENT_INT_INTB | \ 117 PCIE_CLIENT_INT_INTC | \ 118 PCIE_CLIENT_INT_INTD) 119 120 #define PCIE_CORE_CTRL0 0x900000 121 #define CORE_CTRL_LANES_GET(x) (((x) >> 20) & 0x3) 122 #define PCIE_CORE_CTRL1 0x900004 123 #define PCIE_CORE_CONFIG_VENDOR 0x900044 124 #define PCIE_CORE_INT_STATUS 0x90020c 125 #define PCIE_CORE_INT_PRFPE (1 << 0) 126 #define PCIE_CORE_INT_CRFPE (1 << 1) 127 #define PCIE_CORE_INT_RRPE (1 << 2) 128 #define PCIE_CORE_INT_PRFO (1 << 3) 129 #define PCIE_CORE_INT_CRFO (1 << 4) 130 #define PCIE_CORE_INT_RT (1 << 5) 131 #define PCIE_CORE_INT_RTR (1 << 6) 132 #define PCIE_CORE_INT_PE (1 << 7) 133 #define PCIE_CORE_INT_MTR (1 << 8) 134 #define PCIE_CORE_INT_UCR (1 << 9) 135 #define PCIE_CORE_INT_FCE (1 << 10) 136 #define PCIE_CORE_INT_CT (1 << 11) 137 #define PCIE_CORE_INT_UTC (1 << 18) 138 #define PCIE_CORE_INT_MMVC (1 << 19) 139 #define PCIE_CORE_INT_MASK 0x900210 140 #define PCIE_CORE_PHY_FUNC_CONF 0x9002C0 141 #define PCIE_CORE_RC_BAR_CONF 0x900300 142 143 #define PCIE_RC_CONFIG_STD_BASE 0x800000 144 #define PCIE_RC_CONFIG_PRIV_BASE 0xA00000 145 #define PCIE_RC_CONFIG_DCSR 0xA000C8 146 #define PCIE_RC_CONFIG_DCSR_MPS_MASK (0x7 << 5) 147 #define PCIE_RC_CONFIG_DCSR_MPS_128 (0 << 5) 148 #define PCIE_RC_CONFIG_DCSR_MPS_256 (1 << 5) 149 #define PCIE_RC_CONFIG_LINK_CAP 0xA00CC 150 #define PCIE_RC_CONFIG_LINK_CAP_L0S (1 << 10) 151 152 #define PCIE_RC_CONFIG_LCS 0xA000D0 153 #define PCIE_RC_CONFIG_THP_CAP 0xA00274 154 #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK 0xFFF00000 155 156 #define PCIE_CORE_OB_ADDR0(n) (0xC00000 + 0x20 * (n) + 0x00) 157 #define PCIE_CORE_OB_ADDR1(n) (0xC00000 + 0x20 * (n) + 0x04) 158 #define PCIE_CORE_OB_DESC0(n) (0xC00000 + 0x20 * (n) + 0x08) 159 #define PCIE_CORE_OB_DESC1(n) (0xC00000 + 0x20 * (n) + 0x0C) 160 #define PCIE_CORE_OB_DESC2(n) (0xC00000 + 0x20 * (n) + 0x10) 161 #define PCIE_CORE_OB_DESC3(n) (0xC00000 + 0x20 * (n) + 0x14) 162 163 #define PCIE_CORE_IB_ADDR0(n) (0xC00800 + 0x8 * (n) + 0x00) 164 #define PCIE_CORE_IB_ADDR1(n) (0xC00800 + 0x8 * (n) + 0x04) 165 166 #define PRIV_CFG_RD4(sc, reg) \ 167 (uint32_t)rk_pcie_local_cfg_read(sc, true, reg, 4) 168 #define PRIV_CFG_RD2(sc, reg) \ 169 (uint16_t)rk_pcie_local_cfg_read(sc, true, reg, 2) 170 #define PRIV_CFG_RD1(sc, reg) \ 171 (uint8_t)rk_pcie_local_cfg_read(sc, true, reg, 1) 172 #define PRIV_CFG_WR4(sc, reg, val) \ 173 rk_pcie_local_cfg_write(sc, true, reg, val, 4) 174 #define PRIV_CFG_WR2(sc, reg, val) \ 175 rk_pcie_local_cfg_write(sc, true, reg, val, 2) 176 #define PRIV_CFG_WR1(sc, reg, val) \ 177 rk_pcie_local_cfg_write(sc, true, reg, val, 1) 178 179 #define APB_WR4(_sc, _r, _v) bus_write_4((_sc)->apb_mem_res, (_r), (_v)) 180 #define APB_RD4(_sc, _r) bus_read_4((_sc)->apb_mem_res, (_r)) 181 182 #define MAX_LANES 4 183 184 #define RK_PCIE_ENABLE_MSI 185 #define RK_PCIE_ENABLE_MSIX 186 187 struct rk_pcie_softc { 188 struct ofw_pci_softc ofw_pci; /* Must be first */ 189 190 struct resource *axi_mem_res; 191 struct resource *apb_mem_res; 192 struct resource *client_irq_res; 193 struct resource *legacy_irq_res; 194 struct resource *sys_irq_res; 195 void *client_irq_cookie; 196 void *legacy_irq_cookie; 197 void *sys_irq_cookie; 198 199 device_t dev; 200 phandle_t node; 201 struct mtx mtx; 202 203 struct ofw_pci_range mem_range; 204 struct ofw_pci_range pref_mem_range; 205 struct ofw_pci_range io_range; 206 207 bool coherent; 208 bus_dma_tag_t dmat; 209 210 int num_lanes; 211 bool link_is_gen2; 212 bool no_l0s; 213 214 u_int bus_start; 215 u_int bus_end; 216 u_int root_bus; 217 u_int sub_bus; 218 219 regulator_t supply_12v; 220 regulator_t supply_3v3; 221 regulator_t supply_1v8; 222 regulator_t supply_0v9; 223 hwreset_t hwreset_core; 224 hwreset_t hwreset_mgmt; 225 hwreset_t hwreset_mgmt_sticky; 226 hwreset_t hwreset_pipe; 227 hwreset_t hwreset_pm; 228 hwreset_t hwreset_aclk; 229 hwreset_t hwreset_pclk; 230 clk_t clk_aclk; 231 clk_t clk_aclk_perf; 232 clk_t clk_hclk; 233 clk_t clk_pm; 234 phy_t phys[MAX_LANES]; 235 gpio_pin_t gpio_ep; 236 }; 237 238 /* Compatible devices. */ 239 static struct ofw_compat_data compat_data[] = { 240 {"rockchip,rk3399-pcie", 1}, 241 {NULL, 0}, 242 }; 243 244 static uint32_t 245 rk_pcie_local_cfg_read(struct rk_pcie_softc *sc, bool priv, u_int reg, 246 int bytes) 247 { 248 uint32_t val; 249 bus_addr_t base; 250 251 if (priv) 252 base = PCIE_RC_CONFIG_PRIV_BASE; 253 else 254 base = PCIE_RC_CONFIG_STD_BASE; 255 256 switch (bytes) { 257 case 4: 258 val = bus_read_4(sc->apb_mem_res, base + reg); 259 break; 260 case 2: 261 val = bus_read_2(sc->apb_mem_res, base + reg); 262 break; 263 case 1: 264 val = bus_read_1(sc->apb_mem_res, base + reg); 265 break; 266 default: 267 val = 0xFFFFFFFF; 268 } 269 return (val); 270 } 271 272 static void 273 rk_pcie_local_cfg_write(struct rk_pcie_softc *sc, bool priv, u_int reg, 274 uint32_t val, int bytes) 275 { 276 uint32_t val2; 277 bus_addr_t base; 278 279 if (priv) 280 base = PCIE_RC_CONFIG_PRIV_BASE; 281 else 282 base = PCIE_RC_CONFIG_STD_BASE; 283 284 switch (bytes) { 285 case 4: 286 bus_write_4(sc->apb_mem_res, base + reg, val); 287 break; 288 case 2: 289 val2 = bus_read_4(sc->apb_mem_res, base + (reg & ~3)); 290 val2 &= ~(0xffff << ((reg & 3) << 3)); 291 val2 |= ((val & 0xffff) << ((reg & 3) << 3)); 292 bus_write_4(sc->apb_mem_res, base + (reg & ~3), val2); 293 break; 294 case 1: 295 val2 = bus_read_4(sc->apb_mem_res, base + (reg & ~3)); 296 val2 &= ~(0xff << ((reg & 3) << 3)); 297 val2 |= ((val & 0xff) << ((reg & 3) << 3)); 298 bus_write_4(sc->apb_mem_res, base + (reg & ~3), val2); 299 break; 300 } 301 } 302 303 static bool 304 rk_pcie_check_dev(struct rk_pcie_softc *sc, u_int bus, u_int slot, u_int func, 305 u_int reg) 306 { 307 uint32_t val; 308 309 if (bus < sc->bus_start || bus > sc->bus_end || slot > PCI_SLOTMAX || 310 func > PCI_FUNCMAX || reg > PCIE_REGMAX) 311 return (false); 312 313 if (bus == sc->root_bus) { 314 /* we have only 1 device with 1 function root port */ 315 if (slot > 0 || func > 0) 316 return (false); 317 return (true); 318 } 319 320 /* link is needed for accessing non-root busses */ 321 val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1); 322 if (STATUS1_LINK_ST_GET(val) != STATUS1_LINK_ST_UP) 323 return (false); 324 325 /* only one device can be on first subordinate bus */ 326 if (bus == sc->sub_bus && slot != 0 ) 327 return (false); 328 return (true); 329 } 330 331 static void 332 rk_pcie_map_out_atu(struct rk_pcie_softc *sc, int idx, int type, 333 int num_bits, uint64_t pa) 334 { 335 uint32_t addr0; 336 uint64_t max_size __diagused; 337 338 /* Check HW constrains */ 339 max_size = idx == 0 ? ATU_OB_REGION_0_SIZE: ATU_OB_REGION_SIZE; 340 KASSERT(idx < ATU_OB_REGIONS, ("Invalid region index: %d\n", idx)); 341 KASSERT(num_bits >= 7 && num_bits <= 63, 342 ("Bit width of region is invalid: %d\n", num_bits)); 343 KASSERT(max_size <= (1ULL << (num_bits + 1)), 344 ("Bit width is invalid for given region[%d]: %d\n", idx, num_bits)); 345 346 addr0 = (uint32_t)pa & 0xFFFFFF00; 347 addr0 |= num_bits; 348 APB_WR4(sc, PCIE_CORE_OB_ADDR0(idx), addr0); 349 APB_WR4(sc, PCIE_CORE_OB_ADDR1(idx), (uint32_t)(pa >> 32)); 350 APB_WR4(sc, PCIE_CORE_OB_DESC0(idx), 1 << 23 | type); 351 APB_WR4(sc, PCIE_CORE_OB_DESC1(idx), sc->root_bus); 352 353 /* Readback for sync */ 354 APB_RD4(sc, PCIE_CORE_OB_DESC1(idx)); 355 } 356 357 static void 358 rk_pcie_map_cfg_atu(struct rk_pcie_softc *sc, int idx, int type) 359 { 360 361 /* Check HW constrains */ 362 KASSERT(idx < ATU_OB_REGIONS, ("Invalid region index: %d\n", idx)); 363 364 /* 365 * Config window is only 25 bits width, so we cannot encode full bus 366 * range into it. Remaining bits of bus number should be taken from 367 * DESC1 field. 368 */ 369 APB_WR4(sc, PCIE_CORE_OB_ADDR0(idx), 25 - 1); 370 APB_WR4(sc, PCIE_CORE_OB_ADDR1(idx), 0); 371 APB_WR4(sc, PCIE_CORE_OB_DESC0(idx), 1 << 23 | type); 372 APB_WR4(sc, PCIE_CORE_OB_DESC1(idx), sc->root_bus); 373 374 /* Readback for sync */ 375 APB_RD4(sc, PCIE_CORE_OB_DESC1(idx)); 376 377 } 378 379 static void 380 rk_pcie_map_in_atu(struct rk_pcie_softc *sc, int idx, int num_bits, uint64_t pa) 381 { 382 uint32_t addr0; 383 384 /* Check HW constrains */ 385 KASSERT(idx < ATU_IB_REGIONS, ("Invalid region index: %d\n", idx)); 386 KASSERT(num_bits >= 7 && num_bits <= 63, 387 ("Bit width of region is invalid: %d\n", num_bits)); 388 389 addr0 = (uint32_t)pa & 0xFFFFFF00; 390 addr0 |= num_bits; 391 APB_WR4(sc, PCIE_CORE_IB_ADDR0(idx), addr0); 392 APB_WR4(sc, PCIE_CORE_IB_ADDR1(idx), (uint32_t)(pa >> 32)); 393 394 /* Readback for sync */ 395 APB_RD4(sc, PCIE_CORE_IB_ADDR1(idx)); 396 } 397 398 static int 399 rk_pcie_decode_ranges(struct rk_pcie_softc *sc, struct ofw_pci_range *ranges, 400 int nranges) 401 { 402 int i; 403 404 for (i = 0; i < nranges; i++) { 405 switch(ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { 406 case OFW_PCI_PHYS_HI_SPACE_IO: 407 if (sc->io_range.size != 0) { 408 device_printf(sc->dev, 409 "Duplicated IO range found in DT\n"); 410 return (ENXIO); 411 } 412 sc->io_range = ranges[i]; 413 break; 414 case OFW_PCI_PHYS_HI_SPACE_MEM32: 415 case OFW_PCI_PHYS_HI_SPACE_MEM64: 416 if (ranges[i].pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) { 417 if (sc->pref_mem_range.size != 0) { 418 device_printf(sc->dev, 419 "Duplicated memory range found " 420 "in DT\n"); 421 return (ENXIO); 422 } 423 sc->pref_mem_range = ranges[i]; 424 } else { 425 if (sc->mem_range.size != 0) { 426 device_printf(sc->dev, 427 "Duplicated memory range found " 428 "in DT\n"); 429 return (ENXIO); 430 } 431 sc->mem_range = ranges[i]; 432 } 433 } 434 } 435 if (sc->mem_range.size == 0) { 436 device_printf(sc->dev, 437 " At least memory range should be defined in DT.\n"); 438 return (ENXIO); 439 } 440 return (0); 441 } 442 443 /*----------------------------------------------------------------------------- 444 * 445 * P C I B I N T E R F A C E 446 */ 447 static uint32_t 448 rk_pcie_read_config(device_t dev, u_int bus, u_int slot, 449 u_int func, u_int reg, int bytes) 450 { 451 struct rk_pcie_softc *sc; 452 uint32_t d32, data; 453 uint16_t d16; 454 uint8_t d8; 455 uint64_t addr; 456 int type, ret; 457 458 sc = device_get_softc(dev); 459 460 if (!rk_pcie_check_dev(sc, bus, slot, func, reg)) 461 return (0xFFFFFFFFU); 462 if (bus == sc->root_bus) 463 return (rk_pcie_local_cfg_read(sc, false, reg, bytes)); 464 465 addr = ATU_CFG_BUS(bus) | ATU_CFG_SLOT(slot) | ATU_CFG_FUNC(func) | 466 ATU_CFG_REG(reg); 467 type = bus == sc->sub_bus ? ATU_TYPE_CFG0: ATU_TYPE_CFG1; 468 rk_pcie_map_cfg_atu(sc, 0, type); 469 470 ret = -1; 471 switch (bytes) { 472 case 1: 473 ret = bus_peek_1(sc->axi_mem_res, addr, &d8); 474 data = d8; 475 break; 476 case 2: 477 ret = bus_peek_2(sc->axi_mem_res, addr, &d16); 478 data = d16; 479 break; 480 case 4: 481 ret = bus_peek_4(sc->axi_mem_res, addr, &d32); 482 data = d32; 483 break; 484 } 485 if (ret != 0) 486 data = 0xFFFFFFFF; 487 return (data); 488 } 489 490 static void 491 rk_pcie_write_config(device_t dev, u_int bus, u_int slot, 492 u_int func, u_int reg, uint32_t val, int bytes) 493 { 494 struct rk_pcie_softc *sc; 495 uint64_t addr; 496 int type; 497 498 sc = device_get_softc(dev); 499 500 if (!rk_pcie_check_dev(sc, bus, slot, func, reg)) 501 return; 502 503 if (bus == sc->root_bus) 504 return (rk_pcie_local_cfg_write(sc, false, reg, val, bytes)); 505 506 addr = ATU_CFG_BUS(bus) | ATU_CFG_SLOT(slot) | ATU_CFG_FUNC(func) | 507 ATU_CFG_REG(reg); 508 type = bus == sc->sub_bus ? ATU_TYPE_CFG0: ATU_TYPE_CFG1; 509 rk_pcie_map_cfg_atu(sc, 0, type); 510 511 switch (bytes) { 512 case 1: 513 bus_poke_1(sc->axi_mem_res, addr, (uint8_t)val); 514 break; 515 case 2: 516 bus_poke_2(sc->axi_mem_res, addr, (uint16_t)val); 517 break; 518 case 4: 519 bus_poke_4(sc->axi_mem_res, addr, val); 520 break; 521 default: 522 break; 523 } 524 } 525 526 #ifdef RK_PCIE_ENABLE_MSI 527 static int 528 rk_pcie_alloc_msi(device_t pci, device_t child, int count, 529 int maxcount, int *irqs) 530 { 531 phandle_t msi_parent; 532 int rv; 533 534 rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), 535 &msi_parent, NULL); 536 if (rv != 0) 537 return (rv); 538 539 rv = intr_alloc_msi(pci, child, msi_parent, count, maxcount,irqs); 540 return (rv); 541 } 542 543 static int 544 rk_pcie_release_msi(device_t pci, device_t child, int count, int *irqs) 545 { 546 phandle_t msi_parent; 547 int rv; 548 549 rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), 550 &msi_parent, NULL); 551 if (rv != 0) 552 return (rv); 553 rv = intr_release_msi(pci, child, msi_parent, count, irqs); 554 return (rv); 555 } 556 #endif 557 558 static int 559 rk_pcie_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, 560 uint32_t *data) 561 { 562 phandle_t msi_parent; 563 int rv; 564 565 rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), 566 &msi_parent, NULL); 567 if (rv != 0) 568 return (rv); 569 rv = intr_map_msi(pci, child, msi_parent, irq, addr, data); 570 return (rv); 571 } 572 573 #ifdef RK_PCIE_ENABLE_MSIX 574 static int 575 rk_pcie_alloc_msix(device_t pci, device_t child, int *irq) 576 { 577 phandle_t msi_parent; 578 int rv; 579 580 rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), 581 &msi_parent, NULL); 582 if (rv != 0) 583 return (rv); 584 rv = intr_alloc_msix(pci, child, msi_parent, irq); 585 return (rv); 586 } 587 588 static int 589 rk_pcie_release_msix(device_t pci, device_t child, int irq) 590 { 591 phandle_t msi_parent; 592 int rv; 593 594 rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), 595 &msi_parent, NULL); 596 if (rv != 0) 597 return (rv); 598 rv = intr_release_msix(pci, child, msi_parent, irq); 599 return (rv); 600 } 601 #endif 602 603 static int 604 rk_pcie_get_id(device_t pci, device_t child, enum pci_id_type type, 605 uintptr_t *id) 606 { 607 phandle_t node; 608 int rv; 609 uint32_t rid; 610 uint16_t pci_rid; 611 612 if (type != PCI_ID_MSI) 613 return (pcib_get_id(pci, child, type, id)); 614 615 node = ofw_bus_get_node(pci); 616 pci_rid = pci_get_rid(child); 617 618 rv = ofw_bus_msimap(node, pci_rid, NULL, &rid); 619 if (rv != 0) 620 return (rv); 621 622 *id = rid; 623 return (0); 624 } 625 626 static int 627 rk_pcie_route_interrupt(device_t bus, device_t dev, int pin) 628 { 629 struct rk_pcie_softc *sc; 630 u_int irq; 631 632 sc = device_get_softc(bus); 633 irq = intr_map_clone_irq(rman_get_start(sc->legacy_irq_res)); 634 device_printf(bus, "route pin %d for device %d.%d to %u\n", 635 pin, pci_get_slot(dev), pci_get_function(dev), irq); 636 637 return (irq); 638 } 639 640 /*----------------------------------------------------------------------------- 641 * 642 * B U S / D E V I C E I N T E R F A C E 643 */ 644 static int 645 rk_pcie_parse_fdt_resources(struct rk_pcie_softc *sc) 646 { 647 int i, rv; 648 char buf[16]; 649 650 /* Regulators. All are optional. */ 651 rv = regulator_get_by_ofw_property(sc->dev, 0, 652 "vpcie12v-supply", &sc->supply_12v); 653 if (rv != 0 && rv != ENOENT) { 654 device_printf(sc->dev,"Cannot get 'vpcie12' regulator\n"); 655 return (ENXIO); 656 } 657 rv = regulator_get_by_ofw_property(sc->dev, 0, 658 "vpcie3v3-supply", &sc->supply_3v3); 659 if (rv != 0 && rv != ENOENT) { 660 device_printf(sc->dev,"Cannot get 'vpcie3v3' regulator\n"); 661 return (ENXIO); 662 } 663 rv = regulator_get_by_ofw_property(sc->dev, 0, 664 "vpcie1v8-supply", &sc->supply_1v8); 665 if (rv != 0 && rv != ENOENT) { 666 device_printf(sc->dev,"Cannot get 'vpcie1v8' regulator\n"); 667 return (ENXIO); 668 } 669 rv = regulator_get_by_ofw_property(sc->dev, 0, 670 "vpcie0v9-supply", &sc->supply_0v9); 671 if (rv != 0 && rv != ENOENT) { 672 device_printf(sc->dev,"Cannot get 'vpcie0v9' regulator\n"); 673 return (ENXIO); 674 } 675 676 /* Resets. */ 677 rv = hwreset_get_by_ofw_name(sc->dev, 0, "core", &sc->hwreset_core); 678 if (rv != 0) { 679 device_printf(sc->dev, "Cannot get 'core' reset\n"); 680 return (ENXIO); 681 } 682 rv = hwreset_get_by_ofw_name(sc->dev, 0, "mgmt", &sc->hwreset_mgmt); 683 if (rv != 0) { 684 device_printf(sc->dev, "Cannot get 'mgmt' reset\n"); 685 return (ENXIO); 686 } 687 rv = hwreset_get_by_ofw_name(sc->dev, 0, "mgmt-sticky", 688 &sc->hwreset_mgmt_sticky); 689 if (rv != 0) { 690 device_printf(sc->dev, "Cannot get 'mgmt-sticky' reset\n"); 691 return (ENXIO); 692 } 693 rv = hwreset_get_by_ofw_name(sc->dev, 0, "pipe", &sc->hwreset_pipe); 694 if (rv != 0) { 695 device_printf(sc->dev, "Cannot get 'pipe' reset\n"); 696 return (ENXIO); 697 } 698 rv = hwreset_get_by_ofw_name(sc->dev, 0, "pm", &sc->hwreset_pm); 699 if (rv != 0) { 700 device_printf(sc->dev, "Cannot get 'pm' reset\n"); 701 return (ENXIO); 702 } 703 rv = hwreset_get_by_ofw_name(sc->dev, 0, "aclk", &sc->hwreset_aclk); 704 if (rv != 0) { 705 device_printf(sc->dev, "Cannot get 'aclk' reset\n"); 706 return (ENXIO); 707 } 708 rv = hwreset_get_by_ofw_name(sc->dev, 0, "pclk", &sc->hwreset_pclk); 709 if (rv != 0) { 710 device_printf(sc->dev, "Cannot get 'pclk' reset\n"); 711 return (ENXIO); 712 } 713 714 /* Clocks. */ 715 rv = clk_get_by_ofw_name(sc->dev, 0, "aclk", &sc->clk_aclk); 716 if (rv != 0) { 717 device_printf(sc->dev, "Cannot get 'aclk' clock\n"); 718 return (ENXIO); 719 } 720 rv = clk_get_by_ofw_name(sc->dev, 0, "aclk-perf", &sc->clk_aclk_perf); 721 if (rv != 0) { 722 device_printf(sc->dev, "Cannot get 'aclk-perf' clock\n"); 723 return (ENXIO); 724 } 725 rv = clk_get_by_ofw_name(sc->dev, 0, "hclk", &sc->clk_hclk); 726 if (rv != 0) { 727 device_printf(sc->dev, "Cannot get 'hclk' clock\n"); 728 return (ENXIO); 729 } 730 rv = clk_get_by_ofw_name(sc->dev, 0, "pm", &sc->clk_pm); 731 if (rv != 0) { 732 device_printf(sc->dev, "Cannot get 'pm' clock\n"); 733 return (ENXIO); 734 } 735 736 /* Phys. */ 737 for (i = 0; i < MAX_LANES; i++ ) { 738 sprintf (buf, "pcie-phy-%d", i); 739 rv = phy_get_by_ofw_name(sc->dev, 0, buf, sc->phys + i); 740 if (rv != 0) { 741 device_printf(sc->dev, "Cannot get '%s' phy\n", buf); 742 return (ENXIO); 743 } 744 } 745 746 /* GPIO for PERST#. Optional */ 747 rv = gpio_pin_get_by_ofw_property(sc->dev, sc->node, "ep-gpios", 748 &sc->gpio_ep); 749 if (rv != 0 && rv != ENOENT) { 750 device_printf(sc->dev, "Cannot get 'ep-gpios' gpio\n"); 751 return (ENXIO); 752 } 753 754 return (0); 755 } 756 757 static int 758 rk_pcie_enable_resources(struct rk_pcie_softc *sc) 759 { 760 int i, rv; 761 uint32_t val; 762 763 /* Assert all resets */ 764 rv = hwreset_assert(sc->hwreset_pclk); 765 if (rv != 0) { 766 device_printf(sc->dev, "Cannot assert 'pclk' reset\n"); 767 return (rv); 768 } 769 rv = hwreset_assert(sc->hwreset_aclk); 770 if (rv != 0) { 771 device_printf(sc->dev, "Cannot assert 'aclk' reset\n"); 772 return (rv); 773 } 774 rv = hwreset_assert(sc->hwreset_pm); 775 if (rv != 0) { 776 device_printf(sc->dev, "Cannot assert 'pm' reset\n"); 777 return (rv); 778 } 779 rv = hwreset_assert(sc->hwreset_pipe); 780 if (rv != 0) { 781 device_printf(sc->dev, "Cannot assert 'pipe' reset\n"); 782 return (rv); 783 } 784 rv = hwreset_assert(sc->hwreset_mgmt_sticky); 785 if (rv != 0) { 786 device_printf(sc->dev, "Cannot assert 'mgmt_sticky' reset\n"); 787 return (rv); 788 } 789 rv = hwreset_assert(sc->hwreset_mgmt); 790 if (rv != 0) { 791 device_printf(sc->dev, "Cannot assert 'hmgmt' reset\n"); 792 return (rv); 793 } 794 rv = hwreset_assert(sc->hwreset_core); 795 if (rv != 0) { 796 device_printf(sc->dev, "Cannot assert 'hcore' reset\n"); 797 return (rv); 798 } 799 DELAY(10000); 800 801 /* Enable clockls */ 802 rv = clk_enable(sc->clk_aclk); 803 if (rv != 0) { 804 device_printf(sc->dev, "Cannot enable 'aclk' clock\n"); 805 return (rv); 806 } 807 rv = clk_enable(sc->clk_aclk_perf); 808 if (rv != 0) { 809 device_printf(sc->dev, "Cannot enable 'aclk_perf' clock\n"); 810 return (rv); 811 } 812 rv = clk_enable(sc->clk_hclk); 813 if (rv != 0) { 814 device_printf(sc->dev, "Cannot enable 'hclk' clock\n"); 815 return (rv); 816 } 817 rv = clk_enable(sc->clk_pm); 818 if (rv != 0) { 819 device_printf(sc->dev, "Cannot enable 'pm' clock\n"); 820 return (rv); 821 } 822 823 /* Power up regulators */ 824 if (sc->supply_12v != NULL) { 825 rv = regulator_enable(sc->supply_12v); 826 if (rv != 0) { 827 device_printf(sc->dev, 828 "Cannot enable 'vpcie12' regulator\n"); 829 return (rv); 830 } 831 } 832 if (sc->supply_3v3 != NULL) { 833 rv = regulator_enable(sc->supply_3v3); 834 if (rv != 0) { 835 device_printf(sc->dev, 836 "Cannot enable 'vpcie3v3' regulator\n"); 837 return (rv); 838 } 839 } 840 if (sc->supply_1v8 != NULL) { 841 rv = regulator_enable(sc->supply_1v8); 842 if (rv != 0) { 843 device_printf(sc->dev, 844 "Cannot enable 'vpcie1v8' regulator\n"); 845 return (rv); 846 } 847 } 848 if (sc->supply_0v9 != NULL) { 849 rv = regulator_enable(sc->supply_0v9); 850 if (rv != 0) { 851 device_printf(sc->dev, 852 "Cannot enable 'vpcie1v8' regulator\n"); 853 return (rv); 854 } 855 } 856 DELAY(1000); 857 858 /* Deassert basic resets*/ 859 rv = hwreset_deassert(sc->hwreset_pm); 860 if (rv != 0) { 861 device_printf(sc->dev, "Cannot deassert 'pm' reset\n"); 862 return (rv); 863 } 864 rv = hwreset_deassert(sc->hwreset_aclk); 865 if (rv != 0) { 866 device_printf(sc->dev, "Cannot deassert 'aclk' reset\n"); 867 return (rv); 868 } 869 rv = hwreset_deassert(sc->hwreset_pclk); 870 if (rv != 0) { 871 device_printf(sc->dev, "Cannot deassert 'pclk' reset\n"); 872 return (rv); 873 } 874 875 /* Set basic PCIe core mode (RC, lanes, gen1 or 2) */ 876 val = STRAP_CONF_GEN_2 << 16 | 877 (sc->link_is_gen2 ? STRAP_CONF_GEN_2: 0); 878 val |= STRAP_CONF_MODE_RC << 16 | STRAP_CONF_MODE_RC; 879 val |= STRAP_CONF_LANES(~0) << 16 | STRAP_CONF_LANES(sc->num_lanes); 880 val |= STRAP_CONF_ARI_EN << 16 | STRAP_CONF_ARI_EN; 881 val |= STRAP_CONF_CONF_EN << 16 | STRAP_CONF_CONF_EN; 882 APB_WR4(sc, PCIE_CLIENT_BASIC_STRAP_CONF, val); 883 884 for (i = 0; i < MAX_LANES; i++) { 885 rv = phy_enable(sc->phys[i]); 886 if (rv != 0) { 887 device_printf(sc->dev, "Cannot enable phy %d\n", i); 888 return (rv); 889 } 890 } 891 892 /* Deassert rest of resets - order is important ! */ 893 rv = hwreset_deassert(sc->hwreset_mgmt_sticky); 894 if (rv != 0) { 895 device_printf(sc->dev, "Cannot deassert 'mgmt_sticky' reset\n"); 896 return (rv); 897 } 898 rv = hwreset_deassert(sc->hwreset_core); 899 if (rv != 0) { 900 device_printf(sc->dev, "Cannot deassert 'core' reset\n"); 901 return (rv); 902 } 903 rv = hwreset_deassert(sc->hwreset_mgmt); 904 if (rv != 0) { 905 device_printf(sc->dev, "Cannot deassert 'mgmt' reset\n"); 906 return (rv); 907 } 908 rv = hwreset_deassert(sc->hwreset_pipe); 909 if (rv != 0) { 910 device_printf(sc->dev, "Cannot deassert 'pipe' reset\n"); 911 return (rv); 912 } 913 return (0); 914 } 915 916 static int 917 rk_pcie_setup_hw(struct rk_pcie_softc *sc) 918 { 919 uint32_t val; 920 int i, rv; 921 922 /* Assert PERST# if defined */ 923 if (sc->gpio_ep != NULL) { 924 rv = gpio_pin_set_active(sc->gpio_ep, 0); 925 if (rv != 0) { 926 device_printf(sc->dev, 927 "Cannot clear 'gpio-ep' gpio\n"); 928 return (rv); 929 } 930 } 931 932 rv = rk_pcie_enable_resources(sc); 933 if (rv != 0) 934 return(rv); 935 936 /* Fix wrong default value for transmited FTS for L0s exit */ 937 val = APB_RD4(sc, PCIE_CORE_CTRL1); 938 val |= 0xFFFF << 8; 939 APB_WR4(sc, PCIE_CORE_CTRL1, val); 940 941 /* Setup PCIE Link Status & Control register */ 942 val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); 943 val |= PCIEM_LINK_CTL_COMMON_CLOCK; 944 APB_WR4(sc, PCIE_RC_CONFIG_LCS, val); 945 val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); 946 val |= PCIEM_LINK_CTL_RCB; 947 APB_WR4(sc, PCIE_RC_CONFIG_LCS, val); 948 949 /* Enable training for GEN1 */ 950 APB_WR4(sc, PCIE_CLIENT_BASIC_STRAP_CONF, 951 STRAP_CONF_LINK_TRAIN_EN << 16 | STRAP_CONF_LINK_TRAIN_EN); 952 953 /* Deassert PERST# if defined */ 954 if (sc->gpio_ep != NULL) { 955 rv = gpio_pin_set_active(sc->gpio_ep, 1); 956 if (rv != 0) { 957 device_printf(sc->dev, "Cannot set 'gpio-ep' gpio\n"); 958 return (rv); 959 } 960 } 961 962 /* Wait for link */ 963 for (i = 500; i > 0; i--) { 964 val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1); 965 if (STATUS1_LINK_ST_GET(val) == STATUS1_LINK_ST_UP) 966 break; 967 DELAY(1000); 968 } 969 if (i <= 0) { 970 device_printf(sc->dev, 971 "Gen1 link training timeouted: 0x%08X.\n", val); 972 return (0); 973 } 974 975 if (sc->link_is_gen2) { 976 val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); 977 val |= PCIEM_LINK_CTL_RETRAIN_LINK; 978 APB_WR4(sc, PCIE_RC_CONFIG_LCS, val); 979 980 /* Wait for link */ 981 for (i = 500; i > 0; i--) { 982 val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1); 983 if (STATUS1_LINK_ST_GET(val) == 984 STATUS1_LINK_ST_UP) 985 break; 986 DELAY(1000); 987 } 988 if (i <= 0) 989 device_printf(sc->dev, "Gen2 link training " 990 "timeouted: 0x%08X.\n", val); 991 } 992 993 val = APB_RD4(sc, PCIE_CORE_CTRL0); 994 val = CORE_CTRL_LANES_GET(val); 995 if (bootverbose) 996 device_printf(sc->dev, "Link width: %d\n", 1 << val); 997 998 return (0); 999 } 1000 1001 static int 1002 rk_pcie_setup_sw(struct rk_pcie_softc *sc) 1003 { 1004 uint32_t val; 1005 int i, region; 1006 1007 pcib_bridge_init(sc->dev); 1008 1009 /* Setup config registers */ 1010 APB_WR4(sc, PCIE_CORE_CONFIG_VENDOR, 0x1D87); /* Rockchip vendor ID*/ 1011 PRIV_CFG_WR1(sc, PCIR_CLASS, PCIC_BRIDGE); 1012 PRIV_CFG_WR1(sc, PCIR_SUBCLASS, PCIS_BRIDGE_PCI); 1013 PRIV_CFG_WR1(sc, PCIR_PRIBUS_1, sc->root_bus); 1014 PRIV_CFG_WR1(sc, PCIR_SECBUS_1, sc->sub_bus); 1015 PRIV_CFG_WR1(sc, PCIR_SUBBUS_1, sc->bus_end); 1016 PRIV_CFG_WR2(sc, PCIR_COMMAND, PCIM_CMD_MEMEN | 1017 PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN); 1018 1019 /* Don't advertise L1 power substate */ 1020 val = APB_RD4(sc, PCIE_RC_CONFIG_THP_CAP); 1021 val &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; 1022 APB_WR4(sc, PCIE_RC_CONFIG_THP_CAP, val); 1023 1024 /* Don't advertise L0s */ 1025 if (sc->no_l0s) { 1026 val = APB_RD4(sc, PCIE_RC_CONFIG_LINK_CAP); 1027 val &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; 1028 APB_WR4(sc, PCIE_RC_CONFIG_LINK_CAP_L0S, val); 1029 } 1030 1031 /*Adjust maximum payload size*/ 1032 val = APB_RD4(sc, PCIE_RC_CONFIG_DCSR); 1033 val &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; 1034 val |= PCIE_RC_CONFIG_DCSR_MPS_128; 1035 APB_WR4(sc, PCIE_RC_CONFIG_DCSR, val); 1036 1037 /* 1038 * Prepare IB ATU 1039 * map whole address range in 1:1 mappings 1040 */ 1041 rk_pcie_map_in_atu(sc, 2, 64 - 1, 0); 1042 1043 /* Prepare OB ATU */ 1044 /* - region 0 (32 MB) is used for config access */ 1045 region = 0; 1046 rk_pcie_map_out_atu(sc, region++, ATU_TYPE_CFG0, 25 - 1, 0); 1047 1048 /* - then map memory (by using 1MB regions */ 1049 for (i = 0; i < sc->mem_range.size / ATU_OB_REGION_SIZE; i++) { 1050 rk_pcie_map_out_atu(sc, region++, ATU_TYPE_MEM, 1051 ATU_OB_REGION_SHIFT - 1, 1052 sc->mem_range.pci + ATU_OB_REGION_SIZE * i); 1053 } 1054 1055 /* - IO space is next, one region typically*/ 1056 for (i = 0; i < sc->io_range.size / ATU_OB_REGION_SIZE; i++) { 1057 rk_pcie_map_out_atu(sc, region++, ATU_TYPE_IO, 1058 ATU_OB_REGION_SHIFT - 1, 1059 sc->io_range.pci + ATU_OB_REGION_SIZE * i); 1060 } 1061 APB_WR4(sc, PCIE_CORE_RC_BAR_CONF, 0); 1062 return (0); 1063 } 1064 1065 static int 1066 rk_pcie_sys_irq(void *arg) 1067 { 1068 struct rk_pcie_softc *sc; 1069 uint32_t irq; 1070 1071 sc = (struct rk_pcie_softc *)arg; 1072 irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS); 1073 if (irq & PCIE_CLIENT_INT_LOCAL) { 1074 irq = APB_RD4(sc, PCIE_CORE_INT_STATUS); 1075 APB_WR4(sc, PCIE_CORE_INT_STATUS, irq); 1076 APB_WR4(sc, PCIE_CLIENT_INT_STATUS, PCIE_CLIENT_INT_LOCAL); 1077 1078 device_printf(sc->dev, "'sys' interrupt received: 0x%04X\n", 1079 irq); 1080 } 1081 1082 return (FILTER_HANDLED); 1083 } 1084 1085 static int 1086 rk_pcie_client_irq(void *arg) 1087 { 1088 struct rk_pcie_softc *sc; 1089 uint32_t irq; 1090 1091 sc = (struct rk_pcie_softc *)arg; 1092 irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS); 1093 /* Clear causes handled by other interrups */ 1094 irq &= ~PCIE_CLIENT_INT_LOCAL; 1095 irq &= ~PCIE_CLIENT_INT_LEGACY; 1096 APB_WR4(sc, PCIE_CLIENT_INT_STATUS, irq); 1097 1098 device_printf(sc->dev, "'client' interrupt received: 0x%04X\n", irq); 1099 1100 return (FILTER_HANDLED); 1101 } 1102 1103 static int 1104 rk_pcie_legacy_irq(void *arg) 1105 { 1106 struct rk_pcie_softc *sc; 1107 uint32_t irq; 1108 1109 sc = (struct rk_pcie_softc *)arg; 1110 irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS); 1111 irq &= PCIE_CLIENT_INT_LEGACY; 1112 APB_WR4(sc, PCIE_CLIENT_INT_STATUS, irq); 1113 1114 /* all legacy interrupt are shared, do nothing */ 1115 return (FILTER_STRAY); 1116 } 1117 1118 static bus_dma_tag_t 1119 rk_pcie_get_dma_tag(device_t dev, device_t child) 1120 { 1121 struct rk_pcie_softc *sc; 1122 1123 sc = device_get_softc(dev); 1124 return (sc->dmat); 1125 } 1126 1127 static int 1128 rk_pcie_probe(device_t dev) 1129 { 1130 1131 if (!ofw_bus_status_okay(dev)) 1132 return (ENXIO); 1133 1134 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) 1135 return (ENXIO); 1136 1137 device_set_desc(dev, "Rockchip PCIe controller"); 1138 return (BUS_PROBE_DEFAULT); 1139 } 1140 1141 static int 1142 rk_pcie_attach(device_t dev) 1143 { 1144 struct resource_map_request req; 1145 struct resource_map map; 1146 struct rk_pcie_softc *sc; 1147 uint32_t val; 1148 int rv, rid, max_speed; 1149 1150 sc = device_get_softc(dev); 1151 sc->dev = dev; 1152 sc->node = ofw_bus_get_node(dev); 1153 1154 mtx_init(&sc->mtx, "rk_pcie_mtx", NULL, MTX_DEF); 1155 1156 /* XXX Should not be this configurable ? */ 1157 sc->bus_start = 0; 1158 sc->bus_end = 0x1F; 1159 sc->root_bus = sc->bus_start; 1160 sc->sub_bus = 1; 1161 1162 /* Read FDT properties */ 1163 rv = rk_pcie_parse_fdt_resources(sc); 1164 if (rv != 0) 1165 goto out; 1166 1167 sc->coherent = OF_hasprop(sc->node, "dma-coherent"); 1168 sc->no_l0s = OF_hasprop(sc->node, "aspm-no-l0s"); 1169 rv = OF_getencprop(sc->node, "num-lanes", &sc->num_lanes, 1170 sizeof(sc->num_lanes)); 1171 if (rv != sizeof(sc->num_lanes)) 1172 sc->num_lanes = 1; 1173 if (sc->num_lanes != 1 && sc->num_lanes != 2 && sc->num_lanes != 4) { 1174 device_printf(dev, 1175 "invalid number of lanes: %d\n",sc->num_lanes); 1176 sc->num_lanes = 0; 1177 rv = ENXIO; 1178 goto out; 1179 } 1180 1181 rv = OF_getencprop(sc->node, "max-link-speed", &max_speed, 1182 sizeof(max_speed)); 1183 if (rv != sizeof(max_speed) || max_speed != 1) 1184 sc->link_is_gen2 = true; 1185 else 1186 sc->link_is_gen2 = false; 1187 1188 rv = ofw_bus_find_string_index(sc->node, "reg-names", "axi-base", &rid); 1189 if (rv != 0) { 1190 device_printf(dev, "Cannot get 'axi-base' memory\n"); 1191 rv = ENXIO; 1192 goto out; 1193 } 1194 sc->axi_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1195 RF_ACTIVE | RF_UNMAPPED); 1196 if (sc->axi_mem_res == NULL) { 1197 device_printf(dev, "Cannot allocate 'axi-base' (rid: %d)\n", 1198 rid); 1199 rv = ENXIO; 1200 goto out; 1201 } 1202 resource_init_map_request(&req); 1203 req.memattr = VM_MEMATTR_DEVICE_NP; 1204 rv = bus_map_resource(dev, SYS_RES_MEMORY, sc->axi_mem_res, &req, 1205 &map); 1206 if (rv != 0) { 1207 device_printf(dev, "Cannot map 'axi-base' (rid: %d)\n", 1208 rid); 1209 goto out; 1210 } 1211 rman_set_mapping(sc->axi_mem_res, &map); 1212 1213 rv = ofw_bus_find_string_index(sc->node, "reg-names", "apb-base", &rid); 1214 if (rv != 0) { 1215 device_printf(dev, "Cannot get 'apb-base' memory\n"); 1216 rv = ENXIO; 1217 goto out; 1218 } 1219 sc->apb_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1220 RF_ACTIVE); 1221 if (sc->apb_mem_res == NULL) { 1222 device_printf(dev, "Cannot allocate 'apb-base' (rid: %d)\n", 1223 rid); 1224 rv = ENXIO; 1225 goto out; 1226 } 1227 1228 rv = ofw_bus_find_string_index(sc->node, "interrupt-names", 1229 "client", &rid); 1230 if (rv != 0) { 1231 device_printf(dev, "Cannot get 'client' IRQ\n"); 1232 rv = ENXIO; 1233 goto out; 1234 } 1235 sc->client_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1236 RF_ACTIVE | RF_SHAREABLE); 1237 if (sc->client_irq_res == NULL) { 1238 device_printf(dev, "Cannot allocate 'client' IRQ resource\n"); 1239 rv = ENXIO; 1240 goto out; 1241 } 1242 1243 rv = ofw_bus_find_string_index(sc->node, "interrupt-names", 1244 "legacy", &rid); 1245 if (rv != 0) { 1246 device_printf(dev, "Cannot get 'legacy' IRQ\n"); 1247 rv = ENXIO; 1248 goto out; 1249 } 1250 sc->legacy_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1251 RF_ACTIVE | RF_SHAREABLE); 1252 if (sc->legacy_irq_res == NULL) { 1253 device_printf(dev, "Cannot allocate 'legacy' IRQ resource\n"); 1254 rv = ENXIO; 1255 goto out; 1256 } 1257 1258 rv = ofw_bus_find_string_index(sc->node, "interrupt-names", 1259 "sys", &rid); 1260 if (rv != 0) { 1261 device_printf(dev, "Cannot get 'sys' IRQ\n"); 1262 rv = ENXIO; 1263 goto out; 1264 } 1265 sc->sys_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1266 RF_ACTIVE | RF_SHAREABLE); 1267 if (sc->sys_irq_res == NULL) { 1268 device_printf(dev, "Cannot allocate 'sys' IRQ resource\n"); 1269 rv = ENXIO; 1270 goto out; 1271 } 1272 1273 if (bootverbose) 1274 device_printf(dev, "Bus is%s cache-coherent\n", 1275 sc->coherent ? "" : " not"); 1276 rv = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1277 1, 0, /* alignment, bounds */ 1278 BUS_SPACE_MAXADDR, /* lowaddr */ 1279 BUS_SPACE_MAXADDR, /* highaddr */ 1280 NULL, NULL, /* filter, filterarg */ 1281 BUS_SPACE_MAXSIZE, /* maxsize */ 1282 BUS_SPACE_UNRESTRICTED, /* nsegments */ 1283 BUS_SPACE_MAXSIZE, /* maxsegsize */ 1284 sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ 1285 NULL, NULL, /* lockfunc, lockarg */ 1286 &sc->dmat); 1287 if (rv != 0) 1288 goto out; 1289 1290 rv = ofw_pcib_init(dev); 1291 if (rv != 0) 1292 goto out; 1293 1294 rv = rk_pcie_decode_ranges(sc, sc->ofw_pci.sc_range, 1295 sc->ofw_pci.sc_nrange); 1296 if (rv != 0) 1297 goto out_full; 1298 rv = rk_pcie_setup_hw(sc); 1299 if (rv != 0) 1300 goto out_full; 1301 1302 rv = rk_pcie_setup_sw(sc); 1303 if (rv != 0) 1304 goto out_full; 1305 1306 rv = bus_setup_intr(dev, sc->client_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, 1307 rk_pcie_client_irq, NULL, sc, &sc->client_irq_cookie); 1308 if (rv != 0) { 1309 device_printf(dev, "cannot setup client interrupt handler\n"); 1310 rv = ENXIO; 1311 goto out_full; 1312 } 1313 1314 rv = bus_setup_intr(dev, sc->legacy_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, 1315 rk_pcie_legacy_irq, NULL, sc, &sc->legacy_irq_cookie); 1316 if (rv != 0) { 1317 device_printf(dev, "cannot setup client interrupt handler\n"); 1318 rv = ENXIO; 1319 goto out_full; 1320 } 1321 1322 rv = bus_setup_intr(dev, sc->sys_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, 1323 rk_pcie_sys_irq, NULL, sc, &sc->sys_irq_cookie); 1324 if (rv != 0) { 1325 device_printf(dev, "cannot setup client interrupt handler\n"); 1326 rv = ENXIO; 1327 goto out_full; 1328 } 1329 1330 /* Enable interrupts */ 1331 val = 1332 PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | 1333 PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | 1334 PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | 1335 PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_INTA | 1336 PCIE_CLIENT_INT_INTB | PCIE_CLIENT_INT_INTC | 1337 PCIE_CLIENT_INT_INTD | PCIE_CLIENT_INT_PHY; 1338 1339 APB_WR4(sc, PCIE_CLIENT_INT_MASK, (val << 16) & ~val); 1340 1341 val = 1342 PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | 1343 PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | 1344 PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | 1345 PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | 1346 PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | 1347 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | 1348 PCIE_CORE_INT_MMVC; 1349 APB_WR4(sc, PCIE_CORE_INT_MASK, ~(val)); 1350 1351 val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); 1352 val |= PCIEM_LINK_CTL_LBMIE | PCIEM_LINK_CTL_LABIE; 1353 APB_WR4(sc, PCIE_RC_CONFIG_LCS, val); 1354 1355 DELAY(250000); 1356 device_add_child(dev, "pci", -1); 1357 return (bus_generic_attach(dev)); 1358 1359 out_full: 1360 bus_teardown_intr(dev, sc->sys_irq_res, sc->sys_irq_cookie); 1361 bus_teardown_intr(dev, sc->legacy_irq_res, sc->legacy_irq_cookie); 1362 bus_teardown_intr(dev, sc->client_irq_res, sc->client_irq_cookie); 1363 ofw_pcib_fini(dev); 1364 out: 1365 bus_dma_tag_destroy(sc->dmat); 1366 bus_free_resource(dev, SYS_RES_IRQ, sc->sys_irq_res); 1367 bus_free_resource(dev, SYS_RES_IRQ, sc->legacy_irq_res); 1368 bus_free_resource(dev, SYS_RES_IRQ, sc->client_irq_res); 1369 bus_free_resource(dev, SYS_RES_MEMORY, sc->apb_mem_res); 1370 bus_free_resource(dev, SYS_RES_MEMORY, sc->axi_mem_res); 1371 /* GPIO */ 1372 gpio_pin_release(sc->gpio_ep); 1373 /* Phys */ 1374 for (int i = 0; i < MAX_LANES; i++) { 1375 phy_release(sc->phys[i]); 1376 } 1377 /* Clocks */ 1378 clk_release(sc->clk_aclk); 1379 clk_release(sc->clk_aclk_perf); 1380 clk_release(sc->clk_hclk); 1381 clk_release(sc->clk_pm); 1382 /* Resets */ 1383 hwreset_release(sc->hwreset_core); 1384 hwreset_release(sc->hwreset_mgmt); 1385 hwreset_release(sc->hwreset_pipe); 1386 hwreset_release(sc->hwreset_pm); 1387 hwreset_release(sc->hwreset_aclk); 1388 hwreset_release(sc->hwreset_pclk); 1389 /* Regulators */ 1390 regulator_release(sc->supply_12v); 1391 regulator_release(sc->supply_3v3); 1392 regulator_release(sc->supply_1v8); 1393 regulator_release(sc->supply_0v9); 1394 return (rv); 1395 } 1396 1397 static device_method_t rk_pcie_methods[] = { 1398 /* Device interface */ 1399 DEVMETHOD(device_probe, rk_pcie_probe), 1400 DEVMETHOD(device_attach, rk_pcie_attach), 1401 1402 /* Bus interface */ 1403 DEVMETHOD(bus_get_dma_tag, rk_pcie_get_dma_tag), 1404 1405 /* pcib interface */ 1406 DEVMETHOD(pcib_read_config, rk_pcie_read_config), 1407 DEVMETHOD(pcib_write_config, rk_pcie_write_config), 1408 DEVMETHOD(pcib_route_interrupt, rk_pcie_route_interrupt), 1409 #ifdef RK_PCIE_ENABLE_MSI 1410 DEVMETHOD(pcib_alloc_msi, rk_pcie_alloc_msi), 1411 DEVMETHOD(pcib_release_msi, rk_pcie_release_msi), 1412 #endif 1413 #ifdef RK_PCIE_ENABLE_MSIX 1414 DEVMETHOD(pcib_alloc_msix, rk_pcie_alloc_msix), 1415 DEVMETHOD(pcib_release_msix, rk_pcie_release_msix), 1416 #endif 1417 DEVMETHOD(pcib_map_msi, rk_pcie_map_msi), 1418 DEVMETHOD(pcib_get_id, rk_pcie_get_id), 1419 1420 /* OFW bus interface */ 1421 DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), 1422 DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), 1423 DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), 1424 DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), 1425 DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), 1426 1427 DEVMETHOD_END 1428 }; 1429 1430 DEFINE_CLASS_1(pcib, rk_pcie_driver, rk_pcie_methods, 1431 sizeof(struct rk_pcie_softc), ofw_pcib_driver); 1432 DRIVER_MODULE( rk_pcie, simplebus, rk_pcie_driver, NULL, NULL); 1433