// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma/edma.h>
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
	[DW_PCIE_DBI_CLK] = "dbi",
	[DW_PCIE_MSTR_CLK] = "mstr",
	[DW_PCIE_SLV_CLK] = "slv",
};

static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
	[DW_PCIE_PIPE_CLK] = "pipe",
	[DW_PCIE_CORE_CLK] = "core",
	[DW_PCIE_AUX_CLK] = "aux",
	[DW_PCIE_REF_CLK] = "ref",
};

static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
	[DW_PCIE_DBI_RST] = "dbi",
	[DW_PCIE_MSTR_RST] = "mstr",
	[DW_PCIE_SLV_RST] = "slv",
};

static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
	[DW_PCIE_NON_STICKY_RST] = "non-sticky",
	[DW_PCIE_STICKY_RST] = "sticky",
	[DW_PCIE_CORE_RST] = "core",
	[DW_PCIE_PIPE_RST] = "pipe",
	[DW_PCIE_PHY_RST] = "phy",
	[DW_PCIE_HOT_RST] = "hot",
	[DW_PCIE_PWR_RST] = "pwr",
};

static int dw_pcie_get_clocks(struct dw_pcie *pci)
{
	int i, ret;

	for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
		pci->app_clks[i].id = dw_pcie_app_clks[i];

	for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
		pci->core_clks[i].id = dw_pcie_core_clks[i];

	ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
					 pci->app_clks);
	if (ret)
		return ret;

	return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
					  pci->core_clks);
}

static int dw_pcie_get_resets(struct dw_pcie *pci)
{
	int i, ret;

	for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
		pci->app_rsts[i].id = dw_pcie_app_rsts[i];

	for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
		pci->core_rsts[i].id = dw_pcie_core_rsts[i];

	ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
							  DW_PCIE_NUM_APP_RSTS,
							  pci->app_rsts);
	if (ret)
		return ret;

	ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
							     DW_PCIE_NUM_CORE_RSTS,
							     pci->core_rsts);
	if (ret)
		return ret;

	pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(pci->pe_rst))
		return PTR_ERR(pci->pe_rst);

	return 0;
}
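/*
 * Illustrative sketch (not part of the upstream file): a glue driver that
 * sets DW_PCIE_CAP_REQ_RES is expected to switch the resources acquired
 * above on its own. Assuming the standard bulk clock/reset APIs, that
 * could look roughly like this; the function name is hypothetical.
 */
static int __maybe_unused dw_pcie_example_power_up(struct dw_pcie *pci)
{
	int ret;

	/* Release the application resets before clocking the core */
	ret = reset_control_bulk_deassert(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
	if (ret)
		reset_control_bulk_assert(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);

	return ret;
}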
int dw_pcie_get_resources(struct dw_pcie *pci)
{
	struct platform_device *pdev = to_platform_device(pci->dev);
	struct device_node *np = dev_of_node(pci->dev);
	struct resource *res;
	int ret;

	if (!pci->dbi_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
		pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
		if (IS_ERR(pci->dbi_base))
			return PTR_ERR(pci->dbi_base);
	}

	/* DBI2 is mainly useful for the endpoint controller */
	if (!pci->dbi_base2) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
		if (res) {
			pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
			if (IS_ERR(pci->dbi_base2))
				return PTR_ERR(pci->dbi_base2);
		} else {
			pci->dbi_base2 = pci->dbi_base + SZ_4K;
		}
	}

	/* For non-unrolled iATU/eDMA platforms this range will be ignored */
	if (!pci->atu_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
		if (res) {
			pci->atu_size = resource_size(res);
			pci->atu_base = devm_ioremap_resource(pci->dev, res);
			if (IS_ERR(pci->atu_base))
				return PTR_ERR(pci->atu_base);
		} else {
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
		}
	}

	/* Set a default value suitable for at most 8 in and 8 out windows */
	if (!pci->atu_size)
		pci->atu_size = SZ_4K;

	/* eDMA region can be mapped to a custom base address */
	if (!pci->edma.reg_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
		if (res) {
			pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
			if (IS_ERR(pci->edma.reg_base))
				return PTR_ERR(pci->edma.reg_base);
		} else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
			pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
		}
	}

	/* The LLDD is supposed to manually switch the clock and reset states */
	if (dw_pcie_cap_is(pci, REQ_RES)) {
		ret = dw_pcie_get_clocks(pci);
		if (ret)
			return ret;

		ret = dw_pcie_get_resets(pci);
		if (ret)
			return ret;
	}

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

	of_property_read_u32(np, "num-lanes", &pci->num_lanes);

	if (of_property_read_bool(np, "snps,enable-cdm-check"))
		dw_pcie_cap_set(pci, CDM_CHECK);

	return 0;
}

void dw_pcie_version_detect(struct dw_pcie *pci)
{
	u32 ver;

	/* The content of the CSR is zero on DWC PCIe older than v4.70a */
	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
	if (!ver)
		return;

	if (pci->version && pci->version != ver)
		dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
			 pci->version, ver);
	else
		pci->version = ver;

	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);

	if (pci->type && pci->type != ver)
		dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
			 pci->type, ver);
	else
		pci->type = ver;
}
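/*
 * Example (hypothetical helper, illustration only): a platform driver that
 * knows its controller revision from the compatible string can seed the
 * expected version before dw_pcie_version_detect() runs, which turns the
 * CSR read above into a pure consistency check.
 */
static void __maybe_unused dw_pcie_example_seed_version(struct dw_pcie *pci)
{
	/* DW_PCIE_VER_480A comes from pcie-designware.h */
	pci->version = DW_PCIE_VER_480A;
}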
/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
				  u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_readw_dbi(pci, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);

static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
					    u8 cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	header = dw_pcie_readl_dbi(pci, pos);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(pci, pos);
	}

	return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);

int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);

u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
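/*
 * Example (illustrative, not used by this file): the DBI accessors above
 * are the building block for read-modify-write updates of the core CSRs.
 * Config-space fields that are read-only on the bus must additionally be
 * unlocked via the DBI_RO_WR_EN misc control, as the helpers from
 * pcie-designware.h do here. The x2 width value is arbitrary.
 */
static void __maybe_unused dw_pcie_example_dbi_rmw(struct dw_pcie *pci)
{
	u8 offset;
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	/* E.g. advertise a different maximum link width (a RO field) */
	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, 2);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}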
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI2 address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi2);

static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
					       u32 index)
{
	if (dw_pcie_cap_is(pci, IATU_UNROLL))
		return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
	return pci->atu_base;
}

static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
{
	void __iomem *base;
	int ret;
	u32 val;

	base = dw_pcie_select_atu(pci, dir, index);

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, base, reg, 4);

	ret = dw_pcie_read(base + reg, 4, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}

static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
			       u32 reg, u32 val)
{
	void __iomem *base;
	int ret;

	base = dw_pcie_select_atu(pci, dir, index);

	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, base, reg, 4, val);
		return;
	}

	ret = dw_pcie_write(base + reg, 4, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}

static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}

static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
					 u32 val)
{
	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}
static inline u32 dw_pcie_enable_ecrc(u32 val)
{
	/*
	 * DesignWare core version 4.90A has a design issue where the 'TD'
	 * bit in the Control register-1 of the ATU outbound region acts
	 * like an override for the ECRC setting, i.e., the presence of TLP
	 * Digest (ECRC) in the outgoing TLPs is solely determined by this
	 * bit. This is contrary to the PCIe spec which says that the
	 * enablement of the ECRC is solely determined by the AER
	 * registers.
	 *
	 * Because of this, even when the ECRC is enabled through AER
	 * registers, the transactions going through ATU won't have TLP
	 * Digest as there is no way the PCI core AER code could program
	 * the TD bit which is specific to the DesignWare core.
	 *
	 * The best way to handle this scenario is to program the TD bit
	 * always. It affects only the traffic from root port to downstream
	 * devices.
	 *
	 * At this point,
	 * When ECRC is enabled in AER registers, everything works normally
	 * When ECRC is NOT enabled in AER registers, then,
	 * on Root Port:- TLP Digest (DWord size) gets appended to each packet
	 *		  even though it is not required. Since downstream
	 *		  TLPs are mostly for configuration accesses and BAR
	 *		  accesses, they are not in critical path and won't
	 *		  have much negative effect on the performance.
	 * on End Point:- TLP Digest is received for some/all the packets coming
	 *		  from the root port. TLP Digest is ignored because,
	 *		  as per the PCIe Spec r5.0 v1.0 section 2.2.3
	 *		  "TLP Digest Rules", when an endpoint receives TLP
	 *		  Digest when its ECRC check functionality is disabled
	 *		  in AER registers, received TLP Digest is just ignored.
	 * Since there is no issue or error reported on either side, the best
	 * way to handle the scenario is to program the TD bit by default.
	 */

	return val | PCIE_ATU_TD;
}

static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
				       int index, int type, u64 cpu_addr,
				       u64 pci_addr, u64 size)
{
	u32 retries, val;
	u64 limit_addr;

	if (pci->ops && pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	limit_addr = cpu_addr + size - 1;

	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
	    !IS_ALIGNED(cpu_addr, pci->region_align) ||
	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
		return -EINVAL;
	}

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
			      lower_32_bits(cpu_addr));
	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
			      upper_32_bits(cpu_addr));

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
			      lower_32_bits(limit_addr));
	if (dw_pcie_ver_is_ge(pci, 460A))
		dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
				      upper_32_bits(limit_addr));

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(pci_addr));
	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(pci_addr));

	val = type | PCIE_ATU_FUNC_NUM(func_no);
	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
	    dw_pcie_ver_is_ge(pci, 460A))
		val |= PCIE_ATU_INCREASE_REGION_SIZE;
	if (dw_pcie_ver_is(pci, 490A))
		val = dw_pcie_enable_ecrc(val);
	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Outbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}
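/*
 * Usage sketch (hypothetical addresses/size): the host bridge maps a CPU
 * address range onto the PCI bus by programming one outbound window per
 * bridge resource. Both addresses must be aligned to pci->region_align
 * and the window must not cross a pci->region_limit boundary, otherwise
 * the helper returns -EINVAL.
 */
static int __maybe_unused dw_pcie_example_map_mem(struct dw_pcie *pci)
{
	return dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
					 0x40000000, 0x40000000, SZ_256M);
}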
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			      u64 cpu_addr, u64 pci_addr, u64 size)
{
	return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
					   cpu_addr, pci_addr, size);
}

int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				 int type, u64 cpu_addr, u64 pci_addr,
				 u64 size)
{
	return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
					   cpu_addr, pci_addr, size);
}

static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}

static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
					 u32 val)
{
	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
			     u64 cpu_addr, u64 pci_addr, u64 size)
{
	u64 limit_addr = pci_addr + size - 1;
	u32 retries, val;

	if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
	    !IS_ALIGNED(cpu_addr, pci->region_align) ||
	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
		return -EINVAL;
	}

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
			      lower_32_bits(pci_addr));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
			      upper_32_bits(pci_addr));

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
			      lower_32_bits(limit_addr));
	if (dw_pcie_ver_is_ge(pci, 460A))
		dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
				      upper_32_bits(limit_addr));

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(cpu_addr));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(cpu_addr));

	val = type;
	if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
	    dw_pcie_ver_is_ge(pci, 460A))
		val |= PCIE_ATU_INCREASE_REGION_SIZE;
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}
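/*
 * Usage sketch (hypothetical addresses/size): an address-matched inbound
 * window translates bus addresses back to CPU/system memory. The BAR-matched
 * endpoint variant below is programmed per physical function instead.
 */
static int __maybe_unused dw_pcie_example_map_inbound(struct dw_pcie *pci)
{
	return dw_pcie_prog_inbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
					0x80000000, 0x80000000, SZ_64M);
}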
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				int type, u64 cpu_addr, u8 bar)
{
	u32 retries, val;

	if (!IS_ALIGNED(cpu_addr, pci->region_align))
		return -EINVAL;

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(cpu_addr));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(cpu_addr));

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
			      PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
			      PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
			      PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}

void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
	dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}

int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	u32 offset, val;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci))
			break;

		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	if (retries >= LINK_WAIT_MAX_RETRIES) {
		dev_info(pci->dev, "Phy link never came up\n");
		return -ETIMEDOUT;
	}

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);

	dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
		 FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
		 FIELD_GET(PCI_EXP_LNKSTA_NLW, val));

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops && pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
EXPORT_SYMBOL_GPL(dw_pcie_link_up);

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
	val |= PORT_MLTI_UPCFG_SUPPORT;
	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
	u32 cap, ctrl2, link_speed;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[link_gen]) {
	case PCIE_SPEED_2_5GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
		ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

	cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
}
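/*
 * Typical bring-up sequence (sketch, illustration only): the platform glue
 * starts link training through its dw_pcie_ops::start_link hook and then
 * polls with dw_pcie_wait_for_link(). Unwinding the PHY/clock state on
 * failure is the caller's business.
 */
static int __maybe_unused dw_pcie_example_bring_up_link(struct dw_pcie *pci)
{
	int ret;

	if (pci->ops && pci->ops->start_link) {
		ret = pci->ops->start_link(pci);
		if (ret)
			return ret;
	}

	return dw_pcie_wait_for_link(pci);
}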
static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
{
	u32 lnkcap, lwsc, plc;
	u8 cap;

	if (!num_lanes)
		return;

	/* Set the number of lanes */
	plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	plc &= ~PORT_LINK_FAST_LINK_MODE;
	plc &= ~PORT_LINK_MODE_MASK;

	/* Set link width speed control register */
	lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (num_lanes) {
	case 1:
		plc |= PORT_LINK_MODE_1_LANES;
		lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		plc |= PORT_LINK_MODE_2_LANES;
		lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		plc |= PORT_LINK_MODE_4_LANES;
		lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		plc |= PORT_LINK_MODE_8_LANES;
		lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);

	cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
	dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
}

void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
	int max_region, ob, ib;
	u32 val, min, dir;
	u64 max;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xFFFFFFFF) {
		dw_pcie_cap_set(pci, IATU_UNROLL);

		max_region = min((int)pci->atu_size / 512, 256);
	} else {
		pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
		pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;

		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
		max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
	}

	for (ob = 0; ob < max_region; ob++) {
		dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
		if (val != 0x11110000)
			break;
	}

	for (ib = 0; ib < max_region; ib++) {
		dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
		if (val != 0x11110000)
			break;
	}

	if (ob) {
		dir = PCIE_ATU_REGION_DIR_OB;
	} else if (ib) {
		dir = PCIE_ATU_REGION_DIR_IB;
	} else {
		dev_err(pci->dev, "No iATU regions found\n");
		return;
	}

	dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
	min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);

	if (dw_pcie_ver_is_ge(pci, 460A)) {
		dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
		max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
	} else {
		max = 0;
	}

	pci->num_ob_windows = ob;
	pci->num_ib_windows = ib;
	pci->region_align = 1 << fls(min);
	pci->region_limit = (max << 32) | (SZ_4G - 1);

	dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
		 dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
		 pci->num_ob_windows, pci->num_ib_windows,
		 pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
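/*
 * Sketch (hypothetical helper): callers can validate a window against the
 * constraints detected above before programming it, mirroring the checks
 * the prog_*_atu() helpers perform internally.
 */
static int __maybe_unused dw_pcie_example_check_window(struct dw_pcie *pci,
						       u32 index, u64 cpu_addr,
						       u64 size)
{
	if (index >= pci->num_ob_windows)
		return -EINVAL;

	if (!IS_ALIGNED(cpu_addr, pci->region_align))
		return -EINVAL;

	if (cpu_addr + size - 1 > pci->region_limit)
		return -EINVAL;

	return 0;
}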
static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
{
	u32 val = 0;
	int ret;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);

	ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
	if (ret)
		dev_err(pci->dev, "Read DMA address failed\n");

	return val;
}

static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
{
	struct platform_device *pdev = to_platform_device(dev);
	char name[6];
	int ret;

	if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
		return -EINVAL;

	ret = platform_get_irq_byname_optional(pdev, "dma");
	if (ret > 0)
		return ret;

	snprintf(name, sizeof(name), "dma%u", nr);

	return platform_get_irq_byname_optional(pdev, name);
}

static struct dw_edma_plat_ops dw_pcie_edma_ops = {
	.irq_vector = dw_pcie_edma_irq_vector,
};

static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
{
	u32 val;

	/*
	 * Indirect eDMA CSRs access has been completely removed since v5.40a,
	 * thus no space is now reserved for the eDMA channels viewport and
	 * the former DMA CTRL register is no longer fixed to FFs.
	 *
	 * Note that the Renesas R-Car S4-8 PCIe controllers for an unknown
	 * reason have zeros in the eDMA CTRL register even though the
	 * HW-manual explicitly states there must be FFs if the unrolled
	 * mapping is enabled. For such cases the low-level drivers are
	 * supposed to manually activate the unrolled mapping to bypass the
	 * auto-detection procedure.
	 */
	if (dw_pcie_ver_is_ge(pci, 540A) || dw_pcie_cap_is(pci, EDMA_UNROLL))
		val = 0xFFFFFFFF;
	else
		val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);

	if (val == 0xFFFFFFFF && pci->edma.reg_base) {
		pci->edma.mf = EDMA_MF_EDMA_UNROLL;

		val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
	} else if (val != 0xFFFFFFFF) {
		pci->edma.mf = EDMA_MF_EDMA_LEGACY;

		pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
	} else {
		return -ENODEV;
	}

	pci->edma.dev = pci->dev;

	if (!pci->edma.ops)
		pci->edma.ops = &dw_pcie_edma_ops;

	pci->edma.flags |= DW_EDMA_CHIP_LOCAL;

	pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
	pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);

	/* Sanity check the channel counts in case the mapping was incorrect */
	if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
	    !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
		return -EINVAL;

	return 0;
}

static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
	struct platform_device *pdev = to_platform_device(pci->dev);
	u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
	char name[6];
	int ret;

	if (pci->edma.nr_irqs == 1)
		return 0;
	else if (pci->edma.nr_irqs > 1)
		return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;

	ret = platform_get_irq_byname_optional(pdev, "dma");
	if (ret > 0) {
		pci->edma.nr_irqs = 1;
		return 0;
	}

	for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
		snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);

		ret = platform_get_irq_byname_optional(pdev, name);
		if (ret <= 0)
			return -EINVAL;
	}

	return 0;
}
static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
{
	struct dw_edma_region *ll;
	dma_addr_t paddr;
	int i;

	for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
		ll = &pci->edma.ll_region_wr[i];
		ll->sz = DMA_LLP_MEM_SIZE;
		ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
						    &paddr, GFP_KERNEL);
		if (!ll->vaddr.mem)
			return -ENOMEM;

		ll->paddr = paddr;
	}

	for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
		ll = &pci->edma.ll_region_rd[i];
		ll->sz = DMA_LLP_MEM_SIZE;
		ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
						    &paddr, GFP_KERNEL);
		if (!ll->vaddr.mem)
			return -ENOMEM;

		ll->paddr = paddr;
	}

	return 0;
}

int dw_pcie_edma_detect(struct dw_pcie *pci)
{
	int ret;

	/* Don't fail if no eDMA was found (for backward compatibility) */
	ret = dw_pcie_edma_find_chip(pci);
	if (ret)
		return 0;

	/* Don't fail on the IRQ verification (for backward compatibility) */
	ret = dw_pcie_edma_irq_verify(pci);
	if (ret) {
		dev_err(pci->dev, "Invalid eDMA IRQs found\n");
		return 0;
	}

	ret = dw_pcie_edma_ll_alloc(pci);
	if (ret) {
		dev_err(pci->dev, "Couldn't allocate LLP memory\n");
		return ret;
	}

	/* Don't fail if the DW eDMA driver can't find the device */
	ret = dw_edma_probe(&pci->edma);
	if (ret && ret != -ENODEV) {
		dev_err(pci->dev, "Couldn't register eDMA device\n");
		return ret;
	}

	dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
		 pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
		 pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);

	return 0;
}

void dw_pcie_edma_remove(struct dw_pcie *pci)
{
	dw_edma_remove(&pci->edma);
}

void dw_pcie_setup(struct dw_pcie *pci)
{
	u32 val;

	if (pci->link_gen > 0)
		dw_pcie_link_set_max_speed(pci, pci->link_gen);

	/* Configure Gen1 N_FTS */
	if (pci->n_fts[0]) {
		val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
		val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
		val |= PORT_AFR_N_FTS(pci->n_fts[0]);
		val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
		dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
	}

	/* Configure Gen2+ N_FTS */
	if (pci->n_fts[1]) {
		val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		val &= ~PORT_LOGIC_N_FTS_MASK;
		val |= pci->n_fts[1];
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
	}

	if (dw_pcie_cap_is(pci, CDM_CHECK)) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val |= PORT_LINK_DLL_LINK_EN;
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
}