// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/pci-pwrctrl.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>

#include "../../pci.h"
#include "../pci-host-common.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_DBI_BASE_ADDR_V2			0x350
#define PARF_DBI_BASE_ADDR_V2_HI		0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
#define PARF_NO_SNOOP_OVERRIDE			0x3d4
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x80000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_NO_SNOOP_OVERRIDE register fields */
#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* PARF_BDF_TO_SID_CFG fields */
#define BDF_TO_SID_BYPASS			BIT(0)

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						PCI_EXP_SLTCAP_PCP | \
						PCI_EXP_SLTCAP_MRLSP | \
						PCI_EXP_SLTCAP_AIP | \
						PCI_EXP_SLTCAP_PIP | \
						PCI_EXP_SLTCAP_HPS | \
						PCI_EXP_SLTCAP_EIP | \
						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
};

#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
	struct reset_control *rst;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*host_post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

/**
 * struct qcom_pcie_cfg - Per SoC config struct
 * @ops: qcom PCIe ops structure
 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
 * snooping
 * @firmware_managed: Set if the Root Complex is firmware managed
 */
struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	bool override_no_snoop;
	bool firmware_managed;
	bool no_l0s;
};

struct qcom_pcie_perst {
	struct list_head list;
	struct gpio_desc *desc;
};

struct qcom_pcie_port {
	struct list_head list;
	struct phy *phy;
	struct list_head perst;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *mhi;
	union qcom_pcie_resources res;
	struct icc_path *icc_mem;
	struct icc_path *icc_cpu;
	const struct qcom_pcie_cfg *cfg;
	struct dentry *debugfs;
	struct list_head ports;
	bool suspended;
	bool use_pm_opp;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void __qcom_pcie_perst_assert(struct qcom_pcie *pcie, bool assert)
{
	struct qcom_pcie_perst *perst;
	struct qcom_pcie_port *port;
	int val = assert ? 1 : 0;

	list_for_each_entry(port, &pcie->ports, list) {
		list_for_each_entry(perst, &port->perst, list)
			gpiod_set_value_cansleep(perst->desc, val);
	}

	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_pcie_perst_assert(struct qcom_pcie *pcie)
{
	__qcom_pcie_perst_assert(pcie, true);
}

static void qcom_pcie_perst_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST# has been asserted for at least 100 ms */
	msleep(PCIE_T_PVPERL_MS);
	__qcom_pcie_perst_assert(pcie, false);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_pcie_common_set_equalization(pci);

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
		qcom_pcie_common_set_16gt_lane_margining(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	u16 offset;
	u32 val;

	if (!pcie->cfg->no_l0s)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * PARF_DBI_BASE_ADDR register is in CPU domain and require to
		 * be programmed with CPU physical address.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
						PARF_SLV_ADDR_SPACE_SIZE);
	}
}

static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
		 * in CPU domain and require to be programmed with CPU
		 * physical addresses.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR_V2);
		writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
						PARF_DBI_BASE_ADDR_V2_HI);

		if (pci->atu_phys_addr) {
			writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR);
			writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR_HI);
		}

		writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
						PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
	}
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	if (!pci->elbi_base) {
		dev_err(pci->dev, "ELBI is not present\n");
		return;
	}
	/* enable link training */
	val = readl(pci->elbi_base + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";

	/* ext is optional on APQ8016 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* reset the PCIe interface as uboot can leave it undefined state */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	qcom_pcie_configure_dbi_base(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= LTSSM_EN;
	writel(val, pcie->parf + PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";

	res->num_resets = is_ipq ? 12 : 6;

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	pci->l1ss_support = true;

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;

	if (pcie_cfg->override_no_snoop)
		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
				pcie->parf + PARF_NO_SNOOP_OVERRIDE);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	/*
	 * Downstream devices need to be in D0 state before enabling PCI PM
	 * substates.
	 */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);

	return 0;
}

static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci->pp;

	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;
	u32 val;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	/* Enable BDF to SID translation by disabling bypass mode (default) */
	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
	val &= ~BDF_TO_SID_BYPASS;
	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zero out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(res->num_clks, res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
			PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return val & PCI_EXP_LNKSTA_DLLLA;
}

static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		phy_power_off(port->phy);
}

static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;
	int ret;

	list_for_each_entry(port, &pcie->ports, list) {
		ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
		if (ret)
			return ret;

		ret = phy_power_on(port->phy);
		if (ret) {
			qcom_pcie_phy_power_off(pcie);
			return ret;
		}
	}

	return 0;
}

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_pcie_perst_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = qcom_pcie_phy_power_on(pcie);
	if (ret)
		goto err_deinit;

	ret = pci_pwrctrl_create_devices(pci->dev);
	if (ret)
		goto err_disable_phy;

	ret = pci_pwrctrl_power_on_devices(pci->dev);
	if (ret)
		goto err_pwrctrl_destroy;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_pwrctrl_power_off;
	}

	qcom_pcie_clear_aspm_l0s(pcie->pci);
	dw_pcie_remove_capability(pcie->pci, PCI_CAP_ID_MSIX);
	dw_pcie_remove_ext_capability(pcie->pci, PCI_EXT_CAP_ID_DPC);

	qcom_pcie_perst_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_pcie_perst_assert(pcie);
err_pwrctrl_power_off:
	pci_pwrctrl_power_off_devices(pci->dev);
err_pwrctrl_destroy:
	if (ret != -EPROBE_DEFER)
		pci_pwrctrl_destroy_devices(pci->dev);
err_disable_phy:
	qcom_pcie_phy_power_off(pcie);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_pcie_perst_assert(pcie);

	/*
	 * No need to destroy pwrctrl devices as this function only gets called
	 * during system suspend as of now.
	 */
	pci_pwrctrl_power_off_devices(pci->dev);
	qcom_pcie_phy_power_off(pcie);
	pcie->cfg->ops->deinit(pcie);
}

static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	if (pcie->cfg->ops->host_post_init)
		pcie->cfg->ops->host_post_init(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init		= qcom_pcie_host_init,
	.deinit		= qcom_pcie_host_deinit,
	.post_init	= qcom_pcie_host_post_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 1.21.0  Synopsys IP rev.: 5.60a */
static const struct qcom_pcie_ops ops_1_21_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_1_34_0 = {
	.ops = &ops_1_9_0,
	.override_no_snoop = true,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
	.no_l0s = true,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct qcom_pcie_cfg cfg_sc8280xp = {
	.ops = &ops_1_21_0,
	.no_l0s = true,
};

static const struct qcom_pcie_cfg cfg_fw_managed = {
	.firmware_managed = true,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
	if (IS_ERR(pcie->icc_cpu))
		return PTR_ERR(pcie->icc_cpu);
	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
			ret);
		return ret;
	}

	/*
	 * Since the CPU-PCIe path is only used for activities like register
	 * access of the host controller and endpoint Config/BAR space access,
	 * HW team has recommended to use a minimal bandwidth of 1KBps just to
	 * keep the path active.
	 */
	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
			ret);
		icc_set_bw(pcie->icc_mem, 0, 0);
		return ret;
	}

	return 0;
}

static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
	u32 offset, status, width, speed;
	struct dw_pcie *pci = pcie->pci;
	struct dev_pm_opp_key key = {};
	unsigned long freq_kbps;
	struct dev_pm_opp *opp;
	int ret, freq_mbps;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0,
				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
		if (ret) {
			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
		}
	} else if (pcie->use_pm_opp) {
		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
		if (freq_mbps < 0)
			return;

		freq_kbps = freq_mbps * KILO;
		opp = dev_pm_opp_find_level_exact(pci->dev, speed);
		if (IS_ERR(opp)) {
			/* opp-level is not defined use only frequency */
			opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
							 true);
		} else {
			/* put opp-level OPP */
			dev_pm_opp_put(opp);

			key.freq = freq_kbps * width;
			key.level = speed;
			key.bw = 0;
			opp = dev_pm_opp_find_key_exact(pci->dev, &key, true);
		}
		if (!IS_ERR(opp)) {
			ret = dev_pm_opp_set_opp(pci->dev, opp);
			if (ret)
				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
					freq_kbps * width, ret);
			dev_pm_opp_put(opp);
		}
	}
}

static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}

static void qcom_pci_free_msi(void *ptr)
{
	struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;

	if (pp && pp->use_imsi_rx)
		dw_pcie_free_msi(pp);
}

static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	int ret;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pp = &pci->pp;
	pci->dbi_base = cfg->win;
	pp->num_vectors = MSI_DEF_NUM_VECTORS;

	ret = dw_pcie_msi_host_init(pp);
	if (ret)
		return ret;

	pp->use_imsi_rx = true;
	dw_pcie_msi_init(pp);

	return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
}

static const struct pci_ecam_ops pci_qcom_ecam_ops = {
	.init = qcom_pcie_ecam_host_init,
	.pci_ops = {
		.map_bus = pci_ecam_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};

/* Parse PERST# from all nodes in depth first manner starting from @np */
static int qcom_pcie_parse_perst(struct qcom_pcie *pcie,
				 struct qcom_pcie_port *port,
				 struct device_node *np)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_perst *perst;
	struct gpio_desc *reset;
	int ret;

	if (!of_find_property(np, "reset-gpios", NULL))
		goto parse_child_node;

	reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(np), "reset",
				      GPIOD_OUT_HIGH, "PERST#");
	if (IS_ERR(reset)) {
		/*
		 * FIXME: GPIOLIB currently supports exclusive GPIO access only.
		 * Non exclusive access is broken. But shared PERST# requires
		 * non-exclusive access. So once GPIOLIB properly supports it,
		 * implement it here.
		 */
		if (PTR_ERR(reset) == -EBUSY)
			dev_err(dev, "Shared PERST# is not supported\n");

		return PTR_ERR(reset);
	}

	perst = devm_kzalloc(dev, sizeof(*perst), GFP_KERNEL);
	if (!perst)
		return -ENOMEM;

	INIT_LIST_HEAD(&perst->list);
	perst->desc = reset;
	list_add_tail(&perst->list, &port->perst);

parse_child_node:
	for_each_available_child_of_node_scoped(np, child) {
		ret = qcom_pcie_parse_perst(pcie, port, child);
		if (ret)
			return ret;
	}

	return 0;
}

static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct phy *phy;
	int ret;

	phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = phy_init(phy);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&port->perst);

	ret = qcom_pcie_parse_perst(pcie, port, node);
	if (ret)
		return ret;

	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
{
	struct qcom_pcie_perst *perst, *tmp_perst;
	struct qcom_pcie_port *port, *tmp_port;
	struct device *dev = pcie->pci->dev;
	int ret = -ENODEV;

	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
		if (!of_node_is_type(of_port, "pci"))
			continue;
		ret = qcom_pcie_parse_port(pcie, of_port);
		if (ret)
			goto err_port_del;
	}

	return ret;

err_port_del:
	list_for_each_entry_safe(port, tmp_port, &pcie->ports, list) {
		list_for_each_entry_safe(perst, tmp_perst, &port->perst, list)
			list_del(&perst->list);
		phy_exit(port->phy);
		list_del(&port->list);
	}

	return ret;
}

static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_perst *perst;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	ret = phy_init(phy);
	if (ret)
		return ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	perst = devm_kzalloc(dev, sizeof(*perst), GFP_KERNEL);
	if (!perst)
		return -ENOMEM;

	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	perst->desc = reset;
	INIT_LIST_HEAD(&port->perst);
	INIT_LIST_HEAD(&perst->list);
	list_add_tail(&perst->list, &port->perst);

	return 0;
}

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct qcom_pcie_perst *perst, *tmp_perst;
	struct qcom_pcie_port *port, *tmp_port;
	const struct qcom_pcie_cfg *pcie_cfg;
	unsigned long max_freq = ULONG_MAX;
	struct device *dev = &pdev->dev;
	struct dev_pm_opp *opp;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg) {
		dev_err(dev, "No platform data\n");
		return -ENODATA;
	}

	if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
		dev_err(dev, "No platform ops\n");
		return -ENODATA;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	if (pcie_cfg->firmware_managed) {
		struct pci_host_bridge *bridge;
		struct pci_config_window *cfg;

		bridge = devm_pci_alloc_host_bridge(dev, 0);
		if (!bridge) {
			ret = -ENOMEM;
			goto err_pm_runtime_put;
		}

		/* Parse and map our ECAM configuration space area */
		cfg = pci_host_common_ecam_create(dev, bridge,
						  &pci_qcom_ecam_ops);
		if (IS_ERR(cfg)) {
			ret = PTR_ERR(cfg);
			goto err_pm_runtime_put;
		}

		bridge->sysdata = cfg;
		bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
		bridge->msi_domain = true;

		ret = pci_host_probe(bridge);
		if (ret)
			goto err_pm_runtime_put;

		return 0;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	INIT_LIST_HEAD(&pcie->ports);

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(dev, ret, "Failed to add OPP table\n");
		goto err_pm_runtime_put;
	}

	/*
	 * Before the PCIe link is initialized, vote for highest OPP in the OPP
	 * table, so that we are voting for maximum voltage corner for the
	 * link to come up in maximum supported speed. At the end of the
	 * probe(), OPP will be updated using qcom_pcie_icc_opp_update().
	 */
	if (!ret) {
		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err_probe(pci->dev, ret,
				      "Unable to find max freq OPP\n");
			goto err_pm_runtime_put;
		} else {
			ret = dev_pm_opp_set_opp(dev, opp);
		}

		dev_pm_opp_put(opp);
		if (ret) {
			dev_err_probe(pci->dev, ret,
				      "Failed to set OPP for freq %lu\n",
				      max_freq);
			goto err_pm_runtime_put;
		}

		pcie->use_pm_opp = true;
	} else {
		/* Skip ICC init if OPP is supported as it is handled by OPP */
		ret = qcom_pcie_icc_init(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = qcom_pcie_parse_ports(pcie);
	if (ret) {
		if (ret != -ENODEV) {
			dev_err_probe(pci->dev, ret,
				      "Failed to parse Root Port: %d\n", ret);
			goto err_pm_runtime_put;
		}

		/*
		 * In the case of properties not populated in Root Port node,
		 * fallback to the legacy method of parsing the Host Bridge
		 * node. This is to maintain DT backwards compatibility.
		 */
		ret = qcom_pcie_parse_legacy_binding(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err_probe(dev, ret, "cannot initialize host\n");
		goto err_phy_exit;
	}

	qcom_pcie_icc_opp_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_phy_exit:
	list_for_each_entry_safe(port, tmp_port, &pcie->ports, list) {
		list_for_each_entry_safe(perst, tmp_perst, &port->perst, list)
			list_del(&perst->list);
		phy_exit(port->phy);
		list_del(&port->list);
	}
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret = 0;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	/*
	 * Set minimum bandwidth required to keep data path functional during
	 * suspend.
	 */
	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
		if (ret) {
			dev_err(dev,
				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
			return ret;
		}
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger access violation during the end of the suspend cycle,
	 * as kernel tries to access the PCIe devices config space for masking
	 * MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state as that
	 * implies VDD supply will be removed and the devices may go into
	 * powerdown state. This will affect the lifetime of the storage devices
	 * like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	/*
	 * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM.
	 * Because on some platforms, DBI access can happen very late during the
	 * S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC
	 * error.
	 */
	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_disable(pcie->icc_cpu);
		if (ret)
			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);

		if (pcie->use_pm_opp)
			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
	}
	return ret;
}

static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_enable(pcie->icc_cpu);
		if (ret) {
			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
			return ret;
		}
	}

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_opp_update(pcie);

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);