// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>

#include "../../pci.h"
#include "../pci-host-common.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_INT_ALL_STATUS			0x224
#define PARF_INT_ALL_CLEAR			0x228
#define PARF_INT_ALL_MASK			0x22c
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_DBI_BASE_ADDR_V2			0x350
#define PARF_DBI_BASE_ADDR_V2_HI		0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
#define PARF_NO_SNOOP_OVERRIDE			0x3d4
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x80000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP			BIT(13)
#define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23)

/* PARF_NO_SNOOP_OVERRIDE register fields */
#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* PARF_BDF_TO_SID_CFG fields */
#define BDF_TO_SID_BYPASS			BIT(0)

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL	FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE	FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL		(PCI_EXP_SLTCAP_ABP | \
					 PCI_EXP_SLTCAP_PCP | \
					 PCI_EXP_SLTCAP_MRLSP | \
					 PCI_EXP_SLTCAP_AIP | \
					 PCI_EXP_SLTCAP_PIP | \
					 PCI_EXP_SLTCAP_HPS | \
					 PCI_EXP_SLTCAP_EIP | \
					 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
					 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
};

#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
	struct reset_control *rst;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*host_post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

/**
 * struct qcom_pcie_cfg - Per SoC config struct
 * @ops: qcom PCIe ops structure
 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
 * snooping
 * @firmware_managed: Set if the Root Complex is firmware managed
 * @no_l0s: Set if the Root Complex should not advertise ASPM L0s
 */
struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	bool override_no_snoop;
	bool firmware_managed;
	bool no_l0s;
};

struct qcom_pcie_port {
	struct list_head list;
	struct gpio_desc *reset;
	struct phy *phy;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *mhi;
	union qcom_pcie_resources res;
	struct icc_path *icc_mem;
	struct icc_path *icc_cpu;
	const struct qcom_pcie_cfg *cfg;
	struct dentry *debugfs;
	struct list_head ports;
	bool suspended;
	bool use_pm_opp;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
	struct qcom_pcie_port *port;
	int val = assert ? 1 : 0;

	list_for_each_entry(port, &pcie->ports, list)
		gpiod_set_value_cansleep(port->reset, val);

	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	qcom_perst_assert(pcie, true);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(PCIE_T_PVPERL_MS);
	qcom_perst_assert(pcie, false);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_pcie_common_set_equalization(pci);

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
		qcom_pcie_common_set_16gt_lane_margining(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	u16 offset;
	u32 val;

	if (!pcie->cfg->no_l0s)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR register is in the CPU domain and
		 * must be programmed with the CPU physical address.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
						PARF_DBI_BASE_ADDR);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
						PARF_SLV_ADDR_SPACE_SIZE);
	}
}

static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers
		 * are in the CPU domain and must be programmed with CPU
		 * physical addresses.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
						PARF_DBI_BASE_ADDR_V2);
		writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
						PARF_DBI_BASE_ADDR_V2_HI);

		if (pci->atu_phys_addr) {
			writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
						PARF_ATU_BASE_ADDR);
			writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
						PARF_ATU_BASE_ADDR_HI);
		}

		writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
						PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
	}
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	if (!pci->elbi_base) {
		dev_err(pci->dev, "ELBI is not present\n");
		return;
	}
	/* enable link training */
	val = readl(pci->elbi_base + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";

	/* ext is optional on APQ8016 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* Reset the PCIe interface as U-Boot can leave it in an undefined state */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	qcom_pcie_configure_dbi_base(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_assert_perst(struct dw_pcie *pci, bool assert)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	if (assert)
		qcom_ep_reset_assert(pcie);
	else
		qcom_ep_reset_deassert(pcie);

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= LTSSM_EN;
	writel(val, pcie->parf + PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";

	res->num_resets = is_ipq ? 12 : 6;

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	pci->l1ss_support = true;

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;

	if (pcie_cfg->override_no_snoop)
		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
				pcie->parf + PARF_NO_SNOOP_OVERRIDE);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	/*
	 * Downstream devices need to be in D0 state before enabling PCI PM
	 * substates.
	 */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);

	return 0;
}

static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci->pp;

	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

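/*
 * The BDF to SID mapping is programmed into the 256-entry table at
 * PARF_BDF_TO_SID_TABLE_N. Each u32 entry is laid out as
 * BDF[31:16] | SID offset[15:8] | NEXT[7:0]: the entry index is the CRC8
 * (polynomial 0x7) hash of the big-endian BDF, and the NEXT field chains to
 * another entry when two BDFs hash to the same index. The table contents
 * are derived from the "iommu-map" DT property.
 */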
static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;
	u32 val;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	/* Enable BDF to SID translation by disabling bypass mode (default) */
	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
	val &= ~BDF_TO_SID_BYPASS;
	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for the next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If the NEXT field is NULL, update it with the next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(res->num_clks, res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return val & PCI_EXP_LNKSTA_DLLLA;
}

static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		phy_power_off(port->phy);
}

static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;
	int ret;

	list_for_each_entry(port, &pcie->ports, list) {
		ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
		if (ret)
			return ret;

		ret = phy_power_on(port->phy);
		if (ret) {
			qcom_pcie_phy_power_off(pcie);
			return ret;
		}
	}

	return 0;
}

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = qcom_pcie_phy_power_on(pcie);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_pcie_clear_aspm_l0s(pcie->pci);

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	qcom_pcie_phy_power_off(pcie);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	qcom_pcie_phy_power_off(pcie);
	pcie->cfg->ops->deinit(pcie);
}

static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	if (pcie->cfg->ops->host_post_init)
		pcie->cfg->ops->host_post_init(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init = qcom_pcie_host_init,
	.deinit = qcom_pcie_host_deinit,
	.post_init = qcom_pcie_host_post_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 1.21.0	Synopsys IP rev.: 5.60a */
static const struct qcom_pcie_ops ops_1_21_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_1_34_0 = {
	.ops = &ops_1_9_0,
	.override_no_snoop = true,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
	.no_l0s = true,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct qcom_pcie_cfg cfg_sc8280xp = {
	.ops = &ops_1_21_0,
	.no_l0s = true,
};

static const struct qcom_pcie_cfg cfg_fw_managed = {
	.firmware_managed = true,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
	.assert_perst = qcom_pcie_assert_perst,
};

static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
	if (IS_ERR(pcie->icc_cpu))
		return PTR_ERR(pcie->icc_cpu);

	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
			ret);
		return ret;
	}

	/*
	 * Since the CPU-PCIe path is only used for activities like register
	 * access of the host controller and endpoint Config/BAR space access,
	 * the HW team has recommended using a minimal bandwidth of 1 KBps just
	 * to keep the path active.
	 */
	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
			ret);
		icc_set_bw(pcie->icc_mem, 0, 0);
		return ret;
	}

	return 0;
}

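/*
 * Update the interconnect bandwidth vote, or the OPP vote when an OPP table
 * is in use, to match the negotiated link speed and width. Callers invoke
 * this after probe, on link-up interrupts and on resume; nothing is changed
 * if the link is not up.
 */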
static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
	u32 offset, status, width, speed;
	struct dw_pcie *pci = pcie->pci;
	struct dev_pm_opp_key key = {};
	unsigned long freq_kbps;
	struct dev_pm_opp *opp;
	int ret, freq_mbps;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0,
				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
		if (ret) {
			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
		}
	} else if (pcie->use_pm_opp) {
		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
		if (freq_mbps < 0)
			return;

		freq_kbps = freq_mbps * KILO;
		opp = dev_pm_opp_find_level_exact(pci->dev, speed);
		if (IS_ERR(opp)) {
			/* opp-level is not defined, use only frequency */
			opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
							 true);
		} else {
			/* put opp-level OPP */
			dev_pm_opp_put(opp);

			key.freq = freq_kbps * width;
			key.level = speed;
			key.bw = 0;
			opp = dev_pm_opp_find_key_exact(pci->dev, &key, true);
		}
		if (!IS_ERR(opp)) {
			ret = dev_pm_opp_set_opp(pci->dev, opp);
			if (ret)
				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
					freq_kbps * width, ret);
			dev_pm_opp_put(opp);
		}
	}
}

static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}

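/*
 * Threaded handler for the optional "global" interrupt. On a Link Up event
 * the Root Port bus is rescanned to enumerate downstream devices, and the
 * interconnect/OPP votes are updated to match the trained link.
 */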
static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
{
	struct qcom_pcie *pcie = data;
	struct dw_pcie_rp *pp = &pcie->pci->pp;
	struct device *dev = pcie->pci->dev;
	u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);

	writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);

	if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
		msleep(PCIE_RESET_CONFIG_WAIT_MS);
		dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
		/* Rescan the bus to enumerate endpoint devices */
		pci_lock_rescan_remove();
		pci_rescan_bus(pp->bridge->bus);
		pci_unlock_rescan_remove();

		qcom_pcie_icc_opp_update(pcie);
	} else {
		dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
			      status);
	}

	return IRQ_HANDLED;
}

static void qcom_pci_free_msi(void *ptr)
{
	struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;

	if (pp && pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);
}

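/*
 * ECAM-based host init used on firmware-managed platforms (cfg_fw_managed).
 * The probe path skips the usual resource, PHY and link management in this
 * mode and only maps the ECAM config space and sets up MSI handling.
 */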
static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	int ret;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pp = &pci->pp;
	pci->dbi_base = cfg->win;
	pp->num_vectors = MSI_DEF_NUM_VECTORS;

	ret = dw_pcie_msi_host_init(pp);
	if (ret)
		return ret;

	pp->has_msi_ctrl = true;
	dw_pcie_msi_init(pp);

	return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
}

static const struct pci_ecam_ops pci_qcom_ecam_ops = {
	.init = qcom_pcie_ecam_host_init,
	.pci_ops = {
		.map_bus = pci_ecam_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};

static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
				      "reset", GPIOD_OUT_HIGH, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = phy_init(phy);
	if (ret)
		return ret;

	port->reset = reset;
	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port, *tmp;
	int ret = -ENOENT;

	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
		if (!of_node_is_type(of_port, "pci"))
			continue;
		ret = qcom_pcie_parse_port(pcie, of_port);
		if (ret)
			goto err_port_del;
	}

	return ret;

err_port_del:
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_exit(port->phy);
		list_del(&port->list);
	}

	return ret;
}

static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	ret = phy_init(phy);
	if (ret)
		return ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->reset = reset;
	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	unsigned long max_freq = ULONG_MAX;
	struct qcom_pcie_port *port, *tmp;
	struct device *dev = &pdev->dev;
	struct dev_pm_opp *opp;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret, irq;
	char *name;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg) {
		dev_err(dev, "No platform data\n");
		return -ENODATA;
	}

	if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
		dev_err(dev, "No platform ops\n");
		return -ENODATA;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	if (pcie_cfg->firmware_managed) {
		struct pci_host_bridge *bridge;
		struct pci_config_window *cfg;

		bridge = devm_pci_alloc_host_bridge(dev, 0);
		if (!bridge) {
			ret = -ENOMEM;
			goto err_pm_runtime_put;
		}

		/* Parse and map our ECAM configuration space area */
		cfg = pci_host_common_ecam_create(dev, bridge,
						  &pci_qcom_ecam_ops);
		if (IS_ERR(cfg)) {
			ret = PTR_ERR(cfg);
			goto err_pm_runtime_put;
		}

		bridge->sysdata = cfg;
		bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
		bridge->msi_domain = true;

		ret = pci_host_probe(bridge);
		if (ret)
			goto err_pm_runtime_put;

		return 0;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	INIT_LIST_HEAD(&pcie->ports);

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(dev, ret, "Failed to add OPP table\n");
		goto err_pm_runtime_put;
	}

	/*
	 * Before the PCIe link is initialized, vote for the highest OPP in the
	 * OPP table, so that we are voting for the maximum voltage corner for
	 * the link to come up at the maximum supported speed. At the end of
	 * probe(), the OPP will be updated using qcom_pcie_icc_opp_update().
	 */
	if (!ret) {
		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err_probe(pci->dev, ret,
				      "Unable to find max freq OPP\n");
			goto err_pm_runtime_put;
		} else {
			ret = dev_pm_opp_set_opp(dev, opp);
		}

		dev_pm_opp_put(opp);
		if (ret) {
			dev_err_probe(pci->dev, ret,
				      "Failed to set OPP for freq %lu\n",
				      max_freq);
			goto err_pm_runtime_put;
		}

		pcie->use_pm_opp = true;
	} else {
		/* Skip ICC init if OPP is supported as it is handled by OPP */
		ret = qcom_pcie_icc_init(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = qcom_pcie_parse_ports(pcie);
	if (ret) {
		if (ret != -ENOENT) {
			dev_err_probe(pci->dev, ret,
				      "Failed to parse Root Port: %d\n", ret);
			goto err_pm_runtime_put;
		}

		/*
		 * In the case of properties not populated in the Root Port
		 * node, fall back to the legacy method of parsing the Host
		 * Bridge node. This is to maintain DT backwards compatibility.
		 */
		ret = qcom_pcie_parse_legacy_binding(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	platform_set_drvdata(pdev, pcie);

	irq = platform_get_irq_byname_optional(pdev, "global");
	if (irq > 0)
		pp->use_linkup_irq = true;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
			      pci_domain_nr(pp->bridge->bus));
	if (!name) {
		ret = -ENOMEM;
		goto err_host_deinit;
	}

	if (irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
						qcom_pcie_global_irq_thread,
						IRQF_ONESHOT, name, pcie);
		if (ret) {
			dev_err_probe(&pdev->dev, ret,
				      "Failed to request Global IRQ\n");
			goto err_host_deinit;
		}

		writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
			       pcie->parf + PARF_INT_ALL_MASK);
	}

	qcom_pcie_icc_opp_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_host_deinit:
	dw_pcie_host_deinit(pp);
err_phy_exit:
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_exit(port->phy);
		list_del(&port->list);
	}
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret = 0;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	/*
	 * Set minimum bandwidth required to keep data path functional during
	 * suspend.
	 */
	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
		if (ret) {
			dev_err(dev,
				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
			return ret;
		}
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger an access violation at the end of the suspend cycle, as
	 * the kernel tries to access the PCIe devices' config space to mask
	 * MSIs.
	 *
	 * Also, it is not desirable to put the link into the L2/L3 state, as
	 * that implies the VDD supply will be removed and the devices may go
	 * into a powerdown state. This will affect the lifetime of storage
	 * devices like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	/*
	 * Only disable the CPU-PCIe interconnect path if the suspend is not
	 * S2RAM, because on some platforms DBI access can happen very late
	 * during S2RAM and an inactive CPU-PCIe interconnect path may lead to
	 * a NoC error.
	 */
	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_disable(pcie->icc_cpu);
		if (ret)
			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);

		if (pcie->use_pm_opp)
			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
	}
	return ret;
}

static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_enable(pcie->icc_cpu);
		if (ret) {
			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
			return ret;
		}
	}

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_opp_update(pcie);

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);