// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>

#include "../../pci.h"
#include "../pci-host-common.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_INT_ALL_STATUS			0x224
#define PARF_INT_ALL_CLEAR			0x228
#define PARF_INT_ALL_MASK			0x22c
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_DBI_BASE_ADDR_V2			0x350
#define PARF_DBI_BASE_ADDR_V2_HI		0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
#define PARF_NO_SNOOP_OVERRIDE			0x3d4
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x80000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP			BIT(13)
#define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23)

/* PARF_NO_SNOOP_OVERRIDE register fields */
#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* PARF_BDF_TO_SID_CFG fields */
#define BDF_TO_SID_BYPASS			BIT(0)

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
	Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
}; 219 220 #define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2 221 struct qcom_pcie_resources_2_7_0 { 222 struct clk_bulk_data *clks; 223 int num_clks; 224 struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES]; 225 struct reset_control *rst; 226 }; 227 228 struct qcom_pcie_resources_2_9_0 { 229 struct clk_bulk_data *clks; 230 int num_clks; 231 struct reset_control *rst; 232 }; 233 234 union qcom_pcie_resources { 235 struct qcom_pcie_resources_1_0_0 v1_0_0; 236 struct qcom_pcie_resources_2_1_0 v2_1_0; 237 struct qcom_pcie_resources_2_3_2 v2_3_2; 238 struct qcom_pcie_resources_2_3_3 v2_3_3; 239 struct qcom_pcie_resources_2_4_0 v2_4_0; 240 struct qcom_pcie_resources_2_7_0 v2_7_0; 241 struct qcom_pcie_resources_2_9_0 v2_9_0; 242 }; 243 244 struct qcom_pcie; 245 246 struct qcom_pcie_ops { 247 int (*get_resources)(struct qcom_pcie *pcie); 248 int (*init)(struct qcom_pcie *pcie); 249 int (*post_init)(struct qcom_pcie *pcie); 250 void (*host_post_init)(struct qcom_pcie *pcie); 251 void (*deinit)(struct qcom_pcie *pcie); 252 void (*ltssm_enable)(struct qcom_pcie *pcie); 253 int (*config_sid)(struct qcom_pcie *pcie); 254 }; 255 256 /** 257 * struct qcom_pcie_cfg - Per SoC config struct 258 * @ops: qcom PCIe ops structure 259 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache 260 * snooping 261 * @firmware_managed: Set if the Root Complex is firmware managed 262 */ 263 struct qcom_pcie_cfg { 264 const struct qcom_pcie_ops *ops; 265 bool override_no_snoop; 266 bool firmware_managed; 267 bool no_l0s; 268 }; 269 270 struct qcom_pcie_port { 271 struct list_head list; 272 struct gpio_desc *reset; 273 struct phy *phy; 274 }; 275 276 struct qcom_pcie { 277 struct dw_pcie *pci; 278 void __iomem *parf; /* DT parf */ 279 void __iomem *elbi; /* DT elbi */ 280 void __iomem *mhi; 281 union qcom_pcie_resources res; 282 struct phy *phy; 283 struct gpio_desc *reset; 284 struct icc_path *icc_mem; 285 struct icc_path *icc_cpu; 286 const struct qcom_pcie_cfg *cfg; 287 struct dentry *debugfs; 288 struct list_head ports; 289 bool suspended; 290 bool use_pm_opp; 291 }; 292 293 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) 294 295 static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert) 296 { 297 struct qcom_pcie_port *port; 298 int val = assert ? 
1 : 0;

	if (list_empty(&pcie->ports))
		gpiod_set_value_cansleep(pcie->reset, val);
	else
		list_for_each_entry(port, &pcie->ports, list)
			gpiod_set_value_cansleep(port->reset, val);

	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	qcom_perst_assert(pcie, true);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(PCIE_T_PVPERL_MS);
	qcom_perst_assert(pcie, false);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
		qcom_pcie_common_set_16gt_equalization(pci);
		qcom_pcie_common_set_16gt_lane_margining(pci);
	}

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	u16 offset;
	u32 val;

	if (!pcie->cfg->no_l0s)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR register is in the CPU domain and
		 * needs to be programmed with the CPU physical address.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
				PARF_DBI_BASE_ADDR);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
				PARF_SLV_ADDR_SPACE_SIZE);
	}
}

static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers
		 * are in the CPU domain and need to be programmed with CPU
		 * physical addresses.
396 */ 397 writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + 398 PARF_DBI_BASE_ADDR_V2); 399 writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf + 400 PARF_DBI_BASE_ADDR_V2_HI); 401 402 if (pci->atu_phys_addr) { 403 writel(lower_32_bits(pci->atu_phys_addr), pcie->parf + 404 PARF_ATU_BASE_ADDR); 405 writel(upper_32_bits(pci->atu_phys_addr), pcie->parf + 406 PARF_ATU_BASE_ADDR_HI); 407 } 408 409 writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2); 410 writel(SLV_ADDR_SPACE_SZ, pcie->parf + 411 PARF_SLV_ADDR_SPACE_SIZE_V2_HI); 412 } 413 } 414 415 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) 416 { 417 u32 val; 418 419 /* enable link training */ 420 val = readl(pcie->elbi + ELBI_SYS_CTRL); 421 val |= ELBI_SYS_CTRL_LT_ENABLE; 422 writel(val, pcie->elbi + ELBI_SYS_CTRL); 423 } 424 425 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) 426 { 427 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 428 struct dw_pcie *pci = pcie->pci; 429 struct device *dev = pci->dev; 430 bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064"); 431 int ret; 432 433 res->supplies[0].supply = "vdda"; 434 res->supplies[1].supply = "vdda_phy"; 435 res->supplies[2].supply = "vdda_refclk"; 436 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 437 res->supplies); 438 if (ret) 439 return ret; 440 441 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 442 if (res->num_clks < 0) { 443 dev_err(dev, "Failed to get clocks\n"); 444 return res->num_clks; 445 } 446 447 res->resets[0].id = "pci"; 448 res->resets[1].id = "axi"; 449 res->resets[2].id = "ahb"; 450 res->resets[3].id = "por"; 451 res->resets[4].id = "phy"; 452 res->resets[5].id = "ext"; 453 454 /* ext is optional on APQ8016 */ 455 res->num_resets = is_apq ? 
5 : 6; 456 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); 457 if (ret < 0) 458 return ret; 459 460 return 0; 461 } 462 463 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) 464 { 465 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 466 467 clk_bulk_disable_unprepare(res->num_clks, res->clks); 468 reset_control_bulk_assert(res->num_resets, res->resets); 469 470 writel(1, pcie->parf + PARF_PHY_CTRL); 471 472 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 473 } 474 475 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) 476 { 477 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 478 struct dw_pcie *pci = pcie->pci; 479 struct device *dev = pci->dev; 480 int ret; 481 482 /* reset the PCIe interface as uboot can leave it undefined state */ 483 ret = reset_control_bulk_assert(res->num_resets, res->resets); 484 if (ret < 0) { 485 dev_err(dev, "cannot assert resets\n"); 486 return ret; 487 } 488 489 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 490 if (ret < 0) { 491 dev_err(dev, "cannot enable regulators\n"); 492 return ret; 493 } 494 495 ret = reset_control_bulk_deassert(res->num_resets, res->resets); 496 if (ret < 0) { 497 dev_err(dev, "cannot deassert resets\n"); 498 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 499 return ret; 500 } 501 502 return 0; 503 } 504 505 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) 506 { 507 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 508 struct dw_pcie *pci = pcie->pci; 509 struct device *dev = pci->dev; 510 struct device_node *node = dev->of_node; 511 u32 val; 512 int ret; 513 514 /* enable PCIe clocks and resets */ 515 val = readl(pcie->parf + PARF_PHY_CTRL); 516 val &= ~PHY_TEST_PWR_DOWN; 517 writel(val, pcie->parf + PARF_PHY_CTRL); 518 519 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 520 if (ret) 521 return ret; 522 523 if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || 524 of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { 525 writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | 526 PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | 527 PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), 528 pcie->parf + PARF_PCS_DEEMPH); 529 writel(PCS_SWING_TX_SWING_FULL(120) | 530 PCS_SWING_TX_SWING_LOW(120), 531 pcie->parf + PARF_PCS_SWING); 532 writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS); 533 } 534 535 if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { 536 /* set TX termination offset */ 537 val = readl(pcie->parf + PARF_PHY_CTRL); 538 val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; 539 val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); 540 writel(val, pcie->parf + PARF_PHY_CTRL); 541 } 542 543 /* enable external reference clock */ 544 val = readl(pcie->parf + PARF_PHY_REFCLK); 545 /* USE_PAD is required only for ipq806x */ 546 if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) 547 val &= ~PHY_REFCLK_USE_PAD; 548 val |= PHY_REFCLK_SSP_EN; 549 writel(val, pcie->parf + PARF_PHY_REFCLK); 550 551 /* wait for clock acquisition */ 552 usleep_range(1000, 1500); 553 554 /* Set the Max TLP size to 2K, instead of using default of 4K */ 555 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, 556 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0); 557 writel(CFG_BRIDGE_SB_INIT, 558 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1); 559 560 qcom_pcie_clear_hpc(pcie->pci); 561 562 return 0; 563 } 564 565 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) 566 { 567 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 568 struct dw_pcie *pci = 
pcie->pci; 569 struct device *dev = pci->dev; 570 571 res->vdda = devm_regulator_get(dev, "vdda"); 572 if (IS_ERR(res->vdda)) 573 return PTR_ERR(res->vdda); 574 575 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 576 if (res->num_clks < 0) { 577 dev_err(dev, "Failed to get clocks\n"); 578 return res->num_clks; 579 } 580 581 res->core = devm_reset_control_get_exclusive(dev, "core"); 582 return PTR_ERR_OR_ZERO(res->core); 583 } 584 585 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) 586 { 587 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 588 589 reset_control_assert(res->core); 590 clk_bulk_disable_unprepare(res->num_clks, res->clks); 591 regulator_disable(res->vdda); 592 } 593 594 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) 595 { 596 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 597 struct dw_pcie *pci = pcie->pci; 598 struct device *dev = pci->dev; 599 int ret; 600 601 ret = reset_control_deassert(res->core); 602 if (ret) { 603 dev_err(dev, "cannot deassert core reset\n"); 604 return ret; 605 } 606 607 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 608 if (ret) { 609 dev_err(dev, "cannot prepare/enable clocks\n"); 610 goto err_assert_reset; 611 } 612 613 ret = regulator_enable(res->vdda); 614 if (ret) { 615 dev_err(dev, "cannot enable vdda regulator\n"); 616 goto err_disable_clks; 617 } 618 619 return 0; 620 621 err_disable_clks: 622 clk_bulk_disable_unprepare(res->num_clks, res->clks); 623 err_assert_reset: 624 reset_control_assert(res->core); 625 626 return ret; 627 } 628 629 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) 630 { 631 qcom_pcie_configure_dbi_base(pcie); 632 633 if (IS_ENABLED(CONFIG_PCI_MSI)) { 634 u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); 635 636 val |= EN; 637 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); 638 } 639 640 qcom_pcie_clear_hpc(pcie->pci); 641 642 return 0; 643 } 644 645 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) 646 { 647 u32 val; 648 649 /* enable link training */ 650 val = readl(pcie->parf + PARF_LTSSM); 651 val |= LTSSM_EN; 652 writel(val, pcie->parf + PARF_LTSSM); 653 } 654 655 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) 656 { 657 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 658 struct dw_pcie *pci = pcie->pci; 659 struct device *dev = pci->dev; 660 int ret; 661 662 res->supplies[0].supply = "vdda"; 663 res->supplies[1].supply = "vddpe-3v3"; 664 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 665 res->supplies); 666 if (ret) 667 return ret; 668 669 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 670 if (res->num_clks < 0) { 671 dev_err(dev, "Failed to get clocks\n"); 672 return res->num_clks; 673 } 674 675 return 0; 676 } 677 678 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) 679 { 680 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 681 682 clk_bulk_disable_unprepare(res->num_clks, res->clks); 683 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 684 } 685 686 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) 687 { 688 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 689 struct dw_pcie *pci = pcie->pci; 690 struct device *dev = pci->dev; 691 int ret; 692 693 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 694 if (ret < 0) { 695 dev_err(dev, "cannot enable regulators\n"); 696 return ret; 697 } 698 699 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 700 if (ret) { 701 dev_err(dev, "cannot 
prepare/enable clocks\n"); 702 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 703 return ret; 704 } 705 706 return 0; 707 } 708 709 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) 710 { 711 u32 val; 712 713 /* enable PCIe clocks and resets */ 714 val = readl(pcie->parf + PARF_PHY_CTRL); 715 val &= ~PHY_TEST_PWR_DOWN; 716 writel(val, pcie->parf + PARF_PHY_CTRL); 717 718 qcom_pcie_configure_dbi_base(pcie); 719 720 /* MAC PHY_POWERDOWN MUX DISABLE */ 721 val = readl(pcie->parf + PARF_SYS_CTRL); 722 val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; 723 writel(val, pcie->parf + PARF_SYS_CTRL); 724 725 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 726 val |= BYPASS; 727 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 728 729 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 730 val |= EN; 731 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 732 733 qcom_pcie_clear_hpc(pcie->pci); 734 735 return 0; 736 } 737 738 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) 739 { 740 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 741 struct dw_pcie *pci = pcie->pci; 742 struct device *dev = pci->dev; 743 bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); 744 int ret; 745 746 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 747 if (res->num_clks < 0) { 748 dev_err(dev, "Failed to get clocks\n"); 749 return res->num_clks; 750 } 751 752 res->resets[0].id = "axi_m"; 753 res->resets[1].id = "axi_s"; 754 res->resets[2].id = "axi_m_sticky"; 755 res->resets[3].id = "pipe_sticky"; 756 res->resets[4].id = "pwr"; 757 res->resets[5].id = "ahb"; 758 res->resets[6].id = "pipe"; 759 res->resets[7].id = "axi_m_vmid"; 760 res->resets[8].id = "axi_s_xpu"; 761 res->resets[9].id = "parf"; 762 res->resets[10].id = "phy"; 763 res->resets[11].id = "phy_ahb"; 764 765 res->num_resets = is_ipq ? 
12 : 6; 766 767 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); 768 if (ret < 0) 769 return ret; 770 771 return 0; 772 } 773 774 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) 775 { 776 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 777 778 reset_control_bulk_assert(res->num_resets, res->resets); 779 clk_bulk_disable_unprepare(res->num_clks, res->clks); 780 } 781 782 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) 783 { 784 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 785 struct dw_pcie *pci = pcie->pci; 786 struct device *dev = pci->dev; 787 int ret; 788 789 ret = reset_control_bulk_assert(res->num_resets, res->resets); 790 if (ret < 0) { 791 dev_err(dev, "cannot assert resets\n"); 792 return ret; 793 } 794 795 usleep_range(10000, 12000); 796 797 ret = reset_control_bulk_deassert(res->num_resets, res->resets); 798 if (ret < 0) { 799 dev_err(dev, "cannot deassert resets\n"); 800 return ret; 801 } 802 803 usleep_range(10000, 12000); 804 805 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 806 if (ret) { 807 reset_control_bulk_assert(res->num_resets, res->resets); 808 return ret; 809 } 810 811 return 0; 812 } 813 814 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) 815 { 816 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 817 struct dw_pcie *pci = pcie->pci; 818 struct device *dev = pci->dev; 819 int ret; 820 821 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 822 if (res->num_clks < 0) { 823 dev_err(dev, "Failed to get clocks\n"); 824 return res->num_clks; 825 } 826 827 res->rst[0].id = "axi_m"; 828 res->rst[1].id = "axi_s"; 829 res->rst[2].id = "pipe"; 830 res->rst[3].id = "axi_m_sticky"; 831 res->rst[4].id = "sticky"; 832 res->rst[5].id = "ahb"; 833 res->rst[6].id = "sleep"; 834 835 ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst); 836 if (ret < 0) 837 return ret; 838 839 return 0; 840 } 841 842 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) 843 { 844 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 845 846 clk_bulk_disable_unprepare(res->num_clks, res->clks); 847 } 848 849 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) 850 { 851 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 852 struct dw_pcie *pci = pcie->pci; 853 struct device *dev = pci->dev; 854 int ret; 855 856 ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); 857 if (ret < 0) { 858 dev_err(dev, "cannot assert resets\n"); 859 return ret; 860 } 861 862 usleep_range(2000, 2500); 863 864 ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst); 865 if (ret < 0) { 866 dev_err(dev, "cannot deassert resets\n"); 867 return ret; 868 } 869 870 /* 871 * Don't have a way to see if the reset has completed. 872 * Wait for some time. 873 */ 874 usleep_range(2000, 2500); 875 876 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 877 if (ret) { 878 dev_err(dev, "cannot prepare/enable clocks\n"); 879 goto err_assert_resets; 880 } 881 882 return 0; 883 884 err_assert_resets: 885 /* 886 * Not checking for failure, will anyway return 887 * the original failure in 'ret'. 
888 */ 889 reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); 890 891 return ret; 892 } 893 894 static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) 895 { 896 struct dw_pcie *pci = pcie->pci; 897 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 898 u32 val; 899 900 val = readl(pcie->parf + PARF_PHY_CTRL); 901 val &= ~PHY_TEST_PWR_DOWN; 902 writel(val, pcie->parf + PARF_PHY_CTRL); 903 904 qcom_pcie_configure_dbi_atu_base(pcie); 905 906 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS 907 | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 908 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 909 pcie->parf + PARF_SYS_CTRL); 910 writel(0, pcie->parf + PARF_Q2A_FLUSH); 911 912 writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); 913 914 dw_pcie_dbi_ro_wr_en(pci); 915 916 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 917 918 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 919 val &= ~PCI_EXP_LNKCAP_ASPMS; 920 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 921 922 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 923 PCI_EXP_DEVCTL2); 924 925 dw_pcie_dbi_ro_wr_dis(pci); 926 927 return 0; 928 } 929 930 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) 931 { 932 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 933 struct dw_pcie *pci = pcie->pci; 934 struct device *dev = pci->dev; 935 int ret; 936 937 res->rst = devm_reset_control_array_get_exclusive(dev); 938 if (IS_ERR(res->rst)) 939 return PTR_ERR(res->rst); 940 941 res->supplies[0].supply = "vdda"; 942 res->supplies[1].supply = "vddpe-3v3"; 943 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 944 res->supplies); 945 if (ret) 946 return ret; 947 948 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 949 if (res->num_clks < 0) { 950 dev_err(dev, "Failed to get clocks\n"); 951 return res->num_clks; 952 } 953 954 return 0; 955 } 956 957 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) 958 { 959 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 960 struct dw_pcie *pci = pcie->pci; 961 struct device *dev = pci->dev; 962 u32 val; 963 int ret; 964 965 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 966 if (ret < 0) { 967 dev_err(dev, "cannot enable regulators\n"); 968 return ret; 969 } 970 971 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 972 if (ret < 0) 973 goto err_disable_regulators; 974 975 ret = reset_control_assert(res->rst); 976 if (ret) { 977 dev_err(dev, "reset assert failed (%d)\n", ret); 978 goto err_disable_clocks; 979 } 980 981 usleep_range(1000, 1500); 982 983 ret = reset_control_deassert(res->rst); 984 if (ret) { 985 dev_err(dev, "reset deassert failed (%d)\n", ret); 986 goto err_disable_clocks; 987 } 988 989 /* Wait for reset to complete, required on SM8450 */ 990 usleep_range(1000, 1500); 991 992 /* configure PCIe to RC mode */ 993 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); 994 995 /* enable PCIe clocks and resets */ 996 val = readl(pcie->parf + PARF_PHY_CTRL); 997 val &= ~PHY_TEST_PWR_DOWN; 998 writel(val, pcie->parf + PARF_PHY_CTRL); 999 1000 qcom_pcie_configure_dbi_atu_base(pcie); 1001 1002 /* MAC PHY_POWERDOWN MUX DISABLE */ 1003 val = readl(pcie->parf + PARF_SYS_CTRL); 1004 val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; 1005 writel(val, pcie->parf + PARF_SYS_CTRL); 1006 1007 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 1008 val |= BYPASS; 1009 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 1010 1011 /* Enable L1 and L1SS */ 1012 val = 
readl(pcie->parf + PARF_PM_CTRL); 1013 val &= ~REQ_NOT_ENTR_L1; 1014 writel(val, pcie->parf + PARF_PM_CTRL); 1015 1016 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 1017 val |= EN; 1018 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 1019 1020 return 0; 1021 err_disable_clocks: 1022 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1023 err_disable_regulators: 1024 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1025 1026 return ret; 1027 } 1028 1029 static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) 1030 { 1031 const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg; 1032 1033 if (pcie_cfg->override_no_snoop) 1034 writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN, 1035 pcie->parf + PARF_NO_SNOOP_OVERRIDE); 1036 1037 qcom_pcie_clear_aspm_l0s(pcie->pci); 1038 qcom_pcie_clear_hpc(pcie->pci); 1039 1040 return 0; 1041 } 1042 1043 static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) 1044 { 1045 /* 1046 * Downstream devices need to be in D0 state before enabling PCI PM 1047 * substates. 1048 */ 1049 pci_set_power_state_locked(pdev, PCI_D0); 1050 pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); 1051 1052 return 0; 1053 } 1054 1055 static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie) 1056 { 1057 struct dw_pcie_rp *pp = &pcie->pci->pp; 1058 1059 pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); 1060 } 1061 1062 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) 1063 { 1064 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1065 1066 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1067 1068 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1069 } 1070 1071 static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) 1072 { 1073 /* iommu map structure */ 1074 struct { 1075 u32 bdf; 1076 u32 phandle; 1077 u32 smmu_sid; 1078 u32 smmu_sid_len; 1079 } *map; 1080 void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; 1081 struct device *dev = pcie->pci->dev; 1082 u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; 1083 int i, nr_map, size = 0; 1084 u32 smmu_sid_base; 1085 u32 val; 1086 1087 of_get_property(dev->of_node, "iommu-map", &size); 1088 if (!size) 1089 return 0; 1090 1091 /* Enable BDF to SID translation by disabling bypass mode (default) */ 1092 val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); 1093 val &= ~BDF_TO_SID_BYPASS; 1094 writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); 1095 1096 map = kzalloc(size, GFP_KERNEL); 1097 if (!map) 1098 return -ENOMEM; 1099 1100 of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, 1101 size / sizeof(u32)); 1102 1103 nr_map = size / (sizeof(*map)); 1104 1105 crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); 1106 1107 /* Registers need to be zero out first */ 1108 memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); 1109 1110 /* Extract the SMMU SID base from the first entry of iommu-map */ 1111 smmu_sid_base = map[0].smmu_sid; 1112 1113 /* Look for an available entry to hold the mapping */ 1114 for (i = 0; i < nr_map; i++) { 1115 __be16 bdf_be = cpu_to_be16(map[i].bdf); 1116 u32 val; 1117 u8 hash; 1118 1119 hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0); 1120 1121 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1122 1123 /* If the register is already populated, look for next available entry */ 1124 while (val) { 1125 u8 current_hash = hash++; 1126 u8 next_mask = 0xff; 1127 1128 /* If NEXT field is NULL then update it with next hash */ 1129 if (!(val & 
next_mask)) { 1130 val |= (u32)hash; 1131 writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); 1132 } 1133 1134 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1135 } 1136 1137 /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ 1138 val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; 1139 writel(val, bdf_to_sid_base + hash * sizeof(u32)); 1140 } 1141 1142 kfree(map); 1143 1144 return 0; 1145 } 1146 1147 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie) 1148 { 1149 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1150 struct dw_pcie *pci = pcie->pci; 1151 struct device *dev = pci->dev; 1152 1153 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 1154 if (res->num_clks < 0) { 1155 dev_err(dev, "Failed to get clocks\n"); 1156 return res->num_clks; 1157 } 1158 1159 res->rst = devm_reset_control_array_get_exclusive(dev); 1160 if (IS_ERR(res->rst)) 1161 return PTR_ERR(res->rst); 1162 1163 return 0; 1164 } 1165 1166 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie) 1167 { 1168 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1169 1170 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1171 } 1172 1173 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) 1174 { 1175 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1176 struct device *dev = pcie->pci->dev; 1177 int ret; 1178 1179 ret = reset_control_assert(res->rst); 1180 if (ret) { 1181 dev_err(dev, "reset assert failed (%d)\n", ret); 1182 return ret; 1183 } 1184 1185 /* 1186 * Delay periods before and after reset deassert are working values 1187 * from downstream Codeaurora kernel 1188 */ 1189 usleep_range(2000, 2500); 1190 1191 ret = reset_control_deassert(res->rst); 1192 if (ret) { 1193 dev_err(dev, "reset deassert failed (%d)\n", ret); 1194 return ret; 1195 } 1196 1197 usleep_range(2000, 2500); 1198 1199 return clk_bulk_prepare_enable(res->num_clks, res->clks); 1200 } 1201 1202 static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) 1203 { 1204 struct dw_pcie *pci = pcie->pci; 1205 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1206 u32 val; 1207 int i; 1208 1209 val = readl(pcie->parf + PARF_PHY_CTRL); 1210 val &= ~PHY_TEST_PWR_DOWN; 1211 writel(val, pcie->parf + PARF_PHY_CTRL); 1212 1213 qcom_pcie_configure_dbi_atu_base(pcie); 1214 1215 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); 1216 writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, 1217 pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 1218 writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS | 1219 GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL, 1220 pci->dbi_base + GEN3_RELATED_OFF); 1221 1222 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | 1223 SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 1224 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 1225 pcie->parf + PARF_SYS_CTRL); 1226 1227 writel(0, pcie->parf + PARF_Q2A_FLUSH); 1228 1229 dw_pcie_dbi_ro_wr_en(pci); 1230 1231 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 1232 1233 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 1234 val &= ~PCI_EXP_LNKCAP_ASPMS; 1235 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 1236 1237 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 1238 PCI_EXP_DEVCTL2); 1239 1240 dw_pcie_dbi_ro_wr_dis(pci); 1241 1242 for (i = 0; i < 256; i++) 1243 writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i)); 1244 1245 return 0; 1246 } 1247 1248 static bool qcom_pcie_link_up(struct dw_pcie *pci) 1249 { 1250 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1251 u16 val = 
readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1252 1253 return val & PCI_EXP_LNKSTA_DLLLA; 1254 } 1255 1256 static void qcom_pcie_phy_exit(struct qcom_pcie *pcie) 1257 { 1258 struct qcom_pcie_port *port; 1259 1260 if (list_empty(&pcie->ports)) 1261 phy_exit(pcie->phy); 1262 else 1263 list_for_each_entry(port, &pcie->ports, list) 1264 phy_exit(port->phy); 1265 } 1266 1267 static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie) 1268 { 1269 struct qcom_pcie_port *port; 1270 1271 if (list_empty(&pcie->ports)) { 1272 phy_power_off(pcie->phy); 1273 } else { 1274 list_for_each_entry(port, &pcie->ports, list) 1275 phy_power_off(port->phy); 1276 } 1277 } 1278 1279 static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie) 1280 { 1281 struct qcom_pcie_port *port; 1282 int ret = 0; 1283 1284 if (list_empty(&pcie->ports)) { 1285 ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); 1286 if (ret) 1287 return ret; 1288 1289 ret = phy_power_on(pcie->phy); 1290 if (ret) 1291 return ret; 1292 } else { 1293 list_for_each_entry(port, &pcie->ports, list) { 1294 ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); 1295 if (ret) 1296 return ret; 1297 1298 ret = phy_power_on(port->phy); 1299 if (ret) { 1300 qcom_pcie_phy_power_off(pcie); 1301 return ret; 1302 } 1303 } 1304 } 1305 1306 return ret; 1307 } 1308 1309 static int qcom_pcie_host_init(struct dw_pcie_rp *pp) 1310 { 1311 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1312 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1313 int ret; 1314 1315 qcom_ep_reset_assert(pcie); 1316 1317 ret = pcie->cfg->ops->init(pcie); 1318 if (ret) 1319 return ret; 1320 1321 ret = qcom_pcie_phy_power_on(pcie); 1322 if (ret) 1323 goto err_deinit; 1324 1325 if (pcie->cfg->ops->post_init) { 1326 ret = pcie->cfg->ops->post_init(pcie); 1327 if (ret) 1328 goto err_disable_phy; 1329 } 1330 1331 qcom_ep_reset_deassert(pcie); 1332 1333 if (pcie->cfg->ops->config_sid) { 1334 ret = pcie->cfg->ops->config_sid(pcie); 1335 if (ret) 1336 goto err_assert_reset; 1337 } 1338 1339 return 0; 1340 1341 err_assert_reset: 1342 qcom_ep_reset_assert(pcie); 1343 err_disable_phy: 1344 qcom_pcie_phy_power_off(pcie); 1345 err_deinit: 1346 pcie->cfg->ops->deinit(pcie); 1347 1348 return ret; 1349 } 1350 1351 static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp) 1352 { 1353 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1354 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1355 1356 qcom_ep_reset_assert(pcie); 1357 qcom_pcie_phy_power_off(pcie); 1358 pcie->cfg->ops->deinit(pcie); 1359 } 1360 1361 static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) 1362 { 1363 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1364 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1365 1366 if (pcie->cfg->ops->host_post_init) 1367 pcie->cfg->ops->host_post_init(pcie); 1368 } 1369 1370 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { 1371 .init = qcom_pcie_host_init, 1372 .deinit = qcom_pcie_host_deinit, 1373 .post_init = qcom_pcie_host_post_init, 1374 }; 1375 1376 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ 1377 static const struct qcom_pcie_ops ops_2_1_0 = { 1378 .get_resources = qcom_pcie_get_resources_2_1_0, 1379 .init = qcom_pcie_init_2_1_0, 1380 .post_init = qcom_pcie_post_init_2_1_0, 1381 .deinit = qcom_pcie_deinit_2_1_0, 1382 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1383 }; 1384 1385 /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ 1386 static const struct qcom_pcie_ops ops_1_0_0 = { 1387 .get_resources = qcom_pcie_get_resources_1_0_0, 1388 .init = qcom_pcie_init_1_0_0, 1389 
.post_init = qcom_pcie_post_init_1_0_0, 1390 .deinit = qcom_pcie_deinit_1_0_0, 1391 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1392 }; 1393 1394 /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ 1395 static const struct qcom_pcie_ops ops_2_3_2 = { 1396 .get_resources = qcom_pcie_get_resources_2_3_2, 1397 .init = qcom_pcie_init_2_3_2, 1398 .post_init = qcom_pcie_post_init_2_3_2, 1399 .deinit = qcom_pcie_deinit_2_3_2, 1400 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1401 }; 1402 1403 /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ 1404 static const struct qcom_pcie_ops ops_2_4_0 = { 1405 .get_resources = qcom_pcie_get_resources_2_4_0, 1406 .init = qcom_pcie_init_2_4_0, 1407 .post_init = qcom_pcie_post_init_2_3_2, 1408 .deinit = qcom_pcie_deinit_2_4_0, 1409 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1410 }; 1411 1412 /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ 1413 static const struct qcom_pcie_ops ops_2_3_3 = { 1414 .get_resources = qcom_pcie_get_resources_2_3_3, 1415 .init = qcom_pcie_init_2_3_3, 1416 .post_init = qcom_pcie_post_init_2_3_3, 1417 .deinit = qcom_pcie_deinit_2_3_3, 1418 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1419 }; 1420 1421 /* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */ 1422 static const struct qcom_pcie_ops ops_2_7_0 = { 1423 .get_resources = qcom_pcie_get_resources_2_7_0, 1424 .init = qcom_pcie_init_2_7_0, 1425 .post_init = qcom_pcie_post_init_2_7_0, 1426 .deinit = qcom_pcie_deinit_2_7_0, 1427 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1428 }; 1429 1430 /* Qcom IP rev.: 1.9.0 */ 1431 static const struct qcom_pcie_ops ops_1_9_0 = { 1432 .get_resources = qcom_pcie_get_resources_2_7_0, 1433 .init = qcom_pcie_init_2_7_0, 1434 .post_init = qcom_pcie_post_init_2_7_0, 1435 .host_post_init = qcom_pcie_host_post_init_2_7_0, 1436 .deinit = qcom_pcie_deinit_2_7_0, 1437 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1438 .config_sid = qcom_pcie_config_sid_1_9_0, 1439 }; 1440 1441 /* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */ 1442 static const struct qcom_pcie_ops ops_1_21_0 = { 1443 .get_resources = qcom_pcie_get_resources_2_7_0, 1444 .init = qcom_pcie_init_2_7_0, 1445 .post_init = qcom_pcie_post_init_2_7_0, 1446 .host_post_init = qcom_pcie_host_post_init_2_7_0, 1447 .deinit = qcom_pcie_deinit_2_7_0, 1448 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1449 }; 1450 1451 /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */ 1452 static const struct qcom_pcie_ops ops_2_9_0 = { 1453 .get_resources = qcom_pcie_get_resources_2_9_0, 1454 .init = qcom_pcie_init_2_9_0, 1455 .post_init = qcom_pcie_post_init_2_9_0, 1456 .deinit = qcom_pcie_deinit_2_9_0, 1457 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1458 }; 1459 1460 static const struct qcom_pcie_cfg cfg_1_0_0 = { 1461 .ops = &ops_1_0_0, 1462 }; 1463 1464 static const struct qcom_pcie_cfg cfg_1_9_0 = { 1465 .ops = &ops_1_9_0, 1466 }; 1467 1468 static const struct qcom_pcie_cfg cfg_1_34_0 = { 1469 .ops = &ops_1_9_0, 1470 .override_no_snoop = true, 1471 }; 1472 1473 static const struct qcom_pcie_cfg cfg_2_1_0 = { 1474 .ops = &ops_2_1_0, 1475 }; 1476 1477 static const struct qcom_pcie_cfg cfg_2_3_2 = { 1478 .ops = &ops_2_3_2, 1479 }; 1480 1481 static const struct qcom_pcie_cfg cfg_2_3_3 = { 1482 .ops = &ops_2_3_3, 1483 }; 1484 1485 static const struct qcom_pcie_cfg cfg_2_4_0 = { 1486 .ops = &ops_2_4_0, 1487 }; 1488 1489 static const struct qcom_pcie_cfg cfg_2_7_0 = { 1490 .ops = &ops_2_7_0, 1491 }; 1492 1493 static const struct qcom_pcie_cfg cfg_2_9_0 = { 1494 .ops = &ops_2_9_0, 1495 }; 1496 1497 static const struct 
qcom_pcie_cfg cfg_sc8280xp = { 1498 .ops = &ops_1_21_0, 1499 .no_l0s = true, 1500 }; 1501 1502 static const struct qcom_pcie_cfg cfg_fw_managed = { 1503 .firmware_managed = true, 1504 }; 1505 1506 static const struct dw_pcie_ops dw_pcie_ops = { 1507 .link_up = qcom_pcie_link_up, 1508 .start_link = qcom_pcie_start_link, 1509 }; 1510 1511 static int qcom_pcie_icc_init(struct qcom_pcie *pcie) 1512 { 1513 struct dw_pcie *pci = pcie->pci; 1514 int ret; 1515 1516 pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem"); 1517 if (IS_ERR(pcie->icc_mem)) 1518 return PTR_ERR(pcie->icc_mem); 1519 1520 pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie"); 1521 if (IS_ERR(pcie->icc_cpu)) 1522 return PTR_ERR(pcie->icc_cpu); 1523 /* 1524 * Some Qualcomm platforms require interconnect bandwidth constraints 1525 * to be set before enabling interconnect clocks. 1526 * 1527 * Set an initial peak bandwidth corresponding to single-lane Gen 1 1528 * for the pcie-mem path. 1529 */ 1530 ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1)); 1531 if (ret) { 1532 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", 1533 ret); 1534 return ret; 1535 } 1536 1537 /* 1538 * Since the CPU-PCIe path is only used for activities like register 1539 * access of the host controller and endpoint Config/BAR space access, 1540 * HW team has recommended to use a minimal bandwidth of 1KBps just to 1541 * keep the path active. 1542 */ 1543 ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1)); 1544 if (ret) { 1545 dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n", 1546 ret); 1547 icc_set_bw(pcie->icc_mem, 0, 0); 1548 return ret; 1549 } 1550 1551 return 0; 1552 } 1553 1554 static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie) 1555 { 1556 u32 offset, status, width, speed; 1557 struct dw_pcie *pci = pcie->pci; 1558 unsigned long freq_kbps; 1559 struct dev_pm_opp *opp; 1560 int ret, freq_mbps; 1561 1562 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1563 status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1564 1565 /* Only update constraints if link is up. 
*/ 1566 if (!(status & PCI_EXP_LNKSTA_DLLLA)) 1567 return; 1568 1569 speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status); 1570 width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status); 1571 1572 if (pcie->icc_mem) { 1573 ret = icc_set_bw(pcie->icc_mem, 0, 1574 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed)); 1575 if (ret) { 1576 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", 1577 ret); 1578 } 1579 } else if (pcie->use_pm_opp) { 1580 freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]); 1581 if (freq_mbps < 0) 1582 return; 1583 1584 freq_kbps = freq_mbps * KILO; 1585 opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, 1586 true); 1587 if (!IS_ERR(opp)) { 1588 ret = dev_pm_opp_set_opp(pci->dev, opp); 1589 if (ret) 1590 dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n", 1591 freq_kbps * width, ret); 1592 dev_pm_opp_put(opp); 1593 } 1594 } 1595 } 1596 1597 static int qcom_pcie_link_transition_count(struct seq_file *s, void *data) 1598 { 1599 struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private); 1600 1601 seq_printf(s, "L0s transition count: %u\n", 1602 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); 1603 1604 seq_printf(s, "L1 transition count: %u\n", 1605 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); 1606 1607 seq_printf(s, "L1.1 transition count: %u\n", 1608 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); 1609 1610 seq_printf(s, "L1.2 transition count: %u\n", 1611 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); 1612 1613 seq_printf(s, "L2 transition count: %u\n", 1614 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); 1615 1616 return 0; 1617 } 1618 1619 static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie) 1620 { 1621 struct dw_pcie *pci = pcie->pci; 1622 struct device *dev = pci->dev; 1623 char *name; 1624 1625 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); 1626 if (!name) 1627 return; 1628 1629 pcie->debugfs = debugfs_create_dir(name, NULL); 1630 debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs, 1631 qcom_pcie_link_transition_count); 1632 } 1633 1634 static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) 1635 { 1636 struct qcom_pcie *pcie = data; 1637 struct dw_pcie_rp *pp = &pcie->pci->pp; 1638 struct device *dev = pcie->pci->dev; 1639 u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS); 1640 1641 writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR); 1642 1643 if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) { 1644 msleep(PCIE_RESET_CONFIG_WAIT_MS); 1645 dev_dbg(dev, "Received Link up event. Starting enumeration!\n"); 1646 /* Rescan the bus to enumerate endpoint devices */ 1647 pci_lock_rescan_remove(); 1648 pci_rescan_bus(pp->bridge->bus); 1649 pci_unlock_rescan_remove(); 1650 1651 qcom_pcie_icc_opp_update(pcie); 1652 } else { 1653 dev_WARN_ONCE(dev, 1, "Received unknown event. 
INT_STATUS: 0x%08x\n", 1654 status); 1655 } 1656 1657 return IRQ_HANDLED; 1658 } 1659 1660 static void qcom_pci_free_msi(void *ptr) 1661 { 1662 struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr; 1663 1664 if (pp && pp->has_msi_ctrl) 1665 dw_pcie_free_msi(pp); 1666 } 1667 1668 static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg) 1669 { 1670 struct device *dev = cfg->parent; 1671 struct dw_pcie_rp *pp; 1672 struct dw_pcie *pci; 1673 int ret; 1674 1675 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1676 if (!pci) 1677 return -ENOMEM; 1678 1679 pci->dev = dev; 1680 pp = &pci->pp; 1681 pci->dbi_base = cfg->win; 1682 pp->num_vectors = MSI_DEF_NUM_VECTORS; 1683 1684 ret = dw_pcie_msi_host_init(pp); 1685 if (ret) 1686 return ret; 1687 1688 pp->has_msi_ctrl = true; 1689 dw_pcie_msi_init(pp); 1690 1691 return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp); 1692 } 1693 1694 static const struct pci_ecam_ops pci_qcom_ecam_ops = { 1695 .init = qcom_pcie_ecam_host_init, 1696 .pci_ops = { 1697 .map_bus = pci_ecam_map_bus, 1698 .read = pci_generic_config_read, 1699 .write = pci_generic_config_write, 1700 } 1701 }; 1702 1703 static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node) 1704 { 1705 struct device *dev = pcie->pci->dev; 1706 struct qcom_pcie_port *port; 1707 struct gpio_desc *reset; 1708 struct phy *phy; 1709 int ret; 1710 1711 reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node), 1712 "reset", GPIOD_OUT_HIGH, "PERST#"); 1713 if (IS_ERR(reset)) 1714 return PTR_ERR(reset); 1715 1716 phy = devm_of_phy_get(dev, node, NULL); 1717 if (IS_ERR(phy)) 1718 return PTR_ERR(phy); 1719 1720 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); 1721 if (!port) 1722 return -ENOMEM; 1723 1724 ret = phy_init(phy); 1725 if (ret) 1726 return ret; 1727 1728 port->reset = reset; 1729 port->phy = phy; 1730 INIT_LIST_HEAD(&port->list); 1731 list_add_tail(&port->list, &pcie->ports); 1732 1733 return 0; 1734 } 1735 1736 static int qcom_pcie_parse_ports(struct qcom_pcie *pcie) 1737 { 1738 struct device *dev = pcie->pci->dev; 1739 struct qcom_pcie_port *port, *tmp; 1740 int ret = -ENOENT; 1741 1742 for_each_available_child_of_node_scoped(dev->of_node, of_port) { 1743 ret = qcom_pcie_parse_port(pcie, of_port); 1744 if (ret) 1745 goto err_port_del; 1746 } 1747 1748 return ret; 1749 1750 err_port_del: 1751 list_for_each_entry_safe(port, tmp, &pcie->ports, list) 1752 list_del(&port->list); 1753 1754 return ret; 1755 } 1756 1757 static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie) 1758 { 1759 struct device *dev = pcie->pci->dev; 1760 int ret; 1761 1762 pcie->phy = devm_phy_optional_get(dev, "pciephy"); 1763 if (IS_ERR(pcie->phy)) 1764 return PTR_ERR(pcie->phy); 1765 1766 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); 1767 if (IS_ERR(pcie->reset)) 1768 return PTR_ERR(pcie->reset); 1769 1770 ret = phy_init(pcie->phy); 1771 if (ret) 1772 return ret; 1773 1774 return 0; 1775 } 1776 1777 static int qcom_pcie_probe(struct platform_device *pdev) 1778 { 1779 const struct qcom_pcie_cfg *pcie_cfg; 1780 unsigned long max_freq = ULONG_MAX; 1781 struct qcom_pcie_port *port, *tmp; 1782 struct device *dev = &pdev->dev; 1783 struct dev_pm_opp *opp; 1784 struct qcom_pcie *pcie; 1785 struct dw_pcie_rp *pp; 1786 struct resource *res; 1787 struct dw_pcie *pci; 1788 int ret, irq; 1789 char *name; 1790 1791 pcie_cfg = of_device_get_match_data(dev); 1792 if (!pcie_cfg) { 1793 dev_err(dev, "No platform data\n"); 1794 return -ENODATA; 1795 } 1796 1797 if 
(!pcie_cfg->firmware_managed && !pcie_cfg->ops) { 1798 dev_err(dev, "No platform ops\n"); 1799 return -ENODATA; 1800 } 1801 1802 pm_runtime_enable(dev); 1803 ret = pm_runtime_get_sync(dev); 1804 if (ret < 0) 1805 goto err_pm_runtime_put; 1806 1807 if (pcie_cfg->firmware_managed) { 1808 struct pci_host_bridge *bridge; 1809 struct pci_config_window *cfg; 1810 1811 bridge = devm_pci_alloc_host_bridge(dev, 0); 1812 if (!bridge) { 1813 ret = -ENOMEM; 1814 goto err_pm_runtime_put; 1815 } 1816 1817 /* Parse and map our ECAM configuration space area */ 1818 cfg = pci_host_common_ecam_create(dev, bridge, 1819 &pci_qcom_ecam_ops); 1820 if (IS_ERR(cfg)) { 1821 ret = PTR_ERR(cfg); 1822 goto err_pm_runtime_put; 1823 } 1824 1825 bridge->sysdata = cfg; 1826 bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops; 1827 bridge->msi_domain = true; 1828 1829 ret = pci_host_probe(bridge); 1830 if (ret) 1831 goto err_pm_runtime_put; 1832 1833 return 0; 1834 } 1835 1836 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 1837 if (!pcie) { 1838 ret = -ENOMEM; 1839 goto err_pm_runtime_put; 1840 } 1841 1842 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1843 if (!pci) { 1844 ret = -ENOMEM; 1845 goto err_pm_runtime_put; 1846 } 1847 1848 INIT_LIST_HEAD(&pcie->ports); 1849 1850 pci->dev = dev; 1851 pci->ops = &dw_pcie_ops; 1852 pp = &pci->pp; 1853 1854 pcie->pci = pci; 1855 1856 pcie->cfg = pcie_cfg; 1857 1858 pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); 1859 if (IS_ERR(pcie->parf)) { 1860 ret = PTR_ERR(pcie->parf); 1861 goto err_pm_runtime_put; 1862 } 1863 1864 pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); 1865 if (IS_ERR(pcie->elbi)) { 1866 ret = PTR_ERR(pcie->elbi); 1867 goto err_pm_runtime_put; 1868 } 1869 1870 /* MHI region is optional */ 1871 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi"); 1872 if (res) { 1873 pcie->mhi = devm_ioremap_resource(dev, res); 1874 if (IS_ERR(pcie->mhi)) { 1875 ret = PTR_ERR(pcie->mhi); 1876 goto err_pm_runtime_put; 1877 } 1878 } 1879 1880 /* OPP table is optional */ 1881 ret = devm_pm_opp_of_add_table(dev); 1882 if (ret && ret != -ENODEV) { 1883 dev_err_probe(dev, ret, "Failed to add OPP table\n"); 1884 goto err_pm_runtime_put; 1885 } 1886 1887 /* 1888 * Before the PCIe link is initialized, vote for highest OPP in the OPP 1889 * table, so that we are voting for maximum voltage corner for the 1890 * link to come up in maximum supported speed. At the end of the 1891 * probe(), OPP will be updated using qcom_pcie_icc_opp_update(). 
1892 */ 1893 if (!ret) { 1894 opp = dev_pm_opp_find_freq_floor(dev, &max_freq); 1895 if (IS_ERR(opp)) { 1896 ret = PTR_ERR(opp); 1897 dev_err_probe(pci->dev, ret, 1898 "Unable to find max freq OPP\n"); 1899 goto err_pm_runtime_put; 1900 } else { 1901 ret = dev_pm_opp_set_opp(dev, opp); 1902 } 1903 1904 dev_pm_opp_put(opp); 1905 if (ret) { 1906 dev_err_probe(pci->dev, ret, 1907 "Failed to set OPP for freq %lu\n", 1908 max_freq); 1909 goto err_pm_runtime_put; 1910 } 1911 1912 pcie->use_pm_opp = true; 1913 } else { 1914 /* Skip ICC init if OPP is supported as it is handled by OPP */ 1915 ret = qcom_pcie_icc_init(pcie); 1916 if (ret) 1917 goto err_pm_runtime_put; 1918 } 1919 1920 ret = pcie->cfg->ops->get_resources(pcie); 1921 if (ret) 1922 goto err_pm_runtime_put; 1923 1924 pp->ops = &qcom_pcie_dw_ops; 1925 1926 ret = qcom_pcie_parse_ports(pcie); 1927 if (ret) { 1928 if (ret != -ENOENT) { 1929 dev_err_probe(pci->dev, ret, 1930 "Failed to parse Root Port: %d\n", ret); 1931 goto err_pm_runtime_put; 1932 } 1933 1934 /* 1935 * In the case of properties not populated in Root Port node, 1936 * fallback to the legacy method of parsing the Host Bridge 1937 * node. This is to maintain DT backwards compatibility. 1938 */ 1939 ret = qcom_pcie_parse_legacy_binding(pcie); 1940 if (ret) 1941 goto err_pm_runtime_put; 1942 } 1943 1944 platform_set_drvdata(pdev, pcie); 1945 1946 irq = platform_get_irq_byname_optional(pdev, "global"); 1947 if (irq > 0) 1948 pp->use_linkup_irq = true; 1949 1950 ret = dw_pcie_host_init(pp); 1951 if (ret) { 1952 dev_err(dev, "cannot initialize host\n"); 1953 goto err_phy_exit; 1954 } 1955 1956 name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d", 1957 pci_domain_nr(pp->bridge->bus)); 1958 if (!name) { 1959 ret = -ENOMEM; 1960 goto err_host_deinit; 1961 } 1962 1963 if (irq > 0) { 1964 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1965 qcom_pcie_global_irq_thread, 1966 IRQF_ONESHOT, name, pcie); 1967 if (ret) { 1968 dev_err_probe(&pdev->dev, ret, 1969 "Failed to request Global IRQ\n"); 1970 goto err_host_deinit; 1971 } 1972 1973 writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7, 1974 pcie->parf + PARF_INT_ALL_MASK); 1975 } 1976 1977 qcom_pcie_icc_opp_update(pcie); 1978 1979 if (pcie->mhi) 1980 qcom_pcie_init_debugfs(pcie); 1981 1982 return 0; 1983 1984 err_host_deinit: 1985 dw_pcie_host_deinit(pp); 1986 err_phy_exit: 1987 qcom_pcie_phy_exit(pcie); 1988 list_for_each_entry_safe(port, tmp, &pcie->ports, list) 1989 list_del(&port->list); 1990 err_pm_runtime_put: 1991 pm_runtime_put(dev); 1992 pm_runtime_disable(dev); 1993 1994 return ret; 1995 } 1996 1997 static int qcom_pcie_suspend_noirq(struct device *dev) 1998 { 1999 struct qcom_pcie *pcie; 2000 int ret = 0; 2001 2002 pcie = dev_get_drvdata(dev); 2003 if (!pcie) 2004 return 0; 2005 2006 /* 2007 * Set minimum bandwidth required to keep data path functional during 2008 * suspend. 2009 */ 2010 if (pcie->icc_mem) { 2011 ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); 2012 if (ret) { 2013 dev_err(dev, 2014 "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", 2015 ret); 2016 return ret; 2017 } 2018 } 2019 2020 /* 2021 * Turn OFF the resources only for controllers without active PCIe 2022 * devices. For controllers with active devices, the resources are kept 2023 * ON and the link is expected to be in L0/L1 (sub)states. 
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger an access violation at the end of the suspend cycle,
	 * as the kernel tries to access the config space of the PCIe devices
	 * to mask MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state as that
	 * implies VDD supply will be removed and the devices may go into
	 * powerdown state. This will affect the lifetime of storage devices
	 * like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	/*
	 * Only disable the CPU-PCIe interconnect path if the suspend is not
	 * S2RAM, because on some platforms DBI access can happen very late
	 * during S2RAM and an inactive CPU-PCIe interconnect path may lead
	 * to a NoC error.
	 */
	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_disable(pcie->icc_cpu);
		if (ret)
			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);

		if (pcie->use_pm_opp)
			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
	}
	return ret;
}

static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_enable(pcie->icc_cpu);
		if (ret) {
			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
			return ret;
		}
	}

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_opp_update(pcie);

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);
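
/*
 * Illustrative note on the interconnect votes made by qcom_pcie_icc_init()
 * and qcom_pcie_icc_opp_update(): the voted bandwidth scales with both the
 * negotiated link speed and the link width. As a rough worked example,
 * assuming a Gen 3 (8.0 GT/s) x2 link:
 *
 *	PCIE_SPEED2MBS_ENC(PCIE_SPEED_8_0GT)	= 7877 Mbps per lane
 *	QCOM_PCIE_LINK_SPEED_TO_BW(3)		= Mbps_to_icc(7877)
 *						= 984625 kBps (~985 MB/s)
 *	width * QCOM_PCIE_LINK_SPEED_TO_BW(3)	= 2 * 984625 kBps
 *						~ 1.97 GB/s peak vote
 *
 * The encoded per-lane rates come from PCIE_SPEED2MBS_ENC() in
 * drivers/pci/pci.h and already account for the 128b/130b encoding overhead
 * of Gen 3 and later links.
 */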