1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Qualcomm PCIe root complex driver 4 * 5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 6 * Copyright 2015 Linaro Limited. 7 * 8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com> 9 */ 10 11 #include <linux/clk.h> 12 #include <linux/crc8.h> 13 #include <linux/debugfs.h> 14 #include <linux/delay.h> 15 #include <linux/gpio/consumer.h> 16 #include <linux/interconnect.h> 17 #include <linux/interrupt.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/kernel.h> 21 #include <linux/limits.h> 22 #include <linux/init.h> 23 #include <linux/of.h> 24 #include <linux/of_pci.h> 25 #include <linux/pci.h> 26 #include <linux/pci-ecam.h> 27 #include <linux/pm_opp.h> 28 #include <linux/pm_runtime.h> 29 #include <linux/platform_device.h> 30 #include <linux/phy/pcie.h> 31 #include <linux/phy/phy.h> 32 #include <linux/regulator/consumer.h> 33 #include <linux/reset.h> 34 #include <linux/slab.h> 35 #include <linux/types.h> 36 #include <linux/units.h> 37 38 #include "../../pci.h" 39 #include "../pci-host-common.h" 40 #include "pcie-designware.h" 41 #include "pcie-qcom-common.h" 42 43 /* PARF registers */ 44 #define PARF_SYS_CTRL 0x00 45 #define PARF_PM_CTRL 0x20 46 #define PARF_PCS_DEEMPH 0x34 47 #define PARF_PCS_SWING 0x38 48 #define PARF_PHY_CTRL 0x40 49 #define PARF_PHY_REFCLK 0x4c 50 #define PARF_CONFIG_BITS 0x50 51 #define PARF_DBI_BASE_ADDR 0x168 52 #define PARF_SLV_ADDR_SPACE_SIZE 0x16c 53 #define PARF_MHI_CLOCK_RESET_CTRL 0x174 54 #define PARF_AXI_MSTR_WR_ADDR_HALT 0x178 55 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 56 #define PARF_Q2A_FLUSH 0x1ac 57 #define PARF_LTSSM 0x1b0 58 #define PARF_INT_ALL_STATUS 0x224 59 #define PARF_INT_ALL_CLEAR 0x228 60 #define PARF_INT_ALL_MASK 0x22c 61 #define PARF_SID_OFFSET 0x234 62 #define PARF_BDF_TRANSLATE_CFG 0x24c 63 #define PARF_DBI_BASE_ADDR_V2 0x350 64 #define PARF_DBI_BASE_ADDR_V2_HI 0x354 65 #define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358 66 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c 67 #define PARF_NO_SNOOP_OVERRIDE 0x3d4 68 #define PARF_ATU_BASE_ADDR 0x634 69 #define PARF_ATU_BASE_ADDR_HI 0x638 70 #define PARF_DEVICE_TYPE 0x1000 71 #define PARF_BDF_TO_SID_TABLE_N 0x2000 72 #define PARF_BDF_TO_SID_CFG 0x2c00 73 74 /* ELBI registers */ 75 #define ELBI_SYS_CTRL 0x04 76 77 /* DBI registers */ 78 #define AXI_MSTR_RESP_COMP_CTRL0 0x818 79 #define AXI_MSTR_RESP_COMP_CTRL1 0x81c 80 81 /* MHI registers */ 82 #define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04 83 #define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c 84 #define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10 85 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84 86 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88 87 88 /* PARF_SYS_CTRL register fields */ 89 #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29) 90 #define MST_WAKEUP_EN BIT(13) 91 #define SLV_WAKEUP_EN BIT(12) 92 #define MSTR_ACLK_CGC_DIS BIT(10) 93 #define SLV_ACLK_CGC_DIS BIT(9) 94 #define CORE_CLK_CGC_DIS BIT(6) 95 #define AUX_PWR_DET BIT(4) 96 #define L23_CLK_RMV_DIS BIT(2) 97 #define L1_CLK_RMV_DIS BIT(1) 98 99 /* PARF_PM_CTRL register fields */ 100 #define REQ_NOT_ENTR_L1 BIT(5) 101 102 /* PARF_PCS_DEEMPH register fields */ 103 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x) FIELD_PREP(GENMASK(21, 16), x) 104 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) FIELD_PREP(GENMASK(13, 8), x) 105 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) FIELD_PREP(GENMASK(5, 0), x) 106 107 /* PARF_PCS_SWING register fields */ 108 #define PCS_SWING_TX_SWING_FULL(x) FIELD_PREP(GENMASK(14, 8), x) 109 #define PCS_SWING_TX_SWING_LOW(x) FIELD_PREP(GENMASK(6, 0), x) 110 111 /* PARF_PHY_CTRL register fields */ 112 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) 113 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x) 114 #define PHY_TEST_PWR_DOWN BIT(0) 115 116 /* PARF_PHY_REFCLK register fields */ 117 #define PHY_REFCLK_SSP_EN BIT(16) 118 #define PHY_REFCLK_USE_PAD BIT(12) 119 120 /* PARF_CONFIG_BITS register fields */ 121 #define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x) 122 123 /* PARF_SLV_ADDR_SPACE_SIZE register value */ 124 #define SLV_ADDR_SPACE_SZ 0x80000000 125 126 /* PARF_MHI_CLOCK_RESET_CTRL register fields */ 127 #define AHB_CLK_EN BIT(0) 128 #define MSTR_AXI_CLK_EN BIT(1) 129 #define BYPASS BIT(4) 130 131 /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */ 132 #define EN BIT(31) 133 134 /* PARF_LTSSM register fields */ 135 #define LTSSM_EN BIT(8) 136 137 /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ 138 #define PARF_INT_ALL_LINK_UP BIT(13) 139 #define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23) 140 141 /* PARF_NO_SNOOP_OVERRIDE register fields */ 142 #define WR_NO_SNOOP_OVERRIDE_EN BIT(1) 143 #define RD_NO_SNOOP_OVERRIDE_EN BIT(3) 144 145 /* PARF_DEVICE_TYPE register fields */ 146 #define DEVICE_TYPE_RC 0x4 147 148 /* PARF_BDF_TO_SID_CFG fields */ 149 #define BDF_TO_SID_BYPASS BIT(0) 150 151 /* ELBI_SYS_CTRL register fields */ 152 #define ELBI_SYS_CTRL_LT_ENABLE BIT(0) 153 154 /* AXI_MSTR_RESP_COMP_CTRL0 register fields */ 155 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 156 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 157 158 /* AXI_MSTR_RESP_COMP_CTRL1 register fields */ 159 #define CFG_BRIDGE_SB_INIT BIT(0) 160 161 /* PCI_EXP_SLTCAP register fields */ 162 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250) 163 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1) 164 #define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \ 165 PCI_EXP_SLTCAP_PCP | \ 166 PCI_EXP_SLTCAP_MRLSP | \ 167 PCI_EXP_SLTCAP_AIP | \ 168 PCI_EXP_SLTCAP_PIP | \ 169 PCI_EXP_SLTCAP_HPS | \ 170 PCI_EXP_SLTCAP_EIP | \ 171 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \ 172 PCIE_CAP_SLOT_POWER_LIMIT_SCALE) 173 174 #define PERST_DELAY_US 1000 175 176 #define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0)) 177 178 #define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \ 179 Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed])) 180 181 struct qcom_pcie_resources_1_0_0 { 182 struct clk_bulk_data *clks; 183 int num_clks; 184 struct reset_control *core; 185 struct regulator *vdda; 186 }; 187 188 #define QCOM_PCIE_2_1_0_MAX_RESETS 6 189 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 190 struct qcom_pcie_resources_2_1_0 { 191 struct clk_bulk_data *clks; 192 int num_clks; 193 struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS]; 194 int num_resets; 195 struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; 196 }; 197 198 #define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 199 struct qcom_pcie_resources_2_3_2 { 200 struct clk_bulk_data *clks; 201 int num_clks; 202 struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; 203 }; 204 205 #define QCOM_PCIE_2_3_3_MAX_RESETS 7 206 struct qcom_pcie_resources_2_3_3 { 207 struct clk_bulk_data *clks; 208 int num_clks; 209 struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS]; 210 }; 211 212 #define QCOM_PCIE_2_4_0_MAX_RESETS 12 213 struct qcom_pcie_resources_2_4_0 { 214 struct clk_bulk_data *clks; 215 int num_clks; 216 struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS]; 217 int num_resets; 218 }; 219 220 #define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2 221 struct qcom_pcie_resources_2_7_0 { 222 struct clk_bulk_data *clks; 223 int num_clks; 224 struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES]; 225 struct reset_control *rst; 226 }; 227 228 struct qcom_pcie_resources_2_9_0 { 229 struct clk_bulk_data *clks; 230 int num_clks; 231 struct reset_control *rst; 232 }; 233 234 union qcom_pcie_resources { 235 struct qcom_pcie_resources_1_0_0 v1_0_0; 236 struct qcom_pcie_resources_2_1_0 v2_1_0; 237 struct qcom_pcie_resources_2_3_2 v2_3_2; 238 struct qcom_pcie_resources_2_3_3 v2_3_3; 239 struct qcom_pcie_resources_2_4_0 v2_4_0; 240 struct qcom_pcie_resources_2_7_0 v2_7_0; 241 struct qcom_pcie_resources_2_9_0 v2_9_0; 242 }; 243 244 struct qcom_pcie; 245 246 struct qcom_pcie_ops { 247 int (*get_resources)(struct qcom_pcie *pcie); 248 int (*init)(struct qcom_pcie *pcie); 249 int (*post_init)(struct qcom_pcie *pcie); 250 void (*host_post_init)(struct qcom_pcie *pcie); 251 void (*deinit)(struct qcom_pcie *pcie); 252 void (*ltssm_enable)(struct qcom_pcie *pcie); 253 int (*config_sid)(struct qcom_pcie *pcie); 254 }; 255 256 /** 257 * struct qcom_pcie_cfg - Per SoC config struct 258 * @ops: qcom PCIe ops structure 259 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache 260 * snooping 261 * @firmware_managed: Set if the Root Complex is firmware managed 262 */ 263 struct qcom_pcie_cfg { 264 const struct qcom_pcie_ops *ops; 265 bool override_no_snoop; 266 bool firmware_managed; 267 bool no_l0s; 268 }; 269 270 struct qcom_pcie_port { 271 struct list_head list; 272 struct gpio_desc *reset; 273 struct phy *phy; 274 }; 275 276 struct qcom_pcie { 277 struct dw_pcie *pci; 278 void __iomem *parf; /* DT parf */ 279 void __iomem *mhi; 280 union qcom_pcie_resources res; 281 struct icc_path *icc_mem; 282 struct icc_path *icc_cpu; 283 const struct qcom_pcie_cfg *cfg; 284 struct dentry *debugfs; 285 struct list_head ports; 286 bool suspended; 287 bool use_pm_opp; 288 }; 289 290 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) 291 292 static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert) 293 { 294 struct qcom_pcie_port *port; 295 int val = assert ? 1 : 0; 296 297 list_for_each_entry(port, &pcie->ports, list) 298 gpiod_set_value_cansleep(port->reset, val); 299 300 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); 301 } 302 303 static void qcom_ep_reset_assert(struct qcom_pcie *pcie) 304 { 305 qcom_perst_assert(pcie, true); 306 } 307 308 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) 309 { 310 /* Ensure that PERST has been asserted for at least 100 ms */ 311 msleep(PCIE_T_PVPERL_MS); 312 qcom_perst_assert(pcie, false); 313 } 314 315 static int qcom_pcie_start_link(struct dw_pcie *pci) 316 { 317 struct qcom_pcie *pcie = to_qcom_pcie(pci); 318 319 qcom_pcie_common_set_equalization(pci); 320 321 if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) 322 qcom_pcie_common_set_16gt_lane_margining(pci); 323 324 /* Enable Link Training state machine */ 325 if (pcie->cfg->ops->ltssm_enable) 326 pcie->cfg->ops->ltssm_enable(pcie); 327 328 return 0; 329 } 330 331 static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci) 332 { 333 struct qcom_pcie *pcie = to_qcom_pcie(pci); 334 u16 offset; 335 u32 val; 336 337 if (!pcie->cfg->no_l0s) 338 return; 339 340 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 341 342 dw_pcie_dbi_ro_wr_en(pci); 343 344 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 345 val &= ~PCI_EXP_LNKCAP_ASPM_L0S; 346 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 347 348 dw_pcie_dbi_ro_wr_dis(pci); 349 } 350 351 static void qcom_pcie_clear_hpc(struct dw_pcie *pci) 352 { 353 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 354 u32 val; 355 356 dw_pcie_dbi_ro_wr_en(pci); 357 358 val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP); 359 val &= ~PCI_EXP_SLTCAP_HPC; 360 writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP); 361 362 dw_pcie_dbi_ro_wr_dis(pci); 363 } 364 365 static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie) 366 { 367 struct dw_pcie *pci = pcie->pci; 368 369 if (pci->dbi_phys_addr) { 370 /* 371 * PARF_DBI_BASE_ADDR register is in CPU domain and require to 372 * be programmed with CPU physical address. 373 */ 374 writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + 375 PARF_DBI_BASE_ADDR); 376 writel(SLV_ADDR_SPACE_SZ, pcie->parf + 377 PARF_SLV_ADDR_SPACE_SIZE); 378 } 379 } 380 381 static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie) 382 { 383 struct dw_pcie *pci = pcie->pci; 384 385 if (pci->dbi_phys_addr) { 386 /* 387 * PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are 388 * in CPU domain and require to be programmed with CPU 389 * physical addresses. 390 */ 391 writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + 392 PARF_DBI_BASE_ADDR_V2); 393 writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf + 394 PARF_DBI_BASE_ADDR_V2_HI); 395 396 if (pci->atu_phys_addr) { 397 writel(lower_32_bits(pci->atu_phys_addr), pcie->parf + 398 PARF_ATU_BASE_ADDR); 399 writel(upper_32_bits(pci->atu_phys_addr), pcie->parf + 400 PARF_ATU_BASE_ADDR_HI); 401 } 402 403 writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2); 404 writel(SLV_ADDR_SPACE_SZ, pcie->parf + 405 PARF_SLV_ADDR_SPACE_SIZE_V2_HI); 406 } 407 } 408 409 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) 410 { 411 struct dw_pcie *pci = pcie->pci; 412 u32 val; 413 414 if (!pci->elbi_base) { 415 dev_err(pci->dev, "ELBI is not present\n"); 416 return; 417 } 418 /* enable link training */ 419 val = readl(pci->elbi_base + ELBI_SYS_CTRL); 420 val |= ELBI_SYS_CTRL_LT_ENABLE; 421 writel(val, pci->elbi_base + ELBI_SYS_CTRL); 422 } 423 424 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) 425 { 426 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 427 struct dw_pcie *pci = pcie->pci; 428 struct device *dev = pci->dev; 429 bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064"); 430 int ret; 431 432 res->supplies[0].supply = "vdda"; 433 res->supplies[1].supply = "vdda_phy"; 434 res->supplies[2].supply = "vdda_refclk"; 435 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 436 res->supplies); 437 if (ret) 438 return ret; 439 440 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 441 if (res->num_clks < 0) { 442 dev_err(dev, "Failed to get clocks\n"); 443 return res->num_clks; 444 } 445 446 res->resets[0].id = "pci"; 447 res->resets[1].id = "axi"; 448 res->resets[2].id = "ahb"; 449 res->resets[3].id = "por"; 450 res->resets[4].id = "phy"; 451 res->resets[5].id = "ext"; 452 453 /* ext is optional on APQ8016 */ 454 res->num_resets = is_apq ? 5 : 6; 455 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); 456 if (ret < 0) 457 return ret; 458 459 return 0; 460 } 461 462 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) 463 { 464 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 465 466 clk_bulk_disable_unprepare(res->num_clks, res->clks); 467 reset_control_bulk_assert(res->num_resets, res->resets); 468 469 writel(1, pcie->parf + PARF_PHY_CTRL); 470 471 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 472 } 473 474 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) 475 { 476 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 477 struct dw_pcie *pci = pcie->pci; 478 struct device *dev = pci->dev; 479 int ret; 480 481 /* reset the PCIe interface as uboot can leave it undefined state */ 482 ret = reset_control_bulk_assert(res->num_resets, res->resets); 483 if (ret < 0) { 484 dev_err(dev, "cannot assert resets\n"); 485 return ret; 486 } 487 488 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 489 if (ret < 0) { 490 dev_err(dev, "cannot enable regulators\n"); 491 return ret; 492 } 493 494 ret = reset_control_bulk_deassert(res->num_resets, res->resets); 495 if (ret < 0) { 496 dev_err(dev, "cannot deassert resets\n"); 497 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 498 return ret; 499 } 500 501 return 0; 502 } 503 504 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) 505 { 506 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 507 struct dw_pcie *pci = pcie->pci; 508 struct device *dev = pci->dev; 509 struct device_node *node = dev->of_node; 510 u32 val; 511 int ret; 512 513 /* enable PCIe clocks and resets */ 514 val = readl(pcie->parf + PARF_PHY_CTRL); 515 val &= ~PHY_TEST_PWR_DOWN; 516 writel(val, pcie->parf + PARF_PHY_CTRL); 517 518 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 519 if (ret) 520 return ret; 521 522 if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || 523 of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { 524 writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | 525 PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | 526 PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), 527 pcie->parf + PARF_PCS_DEEMPH); 528 writel(PCS_SWING_TX_SWING_FULL(120) | 529 PCS_SWING_TX_SWING_LOW(120), 530 pcie->parf + PARF_PCS_SWING); 531 writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS); 532 } 533 534 if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { 535 /* set TX termination offset */ 536 val = readl(pcie->parf + PARF_PHY_CTRL); 537 val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; 538 val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); 539 writel(val, pcie->parf + PARF_PHY_CTRL); 540 } 541 542 /* enable external reference clock */ 543 val = readl(pcie->parf + PARF_PHY_REFCLK); 544 /* USE_PAD is required only for ipq806x */ 545 if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) 546 val &= ~PHY_REFCLK_USE_PAD; 547 val |= PHY_REFCLK_SSP_EN; 548 writel(val, pcie->parf + PARF_PHY_REFCLK); 549 550 /* wait for clock acquisition */ 551 usleep_range(1000, 1500); 552 553 /* Set the Max TLP size to 2K, instead of using default of 4K */ 554 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, 555 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0); 556 writel(CFG_BRIDGE_SB_INIT, 557 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1); 558 559 qcom_pcie_clear_hpc(pcie->pci); 560 561 return 0; 562 } 563 564 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) 565 { 566 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 567 struct dw_pcie *pci = pcie->pci; 568 struct device *dev = pci->dev; 569 570 res->vdda = devm_regulator_get(dev, "vdda"); 571 if (IS_ERR(res->vdda)) 572 return PTR_ERR(res->vdda); 573 574 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 575 if (res->num_clks < 0) { 576 dev_err(dev, "Failed to get clocks\n"); 577 return res->num_clks; 578 } 579 580 res->core = devm_reset_control_get_exclusive(dev, "core"); 581 return PTR_ERR_OR_ZERO(res->core); 582 } 583 584 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) 585 { 586 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 587 588 reset_control_assert(res->core); 589 clk_bulk_disable_unprepare(res->num_clks, res->clks); 590 regulator_disable(res->vdda); 591 } 592 593 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) 594 { 595 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 596 struct dw_pcie *pci = pcie->pci; 597 struct device *dev = pci->dev; 598 int ret; 599 600 ret = reset_control_deassert(res->core); 601 if (ret) { 602 dev_err(dev, "cannot deassert core reset\n"); 603 return ret; 604 } 605 606 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 607 if (ret) { 608 dev_err(dev, "cannot prepare/enable clocks\n"); 609 goto err_assert_reset; 610 } 611 612 ret = regulator_enable(res->vdda); 613 if (ret) { 614 dev_err(dev, "cannot enable vdda regulator\n"); 615 goto err_disable_clks; 616 } 617 618 return 0; 619 620 err_disable_clks: 621 clk_bulk_disable_unprepare(res->num_clks, res->clks); 622 err_assert_reset: 623 reset_control_assert(res->core); 624 625 return ret; 626 } 627 628 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) 629 { 630 qcom_pcie_configure_dbi_base(pcie); 631 632 if (IS_ENABLED(CONFIG_PCI_MSI)) { 633 u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); 634 635 val |= EN; 636 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); 637 } 638 639 qcom_pcie_clear_hpc(pcie->pci); 640 641 return 0; 642 } 643 644 static int qcom_pcie_assert_perst(struct dw_pcie *pci, bool assert) 645 { 646 struct qcom_pcie *pcie = to_qcom_pcie(pci); 647 648 if (assert) 649 qcom_ep_reset_assert(pcie); 650 else 651 qcom_ep_reset_deassert(pcie); 652 653 return 0; 654 } 655 656 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) 657 { 658 u32 val; 659 660 /* enable link training */ 661 val = readl(pcie->parf + PARF_LTSSM); 662 val |= LTSSM_EN; 663 writel(val, pcie->parf + PARF_LTSSM); 664 } 665 666 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) 667 { 668 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 669 struct dw_pcie *pci = pcie->pci; 670 struct device *dev = pci->dev; 671 int ret; 672 673 res->supplies[0].supply = "vdda"; 674 res->supplies[1].supply = "vddpe-3v3"; 675 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 676 res->supplies); 677 if (ret) 678 return ret; 679 680 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 681 if (res->num_clks < 0) { 682 dev_err(dev, "Failed to get clocks\n"); 683 return res->num_clks; 684 } 685 686 return 0; 687 } 688 689 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) 690 { 691 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 692 693 clk_bulk_disable_unprepare(res->num_clks, res->clks); 694 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 695 } 696 697 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) 698 { 699 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 700 struct dw_pcie *pci = pcie->pci; 701 struct device *dev = pci->dev; 702 int ret; 703 704 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 705 if (ret < 0) { 706 dev_err(dev, "cannot enable regulators\n"); 707 return ret; 708 } 709 710 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 711 if (ret) { 712 dev_err(dev, "cannot prepare/enable clocks\n"); 713 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 714 return ret; 715 } 716 717 return 0; 718 } 719 720 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) 721 { 722 u32 val; 723 724 /* enable PCIe clocks and resets */ 725 val = readl(pcie->parf + PARF_PHY_CTRL); 726 val &= ~PHY_TEST_PWR_DOWN; 727 writel(val, pcie->parf + PARF_PHY_CTRL); 728 729 qcom_pcie_configure_dbi_base(pcie); 730 731 /* MAC PHY_POWERDOWN MUX DISABLE */ 732 val = readl(pcie->parf + PARF_SYS_CTRL); 733 val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; 734 writel(val, pcie->parf + PARF_SYS_CTRL); 735 736 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 737 val |= BYPASS; 738 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 739 740 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 741 val |= EN; 742 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 743 744 qcom_pcie_clear_hpc(pcie->pci); 745 746 return 0; 747 } 748 749 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) 750 { 751 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 752 struct dw_pcie *pci = pcie->pci; 753 struct device *dev = pci->dev; 754 bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); 755 int ret; 756 757 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 758 if (res->num_clks < 0) { 759 dev_err(dev, "Failed to get clocks\n"); 760 return res->num_clks; 761 } 762 763 res->resets[0].id = "axi_m"; 764 res->resets[1].id = "axi_s"; 765 res->resets[2].id = "axi_m_sticky"; 766 res->resets[3].id = "pipe_sticky"; 767 res->resets[4].id = "pwr"; 768 res->resets[5].id = "ahb"; 769 res->resets[6].id = "pipe"; 770 res->resets[7].id = "axi_m_vmid"; 771 res->resets[8].id = "axi_s_xpu"; 772 res->resets[9].id = "parf"; 773 res->resets[10].id = "phy"; 774 res->resets[11].id = "phy_ahb"; 775 776 res->num_resets = is_ipq ? 12 : 6; 777 778 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); 779 if (ret < 0) 780 return ret; 781 782 return 0; 783 } 784 785 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) 786 { 787 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 788 789 reset_control_bulk_assert(res->num_resets, res->resets); 790 clk_bulk_disable_unprepare(res->num_clks, res->clks); 791 } 792 793 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) 794 { 795 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 796 struct dw_pcie *pci = pcie->pci; 797 struct device *dev = pci->dev; 798 int ret; 799 800 ret = reset_control_bulk_assert(res->num_resets, res->resets); 801 if (ret < 0) { 802 dev_err(dev, "cannot assert resets\n"); 803 return ret; 804 } 805 806 usleep_range(10000, 12000); 807 808 ret = reset_control_bulk_deassert(res->num_resets, res->resets); 809 if (ret < 0) { 810 dev_err(dev, "cannot deassert resets\n"); 811 return ret; 812 } 813 814 usleep_range(10000, 12000); 815 816 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 817 if (ret) { 818 reset_control_bulk_assert(res->num_resets, res->resets); 819 return ret; 820 } 821 822 return 0; 823 } 824 825 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) 826 { 827 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 828 struct dw_pcie *pci = pcie->pci; 829 struct device *dev = pci->dev; 830 int ret; 831 832 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 833 if (res->num_clks < 0) { 834 dev_err(dev, "Failed to get clocks\n"); 835 return res->num_clks; 836 } 837 838 res->rst[0].id = "axi_m"; 839 res->rst[1].id = "axi_s"; 840 res->rst[2].id = "pipe"; 841 res->rst[3].id = "axi_m_sticky"; 842 res->rst[4].id = "sticky"; 843 res->rst[5].id = "ahb"; 844 res->rst[6].id = "sleep"; 845 846 ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst); 847 if (ret < 0) 848 return ret; 849 850 return 0; 851 } 852 853 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) 854 { 855 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 856 857 clk_bulk_disable_unprepare(res->num_clks, res->clks); 858 } 859 860 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) 861 { 862 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 863 struct dw_pcie *pci = pcie->pci; 864 struct device *dev = pci->dev; 865 int ret; 866 867 ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); 868 if (ret < 0) { 869 dev_err(dev, "cannot assert resets\n"); 870 return ret; 871 } 872 873 usleep_range(2000, 2500); 874 875 ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst); 876 if (ret < 0) { 877 dev_err(dev, "cannot deassert resets\n"); 878 return ret; 879 } 880 881 /* 882 * Don't have a way to see if the reset has completed. 883 * Wait for some time. 884 */ 885 usleep_range(2000, 2500); 886 887 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 888 if (ret) { 889 dev_err(dev, "cannot prepare/enable clocks\n"); 890 goto err_assert_resets; 891 } 892 893 return 0; 894 895 err_assert_resets: 896 /* 897 * Not checking for failure, will anyway return 898 * the original failure in 'ret'. 899 */ 900 reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); 901 902 return ret; 903 } 904 905 static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) 906 { 907 struct dw_pcie *pci = pcie->pci; 908 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 909 u32 val; 910 911 val = readl(pcie->parf + PARF_PHY_CTRL); 912 val &= ~PHY_TEST_PWR_DOWN; 913 writel(val, pcie->parf + PARF_PHY_CTRL); 914 915 qcom_pcie_configure_dbi_atu_base(pcie); 916 917 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS 918 | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 919 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 920 pcie->parf + PARF_SYS_CTRL); 921 writel(0, pcie->parf + PARF_Q2A_FLUSH); 922 923 writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); 924 925 dw_pcie_dbi_ro_wr_en(pci); 926 927 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 928 929 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 930 val &= ~PCI_EXP_LNKCAP_ASPMS; 931 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 932 933 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 934 PCI_EXP_DEVCTL2); 935 936 dw_pcie_dbi_ro_wr_dis(pci); 937 938 return 0; 939 } 940 941 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) 942 { 943 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 944 struct dw_pcie *pci = pcie->pci; 945 struct device *dev = pci->dev; 946 int ret; 947 948 res->rst = devm_reset_control_array_get_exclusive(dev); 949 if (IS_ERR(res->rst)) 950 return PTR_ERR(res->rst); 951 952 res->supplies[0].supply = "vdda"; 953 res->supplies[1].supply = "vddpe-3v3"; 954 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 955 res->supplies); 956 if (ret) 957 return ret; 958 959 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 960 if (res->num_clks < 0) { 961 dev_err(dev, "Failed to get clocks\n"); 962 return res->num_clks; 963 } 964 965 return 0; 966 } 967 968 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) 969 { 970 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 971 struct dw_pcie *pci = pcie->pci; 972 struct device *dev = pci->dev; 973 u32 val; 974 int ret; 975 976 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 977 if (ret < 0) { 978 dev_err(dev, "cannot enable regulators\n"); 979 return ret; 980 } 981 982 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 983 if (ret < 0) 984 goto err_disable_regulators; 985 986 ret = reset_control_assert(res->rst); 987 if (ret) { 988 dev_err(dev, "reset assert failed (%d)\n", ret); 989 goto err_disable_clocks; 990 } 991 992 usleep_range(1000, 1500); 993 994 ret = reset_control_deassert(res->rst); 995 if (ret) { 996 dev_err(dev, "reset deassert failed (%d)\n", ret); 997 goto err_disable_clocks; 998 } 999 1000 /* Wait for reset to complete, required on SM8450 */ 1001 usleep_range(1000, 1500); 1002 1003 /* configure PCIe to RC mode */ 1004 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); 1005 1006 /* enable PCIe clocks and resets */ 1007 val = readl(pcie->parf + PARF_PHY_CTRL); 1008 val &= ~PHY_TEST_PWR_DOWN; 1009 writel(val, pcie->parf + PARF_PHY_CTRL); 1010 1011 qcom_pcie_configure_dbi_atu_base(pcie); 1012 1013 /* MAC PHY_POWERDOWN MUX DISABLE */ 1014 val = readl(pcie->parf + PARF_SYS_CTRL); 1015 val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; 1016 writel(val, pcie->parf + PARF_SYS_CTRL); 1017 1018 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 1019 val |= BYPASS; 1020 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 1021 1022 /* Enable L1 and L1SS */ 1023 val = readl(pcie->parf + PARF_PM_CTRL); 1024 val &= ~REQ_NOT_ENTR_L1; 1025 writel(val, pcie->parf + PARF_PM_CTRL); 1026 1027 pci->l1ss_support = true; 1028 1029 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 1030 val |= EN; 1031 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 1032 1033 return 0; 1034 err_disable_clocks: 1035 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1036 err_disable_regulators: 1037 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1038 1039 return ret; 1040 } 1041 1042 static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) 1043 { 1044 const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg; 1045 1046 if (pcie_cfg->override_no_snoop) 1047 writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN, 1048 pcie->parf + PARF_NO_SNOOP_OVERRIDE); 1049 1050 qcom_pcie_clear_aspm_l0s(pcie->pci); 1051 qcom_pcie_clear_hpc(pcie->pci); 1052 1053 return 0; 1054 } 1055 1056 static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) 1057 { 1058 /* 1059 * Downstream devices need to be in D0 state before enabling PCI PM 1060 * substates. 1061 */ 1062 pci_set_power_state_locked(pdev, PCI_D0); 1063 pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); 1064 1065 return 0; 1066 } 1067 1068 static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie) 1069 { 1070 struct dw_pcie_rp *pp = &pcie->pci->pp; 1071 1072 pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); 1073 } 1074 1075 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) 1076 { 1077 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1078 1079 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1080 1081 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1082 } 1083 1084 static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) 1085 { 1086 /* iommu map structure */ 1087 struct { 1088 u32 bdf; 1089 u32 phandle; 1090 u32 smmu_sid; 1091 u32 smmu_sid_len; 1092 } *map; 1093 void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; 1094 struct device *dev = pcie->pci->dev; 1095 u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; 1096 int i, nr_map, size = 0; 1097 u32 smmu_sid_base; 1098 u32 val; 1099 1100 of_get_property(dev->of_node, "iommu-map", &size); 1101 if (!size) 1102 return 0; 1103 1104 /* Enable BDF to SID translation by disabling bypass mode (default) */ 1105 val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); 1106 val &= ~BDF_TO_SID_BYPASS; 1107 writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); 1108 1109 map = kzalloc(size, GFP_KERNEL); 1110 if (!map) 1111 return -ENOMEM; 1112 1113 of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, 1114 size / sizeof(u32)); 1115 1116 nr_map = size / (sizeof(*map)); 1117 1118 crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); 1119 1120 /* Registers need to be zero out first */ 1121 memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); 1122 1123 /* Extract the SMMU SID base from the first entry of iommu-map */ 1124 smmu_sid_base = map[0].smmu_sid; 1125 1126 /* Look for an available entry to hold the mapping */ 1127 for (i = 0; i < nr_map; i++) { 1128 __be16 bdf_be = cpu_to_be16(map[i].bdf); 1129 u32 val; 1130 u8 hash; 1131 1132 hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0); 1133 1134 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1135 1136 /* If the register is already populated, look for next available entry */ 1137 while (val) { 1138 u8 current_hash = hash++; 1139 u8 next_mask = 0xff; 1140 1141 /* If NEXT field is NULL then update it with next hash */ 1142 if (!(val & next_mask)) { 1143 val |= (u32)hash; 1144 writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); 1145 } 1146 1147 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1148 } 1149 1150 /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ 1151 val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; 1152 writel(val, bdf_to_sid_base + hash * sizeof(u32)); 1153 } 1154 1155 kfree(map); 1156 1157 return 0; 1158 } 1159 1160 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie) 1161 { 1162 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1163 struct dw_pcie *pci = pcie->pci; 1164 struct device *dev = pci->dev; 1165 1166 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); 1167 if (res->num_clks < 0) { 1168 dev_err(dev, "Failed to get clocks\n"); 1169 return res->num_clks; 1170 } 1171 1172 res->rst = devm_reset_control_array_get_exclusive(dev); 1173 if (IS_ERR(res->rst)) 1174 return PTR_ERR(res->rst); 1175 1176 return 0; 1177 } 1178 1179 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie) 1180 { 1181 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1182 1183 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1184 } 1185 1186 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) 1187 { 1188 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1189 struct device *dev = pcie->pci->dev; 1190 int ret; 1191 1192 ret = reset_control_assert(res->rst); 1193 if (ret) { 1194 dev_err(dev, "reset assert failed (%d)\n", ret); 1195 return ret; 1196 } 1197 1198 /* 1199 * Delay periods before and after reset deassert are working values 1200 * from downstream Codeaurora kernel 1201 */ 1202 usleep_range(2000, 2500); 1203 1204 ret = reset_control_deassert(res->rst); 1205 if (ret) { 1206 dev_err(dev, "reset deassert failed (%d)\n", ret); 1207 return ret; 1208 } 1209 1210 usleep_range(2000, 2500); 1211 1212 return clk_bulk_prepare_enable(res->num_clks, res->clks); 1213 } 1214 1215 static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) 1216 { 1217 struct dw_pcie *pci = pcie->pci; 1218 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1219 u32 val; 1220 int i; 1221 1222 val = readl(pcie->parf + PARF_PHY_CTRL); 1223 val &= ~PHY_TEST_PWR_DOWN; 1224 writel(val, pcie->parf + PARF_PHY_CTRL); 1225 1226 qcom_pcie_configure_dbi_atu_base(pcie); 1227 1228 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); 1229 writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, 1230 pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 1231 writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS | 1232 GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL, 1233 pci->dbi_base + GEN3_RELATED_OFF); 1234 1235 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | 1236 SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 1237 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 1238 pcie->parf + PARF_SYS_CTRL); 1239 1240 writel(0, pcie->parf + PARF_Q2A_FLUSH); 1241 1242 dw_pcie_dbi_ro_wr_en(pci); 1243 1244 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 1245 1246 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 1247 val &= ~PCI_EXP_LNKCAP_ASPMS; 1248 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 1249 1250 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 1251 PCI_EXP_DEVCTL2); 1252 1253 dw_pcie_dbi_ro_wr_dis(pci); 1254 1255 for (i = 0; i < 256; i++) 1256 writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i)); 1257 1258 return 0; 1259 } 1260 1261 static bool qcom_pcie_link_up(struct dw_pcie *pci) 1262 { 1263 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1264 u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1265 1266 return val & PCI_EXP_LNKSTA_DLLLA; 1267 } 1268 1269 static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie) 1270 { 1271 struct qcom_pcie_port *port; 1272 1273 list_for_each_entry(port, &pcie->ports, list) 1274 phy_power_off(port->phy); 1275 } 1276 1277 static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie) 1278 { 1279 struct qcom_pcie_port *port; 1280 int ret; 1281 1282 list_for_each_entry(port, &pcie->ports, list) { 1283 ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); 1284 if (ret) 1285 return ret; 1286 1287 ret = phy_power_on(port->phy); 1288 if (ret) { 1289 qcom_pcie_phy_power_off(pcie); 1290 return ret; 1291 } 1292 } 1293 1294 return 0; 1295 } 1296 1297 static int qcom_pcie_host_init(struct dw_pcie_rp *pp) 1298 { 1299 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1300 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1301 int ret; 1302 1303 qcom_ep_reset_assert(pcie); 1304 1305 ret = pcie->cfg->ops->init(pcie); 1306 if (ret) 1307 return ret; 1308 1309 ret = qcom_pcie_phy_power_on(pcie); 1310 if (ret) 1311 goto err_deinit; 1312 1313 if (pcie->cfg->ops->post_init) { 1314 ret = pcie->cfg->ops->post_init(pcie); 1315 if (ret) 1316 goto err_disable_phy; 1317 } 1318 1319 qcom_ep_reset_deassert(pcie); 1320 1321 if (pcie->cfg->ops->config_sid) { 1322 ret = pcie->cfg->ops->config_sid(pcie); 1323 if (ret) 1324 goto err_assert_reset; 1325 } 1326 1327 return 0; 1328 1329 err_assert_reset: 1330 qcom_ep_reset_assert(pcie); 1331 err_disable_phy: 1332 qcom_pcie_phy_power_off(pcie); 1333 err_deinit: 1334 pcie->cfg->ops->deinit(pcie); 1335 1336 return ret; 1337 } 1338 1339 static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp) 1340 { 1341 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1342 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1343 1344 qcom_ep_reset_assert(pcie); 1345 qcom_pcie_phy_power_off(pcie); 1346 pcie->cfg->ops->deinit(pcie); 1347 } 1348 1349 static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) 1350 { 1351 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1352 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1353 1354 if (pcie->cfg->ops->host_post_init) 1355 pcie->cfg->ops->host_post_init(pcie); 1356 } 1357 1358 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { 1359 .init = qcom_pcie_host_init, 1360 .deinit = qcom_pcie_host_deinit, 1361 .post_init = qcom_pcie_host_post_init, 1362 }; 1363 1364 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ 1365 static const struct qcom_pcie_ops ops_2_1_0 = { 1366 .get_resources = qcom_pcie_get_resources_2_1_0, 1367 .init = qcom_pcie_init_2_1_0, 1368 .post_init = qcom_pcie_post_init_2_1_0, 1369 .deinit = qcom_pcie_deinit_2_1_0, 1370 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1371 }; 1372 1373 /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ 1374 static const struct qcom_pcie_ops ops_1_0_0 = { 1375 .get_resources = qcom_pcie_get_resources_1_0_0, 1376 .init = qcom_pcie_init_1_0_0, 1377 .post_init = qcom_pcie_post_init_1_0_0, 1378 .deinit = qcom_pcie_deinit_1_0_0, 1379 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1380 }; 1381 1382 /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ 1383 static const struct qcom_pcie_ops ops_2_3_2 = { 1384 .get_resources = qcom_pcie_get_resources_2_3_2, 1385 .init = qcom_pcie_init_2_3_2, 1386 .post_init = qcom_pcie_post_init_2_3_2, 1387 .deinit = qcom_pcie_deinit_2_3_2, 1388 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1389 }; 1390 1391 /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ 1392 static const struct qcom_pcie_ops ops_2_4_0 = { 1393 .get_resources = qcom_pcie_get_resources_2_4_0, 1394 .init = qcom_pcie_init_2_4_0, 1395 .post_init = qcom_pcie_post_init_2_3_2, 1396 .deinit = qcom_pcie_deinit_2_4_0, 1397 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1398 }; 1399 1400 /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ 1401 static const struct qcom_pcie_ops ops_2_3_3 = { 1402 .get_resources = qcom_pcie_get_resources_2_3_3, 1403 .init = qcom_pcie_init_2_3_3, 1404 .post_init = qcom_pcie_post_init_2_3_3, 1405 .deinit = qcom_pcie_deinit_2_3_3, 1406 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1407 }; 1408 1409 /* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */ 1410 static const struct qcom_pcie_ops ops_2_7_0 = { 1411 .get_resources = qcom_pcie_get_resources_2_7_0, 1412 .init = qcom_pcie_init_2_7_0, 1413 .post_init = qcom_pcie_post_init_2_7_0, 1414 .deinit = qcom_pcie_deinit_2_7_0, 1415 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1416 }; 1417 1418 /* Qcom IP rev.: 1.9.0 */ 1419 static const struct qcom_pcie_ops ops_1_9_0 = { 1420 .get_resources = qcom_pcie_get_resources_2_7_0, 1421 .init = qcom_pcie_init_2_7_0, 1422 .post_init = qcom_pcie_post_init_2_7_0, 1423 .host_post_init = qcom_pcie_host_post_init_2_7_0, 1424 .deinit = qcom_pcie_deinit_2_7_0, 1425 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1426 .config_sid = qcom_pcie_config_sid_1_9_0, 1427 }; 1428 1429 /* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */ 1430 static const struct qcom_pcie_ops ops_1_21_0 = { 1431 .get_resources = qcom_pcie_get_resources_2_7_0, 1432 .init = qcom_pcie_init_2_7_0, 1433 .post_init = qcom_pcie_post_init_2_7_0, 1434 .host_post_init = qcom_pcie_host_post_init_2_7_0, 1435 .deinit = qcom_pcie_deinit_2_7_0, 1436 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1437 }; 1438 1439 /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */ 1440 static const struct qcom_pcie_ops ops_2_9_0 = { 1441 .get_resources = qcom_pcie_get_resources_2_9_0, 1442 .init = qcom_pcie_init_2_9_0, 1443 .post_init = qcom_pcie_post_init_2_9_0, 1444 .deinit = qcom_pcie_deinit_2_9_0, 1445 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1446 }; 1447 1448 static const struct qcom_pcie_cfg cfg_1_0_0 = { 1449 .ops = &ops_1_0_0, 1450 }; 1451 1452 static const struct qcom_pcie_cfg cfg_1_9_0 = { 1453 .ops = &ops_1_9_0, 1454 }; 1455 1456 static const struct qcom_pcie_cfg cfg_1_34_0 = { 1457 .ops = &ops_1_9_0, 1458 .override_no_snoop = true, 1459 }; 1460 1461 static const struct qcom_pcie_cfg cfg_2_1_0 = { 1462 .ops = &ops_2_1_0, 1463 }; 1464 1465 static const struct qcom_pcie_cfg cfg_2_3_2 = { 1466 .ops = &ops_2_3_2, 1467 }; 1468 1469 static const struct qcom_pcie_cfg cfg_2_3_3 = { 1470 .ops = &ops_2_3_3, 1471 }; 1472 1473 static const struct qcom_pcie_cfg cfg_2_4_0 = { 1474 .ops = &ops_2_4_0, 1475 }; 1476 1477 static const struct qcom_pcie_cfg cfg_2_7_0 = { 1478 .ops = &ops_2_7_0, 1479 }; 1480 1481 static const struct qcom_pcie_cfg cfg_2_9_0 = { 1482 .ops = &ops_2_9_0, 1483 }; 1484 1485 static const struct qcom_pcie_cfg cfg_sc8280xp = { 1486 .ops = &ops_1_21_0, 1487 .no_l0s = true, 1488 }; 1489 1490 static const struct qcom_pcie_cfg cfg_fw_managed = { 1491 .firmware_managed = true, 1492 }; 1493 1494 static const struct dw_pcie_ops dw_pcie_ops = { 1495 .link_up = qcom_pcie_link_up, 1496 .start_link = qcom_pcie_start_link, 1497 .assert_perst = qcom_pcie_assert_perst, 1498 }; 1499 1500 static int qcom_pcie_icc_init(struct qcom_pcie *pcie) 1501 { 1502 struct dw_pcie *pci = pcie->pci; 1503 int ret; 1504 1505 pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem"); 1506 if (IS_ERR(pcie->icc_mem)) 1507 return PTR_ERR(pcie->icc_mem); 1508 1509 pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie"); 1510 if (IS_ERR(pcie->icc_cpu)) 1511 return PTR_ERR(pcie->icc_cpu); 1512 /* 1513 * Some Qualcomm platforms require interconnect bandwidth constraints 1514 * to be set before enabling interconnect clocks. 1515 * 1516 * Set an initial peak bandwidth corresponding to single-lane Gen 1 1517 * for the pcie-mem path. 1518 */ 1519 ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1)); 1520 if (ret) { 1521 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", 1522 ret); 1523 return ret; 1524 } 1525 1526 /* 1527 * Since the CPU-PCIe path is only used for activities like register 1528 * access of the host controller and endpoint Config/BAR space access, 1529 * HW team has recommended to use a minimal bandwidth of 1KBps just to 1530 * keep the path active. 1531 */ 1532 ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1)); 1533 if (ret) { 1534 dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n", 1535 ret); 1536 icc_set_bw(pcie->icc_mem, 0, 0); 1537 return ret; 1538 } 1539 1540 return 0; 1541 } 1542 1543 static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie) 1544 { 1545 u32 offset, status, width, speed; 1546 struct dw_pcie *pci = pcie->pci; 1547 struct dev_pm_opp_key key = {}; 1548 unsigned long freq_kbps; 1549 struct dev_pm_opp *opp; 1550 int ret, freq_mbps; 1551 1552 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1553 status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1554 1555 /* Only update constraints if link is up. */ 1556 if (!(status & PCI_EXP_LNKSTA_DLLLA)) 1557 return; 1558 1559 speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status); 1560 width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status); 1561 1562 if (pcie->icc_mem) { 1563 ret = icc_set_bw(pcie->icc_mem, 0, 1564 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed)); 1565 if (ret) { 1566 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", 1567 ret); 1568 } 1569 } else if (pcie->use_pm_opp) { 1570 freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]); 1571 if (freq_mbps < 0) 1572 return; 1573 1574 freq_kbps = freq_mbps * KILO; 1575 opp = dev_pm_opp_find_level_exact(pci->dev, speed); 1576 if (IS_ERR(opp)) { 1577 /* opp-level is not defined use only frequency */ 1578 opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, 1579 true); 1580 } else { 1581 /* put opp-level OPP */ 1582 dev_pm_opp_put(opp); 1583 1584 key.freq = freq_kbps * width; 1585 key.level = speed; 1586 key.bw = 0; 1587 opp = dev_pm_opp_find_key_exact(pci->dev, &key, true); 1588 } 1589 if (!IS_ERR(opp)) { 1590 ret = dev_pm_opp_set_opp(pci->dev, opp); 1591 if (ret) 1592 dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n", 1593 freq_kbps * width, ret); 1594 dev_pm_opp_put(opp); 1595 } 1596 } 1597 } 1598 1599 static int qcom_pcie_link_transition_count(struct seq_file *s, void *data) 1600 { 1601 struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private); 1602 1603 seq_printf(s, "L0s transition count: %u\n", 1604 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); 1605 1606 seq_printf(s, "L1 transition count: %u\n", 1607 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); 1608 1609 seq_printf(s, "L1.1 transition count: %u\n", 1610 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); 1611 1612 seq_printf(s, "L1.2 transition count: %u\n", 1613 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); 1614 1615 seq_printf(s, "L2 transition count: %u\n", 1616 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); 1617 1618 return 0; 1619 } 1620 1621 static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie) 1622 { 1623 struct dw_pcie *pci = pcie->pci; 1624 struct device *dev = pci->dev; 1625 char *name; 1626 1627 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); 1628 if (!name) 1629 return; 1630 1631 pcie->debugfs = debugfs_create_dir(name, NULL); 1632 debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs, 1633 qcom_pcie_link_transition_count); 1634 } 1635 1636 static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) 1637 { 1638 struct qcom_pcie *pcie = data; 1639 struct dw_pcie_rp *pp = &pcie->pci->pp; 1640 struct device *dev = pcie->pci->dev; 1641 u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS); 1642 1643 writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR); 1644 1645 if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) { 1646 msleep(PCIE_RESET_CONFIG_WAIT_MS); 1647 dev_dbg(dev, "Received Link up event. Starting enumeration!\n"); 1648 /* Rescan the bus to enumerate endpoint devices */ 1649 pci_lock_rescan_remove(); 1650 pci_rescan_bus(pp->bridge->bus); 1651 pci_unlock_rescan_remove(); 1652 1653 qcom_pcie_icc_opp_update(pcie); 1654 } else { 1655 dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n", 1656 status); 1657 } 1658 1659 return IRQ_HANDLED; 1660 } 1661 1662 static void qcom_pci_free_msi(void *ptr) 1663 { 1664 struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr; 1665 1666 if (pp && pp->has_msi_ctrl) 1667 dw_pcie_free_msi(pp); 1668 } 1669 1670 static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg) 1671 { 1672 struct device *dev = cfg->parent; 1673 struct dw_pcie_rp *pp; 1674 struct dw_pcie *pci; 1675 int ret; 1676 1677 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1678 if (!pci) 1679 return -ENOMEM; 1680 1681 pci->dev = dev; 1682 pp = &pci->pp; 1683 pci->dbi_base = cfg->win; 1684 pp->num_vectors = MSI_DEF_NUM_VECTORS; 1685 1686 ret = dw_pcie_msi_host_init(pp); 1687 if (ret) 1688 return ret; 1689 1690 pp->has_msi_ctrl = true; 1691 dw_pcie_msi_init(pp); 1692 1693 return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp); 1694 } 1695 1696 static const struct pci_ecam_ops pci_qcom_ecam_ops = { 1697 .init = qcom_pcie_ecam_host_init, 1698 .pci_ops = { 1699 .map_bus = pci_ecam_map_bus, 1700 .read = pci_generic_config_read, 1701 .write = pci_generic_config_write, 1702 } 1703 }; 1704 1705 static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node) 1706 { 1707 struct device *dev = pcie->pci->dev; 1708 struct qcom_pcie_port *port; 1709 struct gpio_desc *reset; 1710 struct phy *phy; 1711 int ret; 1712 1713 reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node), 1714 "reset", GPIOD_OUT_HIGH, "PERST#"); 1715 if (IS_ERR(reset)) 1716 return PTR_ERR(reset); 1717 1718 phy = devm_of_phy_get(dev, node, NULL); 1719 if (IS_ERR(phy)) 1720 return PTR_ERR(phy); 1721 1722 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); 1723 if (!port) 1724 return -ENOMEM; 1725 1726 ret = phy_init(phy); 1727 if (ret) 1728 return ret; 1729 1730 port->reset = reset; 1731 port->phy = phy; 1732 INIT_LIST_HEAD(&port->list); 1733 list_add_tail(&port->list, &pcie->ports); 1734 1735 return 0; 1736 } 1737 1738 static int qcom_pcie_parse_ports(struct qcom_pcie *pcie) 1739 { 1740 struct device *dev = pcie->pci->dev; 1741 struct qcom_pcie_port *port, *tmp; 1742 int ret = -ENOENT; 1743 1744 for_each_available_child_of_node_scoped(dev->of_node, of_port) { 1745 if (!of_node_is_type(of_port, "pci")) 1746 continue; 1747 ret = qcom_pcie_parse_port(pcie, of_port); 1748 if (ret) 1749 goto err_port_del; 1750 } 1751 1752 return ret; 1753 1754 err_port_del: 1755 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { 1756 phy_exit(port->phy); 1757 list_del(&port->list); 1758 } 1759 1760 return ret; 1761 } 1762 1763 static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie) 1764 { 1765 struct device *dev = pcie->pci->dev; 1766 struct qcom_pcie_port *port; 1767 struct gpio_desc *reset; 1768 struct phy *phy; 1769 int ret; 1770 1771 phy = devm_phy_optional_get(dev, "pciephy"); 1772 if (IS_ERR(phy)) 1773 return PTR_ERR(phy); 1774 1775 reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); 1776 if (IS_ERR(reset)) 1777 return PTR_ERR(reset); 1778 1779 ret = phy_init(phy); 1780 if (ret) 1781 return ret; 1782 1783 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); 1784 if (!port) 1785 return -ENOMEM; 1786 1787 port->reset = reset; 1788 port->phy = phy; 1789 INIT_LIST_HEAD(&port->list); 1790 list_add_tail(&port->list, &pcie->ports); 1791 1792 return 0; 1793 } 1794 1795 static int qcom_pcie_probe(struct platform_device *pdev) 1796 { 1797 const struct qcom_pcie_cfg *pcie_cfg; 1798 unsigned long max_freq = ULONG_MAX; 1799 struct qcom_pcie_port *port, *tmp; 1800 struct device *dev = &pdev->dev; 1801 struct dev_pm_opp *opp; 1802 struct qcom_pcie *pcie; 1803 struct dw_pcie_rp *pp; 1804 struct resource *res; 1805 struct dw_pcie *pci; 1806 int ret, irq; 1807 char *name; 1808 1809 pcie_cfg = of_device_get_match_data(dev); 1810 if (!pcie_cfg) { 1811 dev_err(dev, "No platform data\n"); 1812 return -ENODATA; 1813 } 1814 1815 if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) { 1816 dev_err(dev, "No platform ops\n"); 1817 return -ENODATA; 1818 } 1819 1820 pm_runtime_enable(dev); 1821 ret = pm_runtime_get_sync(dev); 1822 if (ret < 0) 1823 goto err_pm_runtime_put; 1824 1825 if (pcie_cfg->firmware_managed) { 1826 struct pci_host_bridge *bridge; 1827 struct pci_config_window *cfg; 1828 1829 bridge = devm_pci_alloc_host_bridge(dev, 0); 1830 if (!bridge) { 1831 ret = -ENOMEM; 1832 goto err_pm_runtime_put; 1833 } 1834 1835 /* Parse and map our ECAM configuration space area */ 1836 cfg = pci_host_common_ecam_create(dev, bridge, 1837 &pci_qcom_ecam_ops); 1838 if (IS_ERR(cfg)) { 1839 ret = PTR_ERR(cfg); 1840 goto err_pm_runtime_put; 1841 } 1842 1843 bridge->sysdata = cfg; 1844 bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops; 1845 bridge->msi_domain = true; 1846 1847 ret = pci_host_probe(bridge); 1848 if (ret) 1849 goto err_pm_runtime_put; 1850 1851 return 0; 1852 } 1853 1854 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 1855 if (!pcie) { 1856 ret = -ENOMEM; 1857 goto err_pm_runtime_put; 1858 } 1859 1860 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1861 if (!pci) { 1862 ret = -ENOMEM; 1863 goto err_pm_runtime_put; 1864 } 1865 1866 INIT_LIST_HEAD(&pcie->ports); 1867 1868 pci->dev = dev; 1869 pci->ops = &dw_pcie_ops; 1870 pp = &pci->pp; 1871 1872 pcie->pci = pci; 1873 1874 pcie->cfg = pcie_cfg; 1875 1876 pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); 1877 if (IS_ERR(pcie->parf)) { 1878 ret = PTR_ERR(pcie->parf); 1879 goto err_pm_runtime_put; 1880 } 1881 1882 /* MHI region is optional */ 1883 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi"); 1884 if (res) { 1885 pcie->mhi = devm_ioremap_resource(dev, res); 1886 if (IS_ERR(pcie->mhi)) { 1887 ret = PTR_ERR(pcie->mhi); 1888 goto err_pm_runtime_put; 1889 } 1890 } 1891 1892 /* OPP table is optional */ 1893 ret = devm_pm_opp_of_add_table(dev); 1894 if (ret && ret != -ENODEV) { 1895 dev_err_probe(dev, ret, "Failed to add OPP table\n"); 1896 goto err_pm_runtime_put; 1897 } 1898 1899 /* 1900 * Before the PCIe link is initialized, vote for highest OPP in the OPP 1901 * table, so that we are voting for maximum voltage corner for the 1902 * link to come up in maximum supported speed. At the end of the 1903 * probe(), OPP will be updated using qcom_pcie_icc_opp_update(). 1904 */ 1905 if (!ret) { 1906 opp = dev_pm_opp_find_freq_floor(dev, &max_freq); 1907 if (IS_ERR(opp)) { 1908 ret = PTR_ERR(opp); 1909 dev_err_probe(pci->dev, ret, 1910 "Unable to find max freq OPP\n"); 1911 goto err_pm_runtime_put; 1912 } else { 1913 ret = dev_pm_opp_set_opp(dev, opp); 1914 } 1915 1916 dev_pm_opp_put(opp); 1917 if (ret) { 1918 dev_err_probe(pci->dev, ret, 1919 "Failed to set OPP for freq %lu\n", 1920 max_freq); 1921 goto err_pm_runtime_put; 1922 } 1923 1924 pcie->use_pm_opp = true; 1925 } else { 1926 /* Skip ICC init if OPP is supported as it is handled by OPP */ 1927 ret = qcom_pcie_icc_init(pcie); 1928 if (ret) 1929 goto err_pm_runtime_put; 1930 } 1931 1932 ret = pcie->cfg->ops->get_resources(pcie); 1933 if (ret) 1934 goto err_pm_runtime_put; 1935 1936 pp->ops = &qcom_pcie_dw_ops; 1937 1938 ret = qcom_pcie_parse_ports(pcie); 1939 if (ret) { 1940 if (ret != -ENOENT) { 1941 dev_err_probe(pci->dev, ret, 1942 "Failed to parse Root Port: %d\n", ret); 1943 goto err_pm_runtime_put; 1944 } 1945 1946 /* 1947 * In the case of properties not populated in Root Port node, 1948 * fallback to the legacy method of parsing the Host Bridge 1949 * node. This is to maintain DT backwards compatibility. 1950 */ 1951 ret = qcom_pcie_parse_legacy_binding(pcie); 1952 if (ret) 1953 goto err_pm_runtime_put; 1954 } 1955 1956 platform_set_drvdata(pdev, pcie); 1957 1958 irq = platform_get_irq_byname_optional(pdev, "global"); 1959 if (irq > 0) 1960 pp->use_linkup_irq = true; 1961 1962 ret = dw_pcie_host_init(pp); 1963 if (ret) { 1964 dev_err(dev, "cannot initialize host\n"); 1965 goto err_phy_exit; 1966 } 1967 1968 name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d", 1969 pci_domain_nr(pp->bridge->bus)); 1970 if (!name) { 1971 ret = -ENOMEM; 1972 goto err_host_deinit; 1973 } 1974 1975 if (irq > 0) { 1976 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1977 qcom_pcie_global_irq_thread, 1978 IRQF_ONESHOT, name, pcie); 1979 if (ret) { 1980 dev_err_probe(&pdev->dev, ret, 1981 "Failed to request Global IRQ\n"); 1982 goto err_host_deinit; 1983 } 1984 1985 writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7, 1986 pcie->parf + PARF_INT_ALL_MASK); 1987 } 1988 1989 qcom_pcie_icc_opp_update(pcie); 1990 1991 if (pcie->mhi) 1992 qcom_pcie_init_debugfs(pcie); 1993 1994 return 0; 1995 1996 err_host_deinit: 1997 dw_pcie_host_deinit(pp); 1998 err_phy_exit: 1999 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { 2000 phy_exit(port->phy); 2001 list_del(&port->list); 2002 } 2003 err_pm_runtime_put: 2004 pm_runtime_put(dev); 2005 pm_runtime_disable(dev); 2006 2007 return ret; 2008 } 2009 2010 static int qcom_pcie_suspend_noirq(struct device *dev) 2011 { 2012 struct qcom_pcie *pcie; 2013 int ret = 0; 2014 2015 pcie = dev_get_drvdata(dev); 2016 if (!pcie) 2017 return 0; 2018 2019 /* 2020 * Set minimum bandwidth required to keep data path functional during 2021 * suspend. 2022 */ 2023 if (pcie->icc_mem) { 2024 ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); 2025 if (ret) { 2026 dev_err(dev, 2027 "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", 2028 ret); 2029 return ret; 2030 } 2031 } 2032 2033 /* 2034 * Turn OFF the resources only for controllers without active PCIe 2035 * devices. For controllers with active devices, the resources are kept 2036 * ON and the link is expected to be in L0/L1 (sub)states. 2037 * 2038 * Turning OFF the resources for controllers with active PCIe devices 2039 * will trigger access violation during the end of the suspend cycle, 2040 * as kernel tries to access the PCIe devices config space for masking 2041 * MSIs. 2042 * 2043 * Also, it is not desirable to put the link into L2/L3 state as that 2044 * implies VDD supply will be removed and the devices may go into 2045 * powerdown state. This will affect the lifetime of the storage devices 2046 * like NVMe. 2047 */ 2048 if (!dw_pcie_link_up(pcie->pci)) { 2049 qcom_pcie_host_deinit(&pcie->pci->pp); 2050 pcie->suspended = true; 2051 } 2052 2053 /* 2054 * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM. 2055 * Because on some platforms, DBI access can happen very late during the 2056 * S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC 2057 * error. 2058 */ 2059 if (pm_suspend_target_state != PM_SUSPEND_MEM) { 2060 ret = icc_disable(pcie->icc_cpu); 2061 if (ret) 2062 dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret); 2063 2064 if (pcie->use_pm_opp) 2065 dev_pm_opp_set_opp(pcie->pci->dev, NULL); 2066 } 2067 return ret; 2068 } 2069 2070 static int qcom_pcie_resume_noirq(struct device *dev) 2071 { 2072 struct qcom_pcie *pcie; 2073 int ret; 2074 2075 pcie = dev_get_drvdata(dev); 2076 if (!pcie) 2077 return 0; 2078 2079 if (pm_suspend_target_state != PM_SUSPEND_MEM) { 2080 ret = icc_enable(pcie->icc_cpu); 2081 if (ret) { 2082 dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret); 2083 return ret; 2084 } 2085 } 2086 2087 if (pcie->suspended) { 2088 ret = qcom_pcie_host_init(&pcie->pci->pp); 2089 if (ret) 2090 return ret; 2091 2092 pcie->suspended = false; 2093 } 2094 2095 qcom_pcie_icc_opp_update(pcie); 2096 2097 return 0; 2098 } 2099 2100 static const struct of_device_id qcom_pcie_match[] = { 2101 { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 }, 2102 { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, 2103 { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, 2104 { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 }, 2105 { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, 2106 { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, 2107 { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, 2108 { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 }, 2109 { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 }, 2110 { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 }, 2111 { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, 2112 { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, 2113 { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed }, 2114 { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, 2115 { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0}, 2116 { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, 2117 { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, 2118 { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp }, 2119 { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, 2120 { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 }, 2121 { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, 2122 { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 }, 2123 { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 }, 2124 { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, 2125 { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, 2126 { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 }, 2127 { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp }, 2128 { } 2129 }; 2130 2131 static void qcom_fixup_class(struct pci_dev *dev) 2132 { 2133 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; 2134 } 2135 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); 2136 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); 2137 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); 2138 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); 2139 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); 2140 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); 2141 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); 2142 2143 static const struct dev_pm_ops qcom_pcie_pm_ops = { 2144 NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq) 2145 }; 2146 2147 static struct platform_driver qcom_pcie_driver = { 2148 .probe = qcom_pcie_probe, 2149 .driver = { 2150 .name = "qcom-pcie", 2151 .suppress_bind_attrs = true, 2152 .of_match_table = qcom_pcie_match, 2153 .pm = &qcom_pcie_pm_ops, 2154 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 2155 }, 2156 }; 2157 builtin_platform_driver(qcom_pcie_driver); 2158