// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>

#include "../../pci.h"
#include "../pci-host-common.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_INT_ALL_STATUS			0x224
#define PARF_INT_ALL_CLEAR			0x228
#define PARF_INT_ALL_MASK			0x22c
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_DBI_BASE_ADDR_V2			0x350
#define PARF_DBI_BASE_ADDR_V2_HI		0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
#define PARF_NO_SNOOP_OVERRIDE			0x3d4
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)
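/*
 * Illustrative encoding example (editorial, not in the original source):
 * FIELD_PREP() shifts a value into the field's bit range, so
 * PCS_DEEMPH_TX_DEEMPH_GEN1(24) == 24 << 16 == 0x180000, occupying
 * bits [21:16] of PARF_PCS_DEEMPH.
 */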
/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x80000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP			BIT(13)
#define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23)

/* PARF_NO_SNOOP_OVERRIDE register fields */
#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* PARF_BDF_TO_SID_CFG fields */
#define BDF_TO_SID_BYPASS			BIT(0)

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
	Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
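/*
 * Worked example (editorial, not in the original source): for a Gen 1 link,
 * speed index 1 maps to PCIE_SPEED_2_5GT, and PCIE_SPEED2MBS_ENC() yields
 * 2000 Mb/s once the 8b/10b encoding overhead is accounted for (see
 * drivers/pci/pci.h), so QCOM_PCIE_LINK_SPEED_TO_BW(1) == Mbps_to_icc(2000).
 */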
struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
};

#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
	struct reset_control *rst;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*host_post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

/**
 * struct qcom_pcie_cfg - Per SoC config struct
 * @ops: qcom PCIe ops structure
 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
 * snooping
 * @firmware_managed: Set if the Root Complex is firmware managed
 * @no_l0s: Set if the Root Complex should not advertise ASPM L0s
 */
struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	bool override_no_snoop;
	bool firmware_managed;
	bool no_l0s;
};

struct qcom_pcie_port {
	struct list_head list;
	struct gpio_desc *reset;
	struct phy *phy;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *mhi;
	union qcom_pcie_resources res;
	struct icc_path *icc_mem;
	struct icc_path *icc_cpu;
	const struct qcom_pcie_cfg *cfg;
	struct dentry *debugfs;
	struct list_head ports;
	bool suspended;
	bool use_pm_opp;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
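/*
 * Bring-up sequencing note (editorial summary of qcom_pcie_host_init()
 * below, not in the original source): the per-SoC ops are invoked in the
 * order get_resources (probe time), then init -> PHY power on -> post_init
 * -> PERST# deassert -> config_sid, with deinit undoing init on failure.
 */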
static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
	struct qcom_pcie_port *port;
	int val = assert ? 1 : 0;

	list_for_each_entry(port, &pcie->ports, list)
		gpiod_set_value_cansleep(port->reset, val);

	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	qcom_perst_assert(pcie, true);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(PCIE_T_PVPERL_MS);
	qcom_perst_assert(pcie, false);
}
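/*
 * Timing note (editorial, not in the original source): the 100 ms above
 * corresponds to T_PVPERL from the PCIe CEM specification, the minimum
 * time power must be stable before PERST# is deasserted; PCIE_T_PVPERL_MS
 * comes from drivers/pci/pci.h.
 */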
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_pcie_common_set_equalization(pci);

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
		qcom_pcie_common_set_16gt_lane_margining(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	u16 offset;
	u32 val;

	if (!pcie->cfg->no_l0s)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR register is in the CPU domain and
		 * needs to be programmed with the CPU physical address.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
						PARF_SLV_ADDR_SPACE_SIZE);
	}
}

static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers
		 * are in the CPU domain and need to be programmed with CPU
		 * physical addresses.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR_V2);
		writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
						PARF_DBI_BASE_ADDR_V2_HI);

		if (pci->atu_phys_addr) {
			writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR);
			writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR_HI);
		}

		writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
					PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
	}
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	if (!pci->elbi_base) {
		dev_err(pci->dev, "ELBI is not present\n");
		return;
	}

	/* enable link training */
	val = readl(pci->elbi_base + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";
	/* ext is optional on APQ8064 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/*
	 * Reset the PCIe interface, as the bootloader can leave it in an
	 * undefined state.
	 */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	qcom_pcie_configure_dbi_base(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= LTSSM_EN;
	writel(val, pcie->parf + PARF_LTSSM);
}
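/*
 * Note (editorial, not in the original source): unlike the 2.1.0 variant
 * above, which kicks off link training through the ELBI block, all later
 * IP revisions reuse this helper and start the LTSSM via the LTSSM_EN bit
 * in the PARF_LTSSM register.
 */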
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";
	res->num_resets = is_ipq ? 12 : 6;

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}
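/*
 * Note (editorial, not in the original source): the bulk reset helpers
 * operate on the first num_resets entries of the array, so the resets that
 * only exist on IPQ4019 are deliberately listed after the six common ones.
 */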
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * There is no way to check whether the reset has completed, so
	 * just wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * Not checking for failure here, as the original failure in 'ret'
	 * is returned anyway.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;

	if (pcie_cfg->override_no_snoop)
		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
		       pcie->parf + PARF_NO_SNOOP_OVERRIDE);

	qcom_pcie_clear_aspm_l0s(pcie->pci);
	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	/*
	 * Downstream devices need to be in D0 state before enabling PCI PM
	 * substates.
	 */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);

	return 0;
}

static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci->pp;

	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
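/*
 * Editorial note on the BDF-to-SID mapping implemented below (summary, not
 * in the original source): the hardware provides a 256-entry table at
 * PARF_BDF_TO_SID_TABLE_N whose entries pack BDF [31:16], SID offset
 * [15:8] and a NEXT index [7:0]. Entries are indexed by a CRC8 hash of the
 * BDF; on a collision, the occupied entry's NEXT field is pointed at the
 * following free slot, forming a simple chained hash table.
 */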
static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;
	u32 val;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	/* Enable BDF to SID translation by disabling bypass mode (default) */
	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
	val &= ~BDF_TO_SID_BYPASS;
	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
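/*
 * Illustrative devicetree fragment for the parser above (hypothetical
 * values, not from the original source): an entry such as
 *
 *	iommu-map = <0x100 &apps_smmu 0x1c01 0x1>;
 *
 * is read into the anonymous map struct as bdf = 0x100 (bus 1, devfn 0),
 * phandle pointing at the SMMU, smmu_sid = 0x1c01 and smmu_sid_len = 1,
 * with the SID programmed into the table as an offset from the first
 * entry's SID.
 */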
static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(res->num_clks, res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
	       pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
	       GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
	       pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return val & PCI_EXP_LNKSTA_DLLLA;
}

static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		phy_power_off(port->phy);
}

static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;
	int ret;

	list_for_each_entry(port, &pcie->ports, list) {
		ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
		if (ret)
			return ret;

		ret = phy_power_on(port->phy);
		if (ret) {
			qcom_pcie_phy_power_off(pcie);
			return ret;
		}
	}

	return 0;
}

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = qcom_pcie_phy_power_on(pcie);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	qcom_pcie_phy_power_off(pcie);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	qcom_pcie_phy_power_off(pcie);
	pcie->cfg->ops->deinit(pcie);
}

static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	if (pcie->cfg->ops->host_post_init)
		pcie->cfg->ops->host_post_init(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init		= qcom_pcie_host_init,
	.deinit		= qcom_pcie_host_deinit,
	.post_init	= qcom_pcie_host_post_init,
};
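/*
 * Editorial note (not in the original source): the DWC core invokes the
 * .post_init host op only after the host bridge has been probed and the
 * bus enumerated, which is why qcom_pcie_host_post_init_2_7_0() can safely
 * walk the bus and enable ASPM states on already-discovered devices.
 */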
/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 1.21.0	Synopsys IP rev.: 5.60a */
static const struct qcom_pcie_ops ops_1_21_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_1_34_0 = {
	.ops = &ops_1_9_0,
	.override_no_snoop = true,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct qcom_pcie_cfg cfg_sc8280xp = {
	.ops = &ops_1_21_0,
	.no_l0s = true,
};

static const struct qcom_pcie_cfg cfg_fw_managed = {
	.firmware_managed = true,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
	if (IS_ERR(pcie->icc_cpu))
		return PTR_ERR(pcie->icc_cpu);

	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
			ret);
		return ret;
	}

	/*
	 * Since the CPU-PCIe path is only used for activities like register
	 * access of the host controller and endpoint Config/BAR space access,
	 * the HW team recommends a minimal bandwidth of 1 KBps just to keep
	 * the path active.
	 */
	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
			ret);
		icc_set_bw(pcie->icc_mem, 0, 0);
		return ret;
	}

	return 0;
}

static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
	u32 offset, status, width, speed;
	struct dw_pcie *pci = pcie->pci;
	unsigned long freq_kbps;
	struct dev_pm_opp *opp;
	int ret, freq_mbps;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0,
				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
		if (ret) {
			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
		}
	} else if (pcie->use_pm_opp) {
		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
		if (freq_mbps < 0)
			return;

		freq_kbps = freq_mbps * KILO;
		opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
						 true);
		if (!IS_ERR(opp)) {
			ret = dev_pm_opp_set_opp(pci->dev, opp);
			if (ret)
				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
					freq_kbps * width, ret);
			dev_pm_opp_put(opp);
		}
	}
}
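/*
 * Worked example (editorial, not in the original source): after a Gen 3
 * x2 link trains, LNKSTA reports speed index 3 and width 2, so the vote
 * above becomes 2 * QCOM_PCIE_LINK_SPEED_TO_BW(3), i.e. twice
 * Mbps_to_icc(7877) once the 128b/130b encoding overhead is applied.
 */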
static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}

static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
{
	struct qcom_pcie *pcie = data;
	struct dw_pcie_rp *pp = &pcie->pci->pp;
	struct device *dev = pcie->pci->dev;
	u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);

	writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);

	if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
		msleep(PCIE_RESET_CONFIG_WAIT_MS);
		dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
		/* Rescan the bus to enumerate endpoint devices */
		pci_lock_rescan_remove();
		pci_rescan_bus(pp->bridge->bus);
		pci_unlock_rescan_remove();

		qcom_pcie_icc_opp_update(pcie);
	} else {
		dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
			      status);
	}

	return IRQ_HANDLED;
}

static void qcom_pci_free_msi(void *ptr)
{
	struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;

	if (pp && pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);
}

static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	int ret;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pp = &pci->pp;
	pci->dbi_base = cfg->win;
	pp->num_vectors = MSI_DEF_NUM_VECTORS;

	ret = dw_pcie_msi_host_init(pp);
	if (ret)
		return ret;

	pp->has_msi_ctrl = true;
	dw_pcie_msi_init(pp);

	return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
}

static const struct pci_ecam_ops pci_qcom_ecam_ops = {
	.init = qcom_pcie_ecam_host_init,
	.pci_ops = {
		.map_bus = pci_ecam_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};

static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
				      "reset", GPIOD_OUT_HIGH, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = phy_init(phy);
	if (ret)
		return ret;

	port->reset = reset;
	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port, *tmp;
	int ret = -ENOENT;

	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
		if (!of_node_is_type(of_port, "pci"))
			continue;

		ret = qcom_pcie_parse_port(pcie, of_port);
		if (ret)
			goto err_port_del;
	}

	return ret;

err_port_del:
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_exit(port->phy);
		list_del(&port->list);
	}

	return ret;
}
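/*
 * Illustrative devicetree fragment for the per-port parser above
 * (hypothetical node, not from the original source): a Root Port child
 * node of the host bridge would look roughly like
 *
 *	pcie@0 {
 *		device_type = "pci";
 *		reset-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
 *		phys = <&pcie0_phy>;
 *	};
 *
 * whereas the legacy binding below expects a "perst-gpios" GPIO and a
 * "pciephy" PHY on the host bridge node itself.
 */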
static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	ret = phy_init(phy);
	if (ret)
		return ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->reset = reset;
	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	unsigned long max_freq = ULONG_MAX;
	struct qcom_pcie_port *port, *tmp;
	struct device *dev = &pdev->dev;
	struct dev_pm_opp *opp;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret, irq;
	char *name;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg) {
		dev_err(dev, "No platform data\n");
		return -ENODATA;
	}

	if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
		dev_err(dev, "No platform ops\n");
		return -ENODATA;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	if (pcie_cfg->firmware_managed) {
		struct pci_host_bridge *bridge;
		struct pci_config_window *cfg;

		bridge = devm_pci_alloc_host_bridge(dev, 0);
		if (!bridge) {
			ret = -ENOMEM;
			goto err_pm_runtime_put;
		}

		/* Parse and map our ECAM configuration space area */
		cfg = pci_host_common_ecam_create(dev, bridge,
						  &pci_qcom_ecam_ops);
		if (IS_ERR(cfg)) {
			ret = PTR_ERR(cfg);
			goto err_pm_runtime_put;
		}

		bridge->sysdata = cfg;
		bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
		bridge->msi_domain = true;

		ret = pci_host_probe(bridge);
		if (ret)
			goto err_pm_runtime_put;

		return 0;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	INIT_LIST_HEAD(&pcie->ports);

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(dev, ret, "Failed to add OPP table\n");
		goto err_pm_runtime_put;
	}
	/*
	 * Before the PCIe link is initialized, vote for the highest OPP in
	 * the OPP table, so that the maximum voltage corner is voted for and
	 * the link can come up at the maximum supported speed. At the end of
	 * probe(), the OPP will be updated using qcom_pcie_icc_opp_update().
	 */
	if (!ret) {
		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err_probe(pci->dev, ret,
				      "Unable to find max freq OPP\n");
			goto err_pm_runtime_put;
		} else {
			ret = dev_pm_opp_set_opp(dev, opp);
		}

		dev_pm_opp_put(opp);
		if (ret) {
			dev_err_probe(pci->dev, ret,
				      "Failed to set OPP for freq %lu\n",
				      max_freq);
			goto err_pm_runtime_put;
		}

		pcie->use_pm_opp = true;
	} else {
		/* Skip ICC init if OPP is supported as it is handled by OPP */
		ret = qcom_pcie_icc_init(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = qcom_pcie_parse_ports(pcie);
	if (ret) {
		if (ret != -ENOENT) {
			dev_err_probe(pci->dev, ret,
				      "Failed to parse Root Port: %d\n", ret);
			goto err_pm_runtime_put;
		}

		/*
		 * In the case of properties not populated in the Root Port
		 * node, fall back to the legacy method of parsing the Host
		 * Bridge node. This is to maintain DT backwards compatibility.
		 */
		ret = qcom_pcie_parse_legacy_binding(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	platform_set_drvdata(pdev, pcie);

	irq = platform_get_irq_byname_optional(pdev, "global");
	if (irq > 0)
		pp->use_linkup_irq = true;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
			      pci_domain_nr(pp->bridge->bus));
	if (!name) {
		ret = -ENOMEM;
		goto err_host_deinit;
	}

	if (irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
						qcom_pcie_global_irq_thread,
						IRQF_ONESHOT, name, pcie);
		if (ret) {
			dev_err_probe(&pdev->dev, ret,
				      "Failed to request Global IRQ\n");
			goto err_host_deinit;
		}

		writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
			       pcie->parf + PARF_INT_ALL_MASK);
	}

	qcom_pcie_icc_opp_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_host_deinit:
	dw_pcie_host_deinit(pp);
err_phy_exit:
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_exit(port->phy);
		list_del(&port->list);
	}
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}
static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret = 0;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	/*
	 * Set minimum bandwidth required to keep data path functional during
	 * suspend.
	 */
	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
		if (ret) {
			dev_err(dev,
				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
			return ret;
		}
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are
	 * kept ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger an access violation at the end of the suspend cycle,
	 * as the kernel tries to access the config space of the PCIe devices
	 * to mask the MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state, as that
	 * implies the VDD supply will be removed and the devices may go into
	 * a powerdown state. This will affect the lifetime of storage devices
	 * like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	/*
	 * Only disable the CPU-PCIe interconnect path if the suspend is
	 * non-S2RAM, because on some platforms DBI access can happen very
	 * late during S2RAM, and a non-active CPU-PCIe interconnect path may
	 * lead to a NoC error.
	 */
	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_disable(pcie->icc_cpu);
		if (ret)
			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);

		if (pcie->use_pm_opp)
			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
	}

	return ret;
}

static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_enable(pcie->icc_cpu);
		if (ret) {
			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
			return ret;
		}
	}

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_opp_update(pcie);

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
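/*
 * Editorial note (not in the original source): the fixups above cover
 * Qualcomm Root Port device IDs that do not report the standard
 * PCI-to-PCI bridge class code in hardware; forcing the class early lets
 * the PCI core treat them as normal bridges during enumeration.
 */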
static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);