// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PM_CTRL			0x20
#define REQ_NOT_ENTR_L1				BIT(5)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000
#define PCIE20_PARF_BDF_TO_SID_TABLE_N		0x2000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
							   250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
							   1)
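/*
 * Together, the two values above advertise a 25 W slot power limit:
 * a value of 250 with the 0.1x scale selected by PCI_EXP_SLTCAP_SPLS = 1.
 */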
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_HPC | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5

#define QCOM_PCIE_CRC8_POLYNOMIAL	(BIT(2) | BIT(1) | BIT(0))

struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[12];
	int num_clks;
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data clks[5];
	struct reset_control *rst;
};

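/*
 * Only the union member matching the SoC's controller IP revision is
 * used; the qcom_pcie_ops selected through the OF match data decide
 * which one that is.
 */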
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_cfg *cfg;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

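	/*
	 * Assumption, mirroring the bit-0 clear in the init path: setting
	 * bit 0 of PARF_PHY_CTRL puts the PHY back into power-down.
	 */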
	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	return 0;

err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

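	/*
	 * PHY tuning: fixed de-emphasis, TX swing and RX equalization
	 * values for IPQ8064-class boards, presumably carried over from
	 * the vendor kernel.
	 */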
	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

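/*
 * The PARF bring-up below (PHY power-up, DBI base address, MHI clock
 * gating, AXI write-address halt) repeats with small variations in the
 * 2.4.0 and 2.7.0 paths further down.
 */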
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks,
		 * but they are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

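/*
 * 2.4.0 bring-up: assert every reset, let them propagate, then deassert
 * them in dependency order before enabling the bus clocks.
 */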
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * There is no way to check whether the reset has completed, so
	 * wait for a fixed settling time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking the return values here; the original failure in
	 * 'ret' is returned anyway.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
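	/*
	 * Unlock read-only DBI registers (DBI_RO_WR_EN in MISC_CONTROL_1)
	 * so the slot and link capability fields can be patched below.
	 */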
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	unsigned int num_clks, num_opt_clks;
	unsigned int idx;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	idx = 0;
	res->clks[idx++].id = "aux";
	res->clks[idx++].id = "cfg";
	res->clks[idx++].id = "bus_master";
	res->clks[idx++].id = "bus_slave";
	res->clks[idx++].id = "slave_q2a";

	num_clks = idx;

	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
	if (ret < 0)
		return ret;

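	/* The remaining clocks are optional and depend on the SoC */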
	res->clks[idx++].id = "tbu";
	res->clks[idx++].id = "ddrss_sf_tbu";
	res->clks[idx++].id = "aggre0";
	res->clks[idx++].id = "aggre1";
	res->clks[idx++].id = "noc_aggr_4";
	res->clks[idx++].id = "noc_aggr_south_sf";
	res->clks[idx++].id = "cnoc_qx";

	num_opt_clks = idx - num_clks;
	res->num_clks = idx;

	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
	if (ret < 0)
		return ret;

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "axi_bridge";
	res->clks[4].id = "rchng";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
	       pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
	       GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
	       pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

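/*
 * The BDF-to-SID table in PARF space holds 256 32-bit entries, indexed
 * by the CRC8 of the big-endian BDF. Each entry packs BDF[31:16], the
 * SID offset [15:8] and a NEXT hash [7:0] that chains colliding entries.
 */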
static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* The registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			    0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}

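/*
 * Host bring-up order: IP-specific init, PHY mode selection and
 * power-on, IP-specific post-init, PERST# deassert, then (where
 * required) SID configuration.
 */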
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	const struct qcom_pcie_cfg *pcie_cfg;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ }
};

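/*
 * These root ports do not advertise the PCI bridge class code; fix it
 * up early so the PCI core enumerates them as bridges.
 */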
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);