1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * PCIe host controller driver for Freescale i.MX6 SoCs 4 * 5 * Copyright (C) 2013 Kosagi 6 * https://www.kosagi.com 7 * 8 * Author: Sean Cross <xobs@kosagi.com> 9 */ 10 11 #include <linux/bitfield.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/gpio/consumer.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 18 #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> 19 #include <linux/module.h> 20 #include <linux/of.h> 21 #include <linux/of_address.h> 22 #include <linux/pci.h> 23 #include <linux/platform_device.h> 24 #include <linux/regmap.h> 25 #include <linux/regulator/consumer.h> 26 #include <linux/resource.h> 27 #include <linux/signal.h> 28 #include <linux/types.h> 29 #include <linux/interrupt.h> 30 #include <linux/reset.h> 31 #include <linux/phy/pcie.h> 32 #include <linux/phy/phy.h> 33 #include <linux/pm_domain.h> 34 #include <linux/pm_runtime.h> 35 36 #include "../../pci.h" 37 #include "pcie-designware.h" 38 39 #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) 40 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) 41 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) 42 #define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12) 43 #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) 44 45 #define IMX95_PCIE_PHY_GEN_CTRL 0x0 46 #define IMX95_PCIE_REF_USE_PAD BIT(17) 47 48 #define IMX95_PCIE_PHY_MPLLA_CTRL 0x10 49 #define IMX95_PCIE_PHY_MPLL_STATE BIT(30) 50 51 #define IMX95_PCIE_SS_RW_REG_0 0xf0 52 #define IMX95_PCIE_REF_CLKEN BIT(23) 53 #define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9) 54 #define IMX95_PCIE_SS_RW_REG_1 0xf4 55 #define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31) 56 57 #define IMX95_PE0_GEN_CTRL_1 0x1050 58 #define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0) 59 60 #define IMX95_PE0_GEN_CTRL_3 0x1058 61 #define IMX95_PCIE_LTSSM_EN BIT(0) 62 63 #define IMX95_PE0_LUT_ACSCTRL 0x1008 64 #define IMX95_PEO_LUT_RWA BIT(16) 65 #define IMX95_PE0_LUT_ENLOC GENMASK(4, 0) 
66 67 #define IMX95_PE0_LUT_DATA1 0x100c 68 #define IMX95_PE0_LUT_VLD BIT(31) 69 #define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8) 70 #define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0) 71 72 #define IMX95_PE0_LUT_DATA2 0x1010 73 #define IMX95_PE0_LUT_REQID GENMASK(31, 16) 74 #define IMX95_PE0_LUT_MASK GENMASK(15, 0) 75 76 #define IMX95_SID_MASK GENMASK(5, 0) 77 #define IMX95_MAX_LUT 32 78 79 #define IMX95_PCIE_RST_CTRL 0x3010 80 #define IMX95_PCIE_COLD_RST BIT(0) 81 82 #define to_imx_pcie(x) dev_get_drvdata((x)->dev) 83 84 enum imx_pcie_variants { 85 IMX6Q, 86 IMX6SX, 87 IMX6QP, 88 IMX7D, 89 IMX8MQ, 90 IMX8MM, 91 IMX8MP, 92 IMX8Q, 93 IMX95, 94 IMX8MQ_EP, 95 IMX8MM_EP, 96 IMX8MP_EP, 97 IMX8Q_EP, 98 IMX95_EP, 99 }; 100 101 #define IMX_PCIE_FLAG_IMX_PHY BIT(0) 102 #define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1) 103 #define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) 104 #define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3) 105 #define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4) 106 #define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5) 107 #define IMX_PCIE_FLAG_HAS_SERDES BIT(6) 108 #define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7) 109 #define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8) 110 /* 111 * Because of ERR005723 (PCIe does not support L2 power down) we need to 112 * workaround suspend resume on some devices which are affected by this errata. 
113 */ 114 #define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9) 115 #define IMX_PCIE_FLAG_HAS_LUT BIT(10) 116 #define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11) 117 118 #define imx_check_flag(pci, val) (pci->drvdata->flags & val) 119 120 #define IMX_PCIE_MAX_INSTANCES 2 121 122 struct imx_pcie; 123 124 struct imx_pcie_drvdata { 125 enum imx_pcie_variants variant; 126 enum dw_pcie_device_mode mode; 127 u32 flags; 128 int dbi_length; 129 const char *gpr; 130 const u32 ltssm_off; 131 const u32 ltssm_mask; 132 const u32 mode_off[IMX_PCIE_MAX_INSTANCES]; 133 const u32 mode_mask[IMX_PCIE_MAX_INSTANCES]; 134 const struct pci_epc_features *epc_features; 135 int (*init_phy)(struct imx_pcie *pcie); 136 int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable); 137 int (*core_reset)(struct imx_pcie *pcie, bool assert); 138 int (*wait_pll_lock)(struct imx_pcie *pcie); 139 const struct dw_pcie_host_ops *ops; 140 }; 141 142 struct imx_lut_data { 143 u32 data1; 144 u32 data2; 145 }; 146 147 struct imx_pcie { 148 struct dw_pcie *pci; 149 struct gpio_desc *reset_gpiod; 150 struct clk_bulk_data *clks; 151 int num_clks; 152 struct regmap *iomuxc_gpr; 153 u16 msi_ctrl; 154 u32 controller_id; 155 struct reset_control *pciephy_reset; 156 struct reset_control *apps_reset; 157 u32 tx_deemph_gen1; 158 u32 tx_deemph_gen2_3p5db; 159 u32 tx_deemph_gen2_6db; 160 u32 tx_swing_full; 161 u32 tx_swing_low; 162 struct regulator *vpcie; 163 struct regulator *vph; 164 void __iomem *phy_base; 165 166 /* LUT data for pcie */ 167 struct imx_lut_data luts[IMX95_MAX_LUT]; 168 /* power domain for pcie */ 169 struct device *pd_pcie; 170 /* power domain for pcie phy */ 171 struct device *pd_pcie_phy; 172 struct phy *phy; 173 const struct imx_pcie_drvdata *drvdata; 174 175 /* Ensure that only one device's LUT is configured at any given time */ 176 struct mutex lock; 177 }; 178 179 /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ 180 #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 181 #define 
PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX) 182 183 /* PCIe Port Logic registers (memory-mapped) */ 184 #define PL_OFFSET 0x700 185 186 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) 187 #define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x)) 188 #define PCIE_PHY_CTRL_CAP_ADR BIT(16) 189 #define PCIE_PHY_CTRL_CAP_DAT BIT(17) 190 #define PCIE_PHY_CTRL_WR BIT(18) 191 #define PCIE_PHY_CTRL_RD BIT(19) 192 193 #define PCIE_PHY_STAT (PL_OFFSET + 0x110) 194 #define PCIE_PHY_STAT_ACK BIT(16) 195 196 /* PHY registers (not memory-mapped) */ 197 #define PCIE_PHY_ATEOVRD 0x10 198 #define PCIE_PHY_ATEOVRD_EN BIT(2) 199 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 200 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 201 202 #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 203 #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 204 #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f 205 #define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9) 206 207 #define PCIE_PHY_RX_ASIC_OUT 0x100D 208 #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) 209 210 /* iMX7 PCIe PHY registers */ 211 #define PCIE_PHY_CMN_REG4 0x14 212 /* These are probably the bits that *aren't* DCC_FB_EN */ 213 #define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29 214 215 #define PCIE_PHY_CMN_REG15 0x54 216 #define PCIE_PHY_CMN_REG15_DLY_4 BIT(2) 217 #define PCIE_PHY_CMN_REG15_PLL_PD BIT(5) 218 #define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7) 219 220 #define PCIE_PHY_CMN_REG24 0x90 221 #define PCIE_PHY_CMN_REG24_RX_EQ BIT(6) 222 #define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3) 223 224 #define PCIE_PHY_CMN_REG26 0x98 225 #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC 226 227 #define PHY_RX_OVRD_IN_LO 0x1005 228 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5) 229 #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3) 230 231 static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie) 232 { 233 WARN_ON(imx_pcie->drvdata->variant != IMX8MQ && 234 imx_pcie->drvdata->variant != IMX8MQ_EP && 235 imx_pcie->drvdata->variant != IMX8MM && 236 imx_pcie->drvdata->variant != IMX8MM_EP && 237 
imx_pcie->drvdata->variant != IMX8MP &&
		imx_pcie->drvdata->variant != IMX8MP_EP);
	return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

/*
 * Prepare the i.MX95 glue registers before the PHY comes up: apply the
 * ERR051624 workaround, select CR parallel access, and route the
 * reference clock away from the REF pad.
 */
static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	/*
	 * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
	 * Through Beacon or PERST# De-assertion
	 *
	 * When the auxiliary power is not available, the controller
	 * cannot exit from L23 Ready with beacon or PERST# de-assertion
	 * when main power is not removed.
	 *
	 * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
	 */
	regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
			IMX95_PCIE_SYS_AUX_PWR_DET);

	regmap_update_bits(imx_pcie->iomuxc_gpr,
			IMX95_PCIE_SS_RW_REG_0,
			IMX95_PCIE_PHY_CR_PARA_SEL,
			IMX95_PCIE_PHY_CR_PARA_SEL);

	/* Do not use the REF pad as the PHY reference clock input... */
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			IMX95_PCIE_PHY_GEN_CTRL,
			IMX95_PCIE_REF_USE_PAD, 0);
	/* ...and enable the reference clock output to the PHY. */
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			IMX95_PCIE_SS_RW_REG_0,
			IMX95_PCIE_REF_CLKEN,
			IMX95_PCIE_REF_CLKEN);

	return 0;
}

/*
 * Program the RC/EP device-type field in the GPR block for this
 * controller instance.  Skipped entirely when mode_mask[0] is zero
 * (a generic PHY driver owns mode selection on those variants).
 */
static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	unsigned int mask, val, mode, id;

	if (drvdata->mode == DW_PCIE_EP_TYPE)
		mode = PCI_EXP_TYPE_ENDPOINT;
	else
		mode = PCI_EXP_TYPE_ROOT_PORT;

	id = imx_pcie->controller_id;

	/* If mode_mask is 0, generic PHY driver is used to set the mode */
	if (!drvdata->mode_mask[0])
		return;

	/* If mode_mask[id] is 0, each controller has its individual GPR */
	if (!drvdata->mode_mask[id])
		id = 0;

	mask = drvdata->mode_mask[id];
	/* Shift the type value into the field selected by the mask. */
	val = mode << (ffs(mask) - 1);

	regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}

/*
 * Poll PCIE_PHY_STAT until the PHY control-bus ack bit matches @exp_val;
 * gives up after 10 polls of 1us each.
 */
static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx_pcie->pci;
	bool val;
	u32
max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

/*
 * Latch @addr on the PHY control bus: present the address, pulse
 * CAP_ADR, and complete the ack handshake (assert then de-assert).
 */
static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	/* Capture the address into the PHY. */
	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* *data is u16, so only the low 16 bits of the status are kept. */
	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/* Write @data to the 16-bit PHY control register @addr (not memory-mapped) */
static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci,
PCIE_PHY_CTRL, var); 387 388 ret = pcie_phy_poll_ack(imx_pcie, true); 389 if (ret) 390 return ret; 391 392 /* deassert cap data */ 393 var = PCIE_PHY_CTRL_DATA(data); 394 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 395 396 /* wait for ack de-assertion */ 397 ret = pcie_phy_poll_ack(imx_pcie, false); 398 if (ret) 399 return ret; 400 401 /* assert wr signal */ 402 var = PCIE_PHY_CTRL_WR; 403 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 404 405 /* wait for ack */ 406 ret = pcie_phy_poll_ack(imx_pcie, true); 407 if (ret) 408 return ret; 409 410 /* deassert wr signal */ 411 var = PCIE_PHY_CTRL_DATA(data); 412 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 413 414 /* wait for ack de-assertion */ 415 ret = pcie_phy_poll_ack(imx_pcie, false); 416 if (ret) 417 return ret; 418 419 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); 420 421 return 0; 422 } 423 424 static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) 425 { 426 /* TODO: This code assumes external oscillator is being used */ 427 regmap_update_bits(imx_pcie->iomuxc_gpr, 428 imx_pcie_grp_offset(imx_pcie), 429 IMX8MQ_GPR_PCIE_REF_USE_PAD, 430 IMX8MQ_GPR_PCIE_REF_USE_PAD); 431 /* 432 * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the 433 * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared 434 * to zero. 
 */
	if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
		regmap_update_bits(imx_pcie->iomuxc_gpr,
				imx_pcie_grp_offset(imx_pcie),
				IMX8MQ_GPR_PCIE_VREG_BYPASS,
				0);

	return 0;
}

/*
 * Common i.MX6 GPR-based PHY setup: LOS level plus the DT-provided TX
 * de-emphasis and swing values, each shifted into its IOMUXC_GPR8
 * bit-field (shift counts correspond to the field positions).
 */
static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN1,
			imx_pcie->tx_deemph_gen1 << 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
			imx_pcie->tx_deemph_gen2_3p5db << 6);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
			imx_pcie->tx_deemph_gen2_6db << 12);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_SWING_FULL,
			imx_pcie->tx_swing_full << 18);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_SWING_LOW,
			imx_pcie->tx_swing_low << 25);
	return 0;
}

/* i.MX6SX: select RX equalization level 2, then apply the common setup. */
static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);

	return imx_pcie_init_phy(imx_pcie);
}

/*
 * Wait (up to PHY_PLL_LOCK_WAIT_TIMEOUT) for the i.MX7D PHY PLL lock bit
 * in IOMUXC_GPR22.  A timeout is only logged, not propagated.
 */
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
				IOMUXC_GPR22, val,
				val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}

/* i.MX95 PLL lock wait; unlike the i.MX7D variant it reports the timeout. */
static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;
497 498 if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr, 499 IMX95_PCIE_PHY_MPLLA_CTRL, val, 500 val & IMX95_PCIE_PHY_MPLL_STATE, 501 PHY_PLL_LOCK_WAIT_USLEEP_MAX, 502 PHY_PLL_LOCK_WAIT_TIMEOUT)) { 503 dev_err(dev, "PCIe PLL lock timeout\n"); 504 return -ETIMEDOUT; 505 } 506 507 return 0; 508 } 509 510 static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie) 511 { 512 unsigned long phy_rate = 0; 513 int mult, div; 514 u16 val; 515 int i; 516 struct clk_bulk_data *clks = imx_pcie->clks; 517 518 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) 519 return 0; 520 521 for (i = 0; i < imx_pcie->num_clks; i++) 522 if (strncmp(clks[i].id, "pcie_phy", 8) == 0) 523 phy_rate = clk_get_rate(clks[i].clk); 524 525 switch (phy_rate) { 526 case 125000000: 527 /* 528 * The default settings of the MPLL are for a 125MHz input 529 * clock, so no need to reconfigure anything in that case. 530 */ 531 return 0; 532 case 100000000: 533 mult = 25; 534 div = 0; 535 break; 536 case 200000000: 537 mult = 25; 538 div = 1; 539 break; 540 default: 541 dev_err(imx_pcie->pci->dev, 542 "Unsupported PHY reference clock rate %lu\n", phy_rate); 543 return -EINVAL; 544 } 545 546 pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); 547 val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << 548 PCIE_PHY_MPLL_MULTIPLIER_SHIFT); 549 val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; 550 val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; 551 pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); 552 553 pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val); 554 val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << 555 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); 556 val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; 557 val |= PCIE_PHY_ATEOVRD_EN; 558 pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val); 559 560 return 0; 561 } 562 563 static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie) 564 { 565 u16 tmp; 566 567 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) 568 return; 569 570 pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); 571 tmp 
|= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | 572 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 573 pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); 574 575 usleep_range(2000, 3000); 576 577 pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); 578 tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | 579 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 580 pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); 581 } 582 583 #ifdef CONFIG_ARM 584 /* Added for PCI abort handling */ 585 static int imx6q_pcie_abort_handler(unsigned long addr, 586 unsigned int fsr, struct pt_regs *regs) 587 { 588 unsigned long pc = instruction_pointer(regs); 589 unsigned long instr = *(unsigned long *)pc; 590 int reg = (instr >> 12) & 15; 591 592 /* 593 * If the instruction being executed was a read, 594 * make it look like it read all-ones. 595 */ 596 if ((instr & 0x0c100000) == 0x04100000) { 597 unsigned long val; 598 599 if (instr & 0x00400000) 600 val = 255; 601 else 602 val = -1; 603 604 regs->uregs[reg] = val; 605 regs->ARM_pc += 4; 606 return 0; 607 } 608 609 if ((instr & 0x0e100090) == 0x00100090) { 610 regs->uregs[reg] = -1; 611 regs->ARM_pc += 4; 612 return 0; 613 } 614 615 return 1; 616 } 617 #endif 618 619 static int imx_pcie_attach_pd(struct device *dev) 620 { 621 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 622 struct device_link *link; 623 624 /* Do nothing when in a single power domain */ 625 if (dev->pm_domain) 626 return 0; 627 628 imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); 629 if (IS_ERR(imx_pcie->pd_pcie)) 630 return PTR_ERR(imx_pcie->pd_pcie); 631 /* Do nothing when power domain missing */ 632 if (!imx_pcie->pd_pcie) 633 return 0; 634 link = device_link_add(dev, imx_pcie->pd_pcie, 635 DL_FLAG_STATELESS | 636 DL_FLAG_PM_RUNTIME | 637 DL_FLAG_RPM_ACTIVE); 638 if (!link) { 639 dev_err(dev, "Failed to add device_link to pcie pd\n"); 640 return -EINVAL; 641 } 642 643 imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); 644 if (IS_ERR(imx_pcie->pd_pcie_phy)) 645 return 
PTR_ERR(imx_pcie->pd_pcie_phy); 646 647 link = device_link_add(dev, imx_pcie->pd_pcie_phy, 648 DL_FLAG_STATELESS | 649 DL_FLAG_PM_RUNTIME | 650 DL_FLAG_RPM_ACTIVE); 651 if (!link) { 652 dev_err(dev, "Failed to add device_link to pcie_phy pd\n"); 653 return -EINVAL; 654 } 655 656 return 0; 657 } 658 659 static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 660 { 661 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 662 IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 663 enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN); 664 return 0; 665 } 666 667 static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 668 { 669 if (enable) { 670 /* power up core phy and enable ref clock */ 671 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 672 /* 673 * The async reset input need ref clock to sync internally, 674 * when the ref clock comes after reset, internal synced 675 * reset time is too short, cannot meet the requirement. 676 * Add a ~10us delay here. 677 */ 678 usleep_range(10, 100); 679 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 680 } else { 681 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 682 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 683 } 684 685 return 0; 686 } 687 688 static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 689 { 690 int offset = imx_pcie_grp_offset(imx_pcie); 691 692 regmap_update_bits(imx_pcie->iomuxc_gpr, offset, 693 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, 694 enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); 695 regmap_update_bits(imx_pcie->iomuxc_gpr, offset, 696 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, 697 enable ? 
IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
	return 0;
}

/* i.MX7D: clear PHY_REFCLK_SEL to enable the reference clock, set to disable. */
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
			enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
	return 0;
}

/*
 * Enable the bulk clocks and, where the variant provides a hook, the
 * SoC-specific reference clock.  Rolls the bulk clocks back on failure
 * so the caller sees an all-or-nothing result.
 */
static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
	if (ret)
		return ret;

	if (imx_pcie->drvdata->enable_ref_clk) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret) {
			dev_err(dev, "Failed to enable PCIe REFCLK\n");
			goto err_ref_clk;
		}
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);
	return 0;

err_ref_clk:
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);

	return ret;
}

/* Reverse of imx_pcie_clk_enable(): reference clock first, then bulk clocks. */
static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
	if (imx_pcie->drvdata->enable_ref_clk)
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}

/* i.MX6SX core reset; assert additionally powers down the PCIe test logic. */
static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (assert)
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_TEST_POWERDOWN);

	/* Force PCIe PHY reset */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
			assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
	return 0;
}

/* i.MX6QP core reset is a plain software-reset bit in IOMUXC_GPR1. */
static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
			assert ?
IMX6Q_GPR1_PCIE_SW_RST : 0);
	/* Let the core settle after releasing the software reset. */
	if (!assert)
		usleep_range(200, 500);

	return 0;
}

/* i.MX6Q: only the assert half acts (test power-down + REF_CLK enable). */
static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (!assert)
		return 0;

	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);

	return 0;
}

/*
 * i.MX7D core reset: all work happens on de-assert, where the ERR010728
 * PHY workaround must be applied before waiting for the PLL to lock.
 */
static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;

	if (assert)
		return 0;

	/*
	 * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
	 *
	 * PCIe: PLL may fail to lock under corner conditions.
	 *
	 * Initial VCO oscillation may fail under corner conditions such as
	 * cold temperature which will cause the PCIe PLL fail to lock in the
	 * initialization phase.
	 *
	 * The Duty-cycle Corrector calibration must be disabled.
	 *
	 * 1. De-assert the G_RST signal by clearing
	 *    SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
	 * 2. De-assert DCC_FB_EN by writing data 0x29 to the register
	 *    address 0x306d0014 (PCIE_PHY_CMN_REG4).
	 * 3. Assert RX_EQS, RX_EQ_SEL by writing data 0x48 to the register
	 *    address 0x306d0090 (PCIE_PHY_CMN_REG24).
	 * 4. Assert ATT_MODE by writing data 0xbc to the register
	 *    address 0x306d0098 (PCIE_PHY_CMN_REG26).
	 * 5. De-assert the CMN_RST signal by clearing register bit
	 *    SRC_PCIEPHY_RCR[PCIEPHY_BTN]
	 */

	if (likely(imx_pcie->phy_base)) {
		/* De-assert DCC_FB_EN */
		writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
		/* Assert RX_EQS and RX_EQS_SEL */
		writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
		       imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
		/* Assert ATT_MODE */
		writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
	} else {
		dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
	}
	imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
	return 0;
}

/* i.MX95: the full COLD reset pulse is performed during the assert phase. */
static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	u32 val;

	if (assert) {
		/*
		 * From i.MX95 PCIe PHY perspective, the COLD reset toggle
		 * should be complete after power-up by the following sequence.
		 *                    > 10us(at power-up)
		 *                    > 10ns(warm reset)
		 *                   |<------------>|
		 *                    ______________
		 * phy_reset ____/  \________________
		 *                                    ____________
		 * ref_clk_en_______________________/
		 * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
		 */
		regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				IMX95_PCIE_COLD_RST);
		/*
		 * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
		 * hardware by doing a read. Otherwise, there is no guarantee
		 * that the write has reached the hardware before udelay().
 */
		regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				     &val);
		udelay(15);
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				  IMX95_PCIE_COLD_RST);
		/* Flush the clearing write the same way before the delay. */
		regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				     &val);
		udelay(10);
	}

	return 0;
}

/* Put the PHY and controller into reset and drive the reset GPIO active. */
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_assert(imx_pcie->pciephy_reset);
	reset_control_assert(imx_pcie->apps_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, true);

	/* Some boards don't have PCIe reset GPIO. */
	gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
}

/* Release the resets taken by imx_pcie_assert_core_reset(), same order. */
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_deassert(imx_pcie->pciephy_reset);
	reset_control_deassert(imx_pcie->apps_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, false);

	/* Some boards don't have PCIe reset GPIO. */
	if (imx_pcie->reset_gpiod) {
		msleep(100);
		gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
		msleep(100);
	}

	return 0;
}

/*
 * Poll PCIE_LINK_WIDTH_SPEED_CONTROL until the core clears
 * PORT_LOGIC_SPEED_CHANGE (up to 200 polls of 100-1000us).
 */
static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished.
*/ 901 if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) 902 return 0; 903 usleep_range(100, 1000); 904 } 905 906 dev_err(dev, "Speed change timeout\n"); 907 return -ETIMEDOUT; 908 } 909 910 static void imx_pcie_ltssm_enable(struct device *dev) 911 { 912 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 913 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 914 u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP); 915 u32 tmp; 916 917 tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP); 918 phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp)); 919 if (drvdata->ltssm_mask) 920 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, 921 drvdata->ltssm_mask); 922 923 reset_control_deassert(imx_pcie->apps_reset); 924 } 925 926 static void imx_pcie_ltssm_disable(struct device *dev) 927 { 928 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 929 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 930 931 phy_set_speed(imx_pcie->phy, 0); 932 if (drvdata->ltssm_mask) 933 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, 934 drvdata->ltssm_mask, 0); 935 936 reset_control_assert(imx_pcie->apps_reset); 937 } 938 939 static int imx_pcie_start_link(struct dw_pcie *pci) 940 { 941 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 942 struct device *dev = pci->dev; 943 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 944 u32 tmp; 945 int ret; 946 947 if (!(imx_pcie->drvdata->flags & 948 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) { 949 imx_pcie_ltssm_enable(dev); 950 return 0; 951 } 952 953 /* 954 * Force Gen1 operation when starting the link. In case the link is 955 * started in Gen2 mode, there is a possibility the devices on the 956 * bus will not be detected at all. This happens with PCIe switches. 
957 */ 958 dw_pcie_dbi_ro_wr_en(pci); 959 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); 960 tmp &= ~PCI_EXP_LNKCAP_SLS; 961 tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; 962 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); 963 dw_pcie_dbi_ro_wr_dis(pci); 964 965 /* Start LTSSM. */ 966 imx_pcie_ltssm_enable(dev); 967 968 if (pci->max_link_speed > 1) { 969 ret = dw_pcie_wait_for_link(pci); 970 if (ret) 971 goto err_reset_phy; 972 973 /* Allow faster modes after the link is up */ 974 dw_pcie_dbi_ro_wr_en(pci); 975 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); 976 tmp &= ~PCI_EXP_LNKCAP_SLS; 977 tmp |= pci->max_link_speed; 978 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); 979 980 /* 981 * Start Directed Speed Change so the best possible 982 * speed both link partners support can be negotiated. 983 */ 984 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 985 tmp |= PORT_LOGIC_SPEED_CHANGE; 986 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); 987 dw_pcie_dbi_ro_wr_dis(pci); 988 989 ret = imx_pcie_wait_for_speed_change(imx_pcie); 990 if (ret) { 991 dev_err(dev, "Failed to bring link up!\n"); 992 goto err_reset_phy; 993 } 994 } else { 995 dev_info(dev, "Link: Only Gen1 is enabled\n"); 996 } 997 998 return 0; 999 1000 err_reset_phy: 1001 dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", 1002 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), 1003 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); 1004 imx_pcie_reset_phy(imx_pcie); 1005 return 0; 1006 } 1007 1008 static void imx_pcie_stop_link(struct dw_pcie *pci) 1009 { 1010 struct device *dev = pci->dev; 1011 1012 /* Turn off PCIe LTSSM */ 1013 imx_pcie_ltssm_disable(dev); 1014 } 1015 1016 static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid) 1017 { 1018 struct dw_pcie *pci = imx_pcie->pci; 1019 struct device *dev = pci->dev; 1020 u32 data1, data2; 1021 int free = -1; 1022 int i; 1023 1024 if (sid >= 64) { 1025 dev_err(dev, "Invalid SID for index %d\n", sid); 1026 return 
-EINVAL; 1027 } 1028 1029 guard(mutex)(&imx_pcie->lock); 1030 1031 /* 1032 * Iterate through all LUT entries to check for duplicate RID and 1033 * identify the first available entry. Configure this available entry 1034 * immediately after verification to avoid rescanning it. 1035 */ 1036 for (i = 0; i < IMX95_MAX_LUT; i++) { 1037 regmap_write(imx_pcie->iomuxc_gpr, 1038 IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); 1039 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1); 1040 1041 if (!(data1 & IMX95_PE0_LUT_VLD)) { 1042 if (free < 0) 1043 free = i; 1044 continue; 1045 } 1046 1047 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); 1048 1049 /* Do not add duplicate RID */ 1050 if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) { 1051 dev_warn(dev, "Existing LUT entry available for RID (%d)", rid); 1052 return 0; 1053 } 1054 } 1055 1056 if (free < 0) { 1057 dev_err(dev, "LUT entry is not available\n"); 1058 return -ENOSPC; 1059 } 1060 1061 data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0); 1062 data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid); 1063 data1 |= IMX95_PE0_LUT_VLD; 1064 regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1); 1065 1066 data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ 1067 data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid); 1068 regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2); 1069 1070 regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free); 1071 1072 return 0; 1073 } 1074 1075 static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid) 1076 { 1077 u32 data2; 1078 int i; 1079 1080 guard(mutex)(&imx_pcie->lock); 1081 1082 for (i = 0; i < IMX95_MAX_LUT; i++) { 1083 regmap_write(imx_pcie->iomuxc_gpr, 1084 IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); 1085 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); 1086 if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) { 1087 regmap_write(imx_pcie->iomuxc_gpr, 1088 IMX95_PE0_LUT_DATA1, 0); 1089 regmap_write(imx_pcie->iomuxc_gpr, 
				     IMX95_PE0_LUT_DATA2, 0);
			/* Commit the cleared entry (write access, no RWA) */
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_ACSCTRL, i);

			break;
		}
	}
}

/*
 * imx_pcie_enable_device - bridge enable_device hook (i.MX95): derive
 * the streamID for @pdev from the DT "iommu-map"/"msi-map" properties
 * and install a LUT entry for its Requester ID.
 *
 * Returns 0 on success (or when neither map applies), negative errno
 * on an unmappable RID or conflicting map entries.
 */
static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
				  struct pci_dev *pdev)
{
	struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
	u32 sid_i, sid_m, rid = pci_dev_id(pdev);
	struct device_node *target;
	struct device *dev;
	int err_i, err_m;
	u32 sid = 0;

	dev = imx_pcie->pci->dev;

	target = NULL;
	err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
			  &target, &sid_i);
	if (target) {
		of_node_put(target);
	} else {
		/*
		 * "target == NULL && err_i == 0" means RID out of map range.
		 * Use 1:1 map RID to streamID. Hardware can't support this
		 * because the streamID is only 6 bits
		 */
		err_i = -EINVAL;
	}

	target = NULL;
	err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
			  &target, &sid_m);

	/*
	 *   err_m    target
	 *   0        NULL     RID out of range. Use 1:1 map RID to
	 *                     streamID. Current hardware can't support
	 *                     it, so return -EINVAL.
	 *   != 0     NULL     msi-map does not exist, use built-in MSI
	 *   0        != NULL  Get correct streamID from RID
	 *   != 0     != NULL  Invalid combination
	 */
	if (!err_m && !target)
		return -EINVAL;
	else if (target)
		of_node_put(target); /* Find streamID map entry for RID in msi-map */

	/*
	 * msi-map   iommu-map
	 *   N          N        DWC MSI Ctrl
	 *   Y          Y        ITS + SMMU, require the same SID
	 *   Y          N        ITS
	 *   N          Y        DWC MSI Ctrl + SMMU
	 */
	if (err_i && err_m)
		return 0;

	if (!err_i && !err_m) {
		/*
		 * On i.MX95 the MSI glue layer prepends a 2-bit
		 * controller ID (00 PCIe0, 01 ENETC, 10 PCIe1) to the
		 * 6-bit streamID coming out of the LUT, so mask those
		 * 2 bits off to recover the streamID. The IOMMU glue
		 * layer doesn't do that, hence the masked comparison.
		 */
		if (sid_i != (sid_m & IMX95_SID_MASK)) {
			dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
			return -EINVAL;
		}
	}

	if (!err_i)
		sid = sid_i;
	else if (!err_m)
		sid = sid_m & IMX95_SID_MASK;

	return imx_pcie_add_lut(imx_pcie, rid, sid);
}

/*
 * Bridge disable_device hook: drop the LUT entry installed by
 * imx_pcie_enable_device() for this device's Requester ID.
 */
static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
				    struct pci_dev *pdev)
{
	struct imx_pcie *imx_pcie;

	imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
	imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
}

/*
 * imx_pcie_host_init - bring the controller up: regulator, core reset,
 * PHY init, clocks, then core reset release — the order below is the
 * hardware-required sequence; do not reorder.
 */
static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	int ret;

	if (imx_pcie->vpcie) {
		ret = regulator_enable(imx_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return ret;
		}
	}

	/* LUT-capable parts (i.MX95) need per-device enable/disable hooks */
	if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
		pp->bridge->enable_device = imx_pcie_enable_device;
		pp->bridge->disable_device = imx_pcie_disable_device;
	}

	imx_pcie_assert_core_reset(imx_pcie);

	if (imx_pcie->drvdata->init_phy)
		imx_pcie->drvdata->init_phy(imx_pcie);

	imx_pcie_configure_type(imx_pcie);

	ret = imx_pcie_clk_enable(imx_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
		goto err_reg_disable;
	}

	if (imx_pcie->phy) {
		ret = phy_init(imx_pcie->phy);
		if (ret) {
			dev_err(dev, "pcie PHY power up failed\n");
			goto err_clk_disable;
		}

		/* RC vs EP mode follows the drvdata, not runtime state */
		ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
				       imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
				       PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
		if (ret) {
			dev_err(dev, "unable to set PCIe PHY mode\n");
			goto err_phy_exit;
		}

		ret = phy_power_on(imx_pcie->phy);
		if (ret) {
			dev_err(dev, "waiting for PHY ready timeout!\n");
			goto err_phy_exit;
		}
	}

	ret = imx_pcie_deassert_core_reset(imx_pcie);
	if (ret < 0) {
		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
		goto err_phy_off;
	}

	/* Some variants (i.MX95) must wait for the PHY PLL to lock */
	if (imx_pcie->drvdata->wait_pll_lock) {
		ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
		if (ret < 0)
			goto err_phy_off;
	}

	imx_setup_phy_mpll(imx_pcie);

	return 0;

err_phy_off:
	phy_power_off(imx_pcie->phy);
err_phy_exit:
	phy_exit(imx_pcie->phy);
err_clk_disable:
	imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
	return ret;
}

/* Tear down what imx_pcie_host_init() brought up, in reverse order. */
static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	if (imx_pcie->phy) {
		if (phy_power_off(imx_pcie->phy))
			dev_err(pci->dev, "unable to power off PHY\n");
		phy_exit(imx_pcie->phy);
	}
	imx_pcie_clk_disable(imx_pcie);

	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
}

/* Post-init hook: apply the ERR051586 erratum workaround if flagged. */
static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	u32 val;

	if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
		/*
		 * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
		 *
		 * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
		 * is 1 which makes receiver non-compliant with the ZRX-DC
		 * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
		 * It causes unnecessary timeout in L1.
		 *
		 * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
		 * to 0.
		 */
		dw_pcie_dbi_ro_wr_en(pci);
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
		dw_pcie_dbi_ro_wr_dis(pci);
	}
}

/*
 * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
 * register is reserved, so the generic DWC implementation of sending the
 * PME_Turn_Off message using a dummy MMIO write cannot be used.
 */
static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	/* Pulse the GPR12 PM_TURN_OFF bit to broadcast PME_Turn_Off */
	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
	regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);

	/* Give endpoints time to reach L2 before power-down */
	usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US);
}

/* Host ops for variants that need the GPR-based PME_Turn_Off above. */
static const struct dw_pcie_host_ops imx_pcie_host_ops = {
	.init = imx_pcie_host_init,
	.deinit = imx_pcie_host_exit,
	.pme_turn_off = imx_pcie_pme_turn_off,
};

/* Host ops for variants where the generic DWC PME_Turn_Off works. */
static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
	.init = imx_pcie_host_init,
	.deinit = imx_pcie_host_exit,
	.post_init = imx_pcie_host_post_init,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = imx_pcie_start_link,
	.stop_link = imx_pcie_stop_link,
};

/* Endpoint-mode init: clear every BAR so the EP starts from a clean state. */
static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	enum pci_barno bar;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}

/*
 * Raise an interrupt (INTx/MSI/MSI-X) towards the RC on behalf of EP
 * function @func_no; dispatches to the matching DWC helper.
 */
static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case
	PCI_IRQ_INTX:
		return dw_pcie_ep_raise_intx_irq(ep, func_no);
	case PCI_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_IRQ_MSIX:
		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	/* NOTE(review): unreachable — every switch case returns above */
	return 0;
}

/* EPC features for i.MX8M family EP mode; BAR1/BAR3 are reserved. */
static const struct pci_epc_features imx8m_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.align = SZ_64K,
};

/* EPC features for i.MX8Q EP mode; BAR1/BAR3/BAR5 are reserved. */
static const struct pci_epc_features imx8q_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.bar[BAR_5] = { .type = BAR_RESERVED, },
	.align = SZ_64K,
};

/*
 *      | Default | Default | Default | BAR Sizing
 * BAR# | Enable? | Type    | Size    | Scheme
 * =======================================================
 * BAR0 | Enable  | 64-bit  |  1 MB   | Programmable Size
 * BAR1 | Disable | 32-bit  | 64 KB   | Fixed Size
 *        (BAR1 should be disabled if BAR0 is 64-bit)
 * BAR2 | Enable  | 32-bit  |  1 MB   | Programmable Size
 * BAR3 | Enable  | 32-bit  | 64 KB   | Programmable Size
 * BAR4 | Enable  | 32-bit  |  1 MB   | Programmable Size
 * BAR5 | Enable  | 32-bit  | 64 KB   | Programmable Size
 */
static const struct pci_epc_features imx95_pcie_epc_features = {
	.msi_capable = true,
	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
	.align = SZ_4K,
};

/* Hand the per-variant EPC feature table to the DWC EP core. */
static const struct pci_epc_features*
imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	return imx_pcie->drvdata->epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.init = imx_pcie_ep_init,
	.raise_irq = imx_pcie_ep_raise_irq,
	.get_features = imx_pcie_ep_get_features,
};

/*
 * imx_add_pcie_ep - endpoint-mode counterpart of host bringup: run the
 * shared host_init sequence, register the DWC endpoint, then start the
 * LTSSM so the RC can train the link.
 */
static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
			   struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct dw_pcie *pci = imx_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = pci->dev;

	imx_pcie_host_init(pp);
	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	ep->page_size = imx_pcie->drvdata->epc_features->align;

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}
	/* Apply errata workarounds (e.g. ERR051586) in EP mode too */
	imx_pcie_host_post_init(pp);

	ret = dw_pcie_ep_init_registers(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC endpoint registers\n");
		dw_pcie_ep_deinit(ep);
		return ret;
	}

	pci_epc_init_notify(ep->epc);

	/* Start LTSSM. */
	imx_pcie_ltssm_enable(dev);

	return 0;
}

/*
 * Save (@save == true) or restore the MSI control word around
 * suspend/resume; it is lost when the core powers down on some
 * variants (see the BROKEN_SUSPEND comments in resume below).
 */
static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
	u8 offset;
	u16 val;
	struct dw_pcie *pci = imx_pcie->pci;

	if (pci_msi_enabled()) {
		offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
		if (save) {
			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
			imx_pcie->msi_ctrl = val;
		} else {
			dw_pcie_dbi_ro_wr_en(pci);
			val = imx_pcie->msi_ctrl;
			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
			dw_pcie_dbi_ro_wr_dis(pci);
		}
	}
}

/* Snapshot every valid LUT entry so it can be restored after resume. */
static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
{
	u32 data1, data2;
	int i;

	for (i = 0; i < IMX95_MAX_LUT; i++) {
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
			     IMX95_PEO_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
		if (data1 & IMX95_PE0_LUT_VLD) {
			imx_pcie->luts[i].data1 = data1;
			imx_pcie->luts[i].data2 = data2;
		} else {
			/* Invalid entries are zeroed so restore skips them */
			imx_pcie->luts[i].data1 = 0;
			imx_pcie->luts[i].data2 = 0;
		}
	}
}

/* Reprogram the LUT entries captured by imx_pcie_lut_save(). */
static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
{
	int i;

	for (i = 0; i < IMX95_MAX_LUT; i++) {
		if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
			continue;

		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
			     imx_pcie->luts[i].data1);
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
			     imx_pcie->luts[i].data2);
		/* Writing the bare index commits DATA1/DATA2 to entry i */
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
	}
}

static int imx_pcie_suspend_noirq(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx_pcie_msi_save_restore(imx_pcie, true);
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
		imx_pcie_lut_save(imx_pcie);
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		/*
		 * The minimum for a workaround would be to set PERST# and to
		 * set the PCIE_TEST_PD flag. However, we can also disable the
		 * clock which saves some power.
		 */
		imx_pcie_assert_core_reset(imx_pcie);
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	} else {
		return dw_pcie_suspend_noirq(imx_pcie->pci);
	}

	return 0;
}

/*
 * Resume counterpart of imx_pcie_suspend_noirq(): re-enable clocks and
 * the core (or the generic DWC path), then restore LUT and MSI state.
 */
static int imx_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret)
			return ret;
		ret = imx_pcie_deassert_core_reset(imx_pcie);
		if (ret)
			return ret;

		/*
		 * Using PCIE_TEST_PD seems to disable MSI and powers down the
		 * root complex. This is why we have to setup the rc again and
		 * why we have to restore the MSI register.
		 */
		ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
		if (ret)
			return ret;
	} else {
		ret = dw_pcie_resume_noirq(imx_pcie->pci);
		if (ret)
			return ret;
	}
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
		imx_pcie_lut_restore(imx_pcie);
	imx_pcie_msi_save_restore(imx_pcie, false);

	return 0;
}

static const struct dev_pm_ops imx_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
				  imx_pcie_resume_noirq)
};

/*
 * imx_pcie_probe - gather all per-variant resources (PHY, GPIOs,
 * clocks, resets, GPR/serdes regmaps, DT tuning properties), then
 * register as DWC host or endpoint depending on drvdata->mode.
 */
static int imx_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx_pcie *imx_pcie;
	struct device_node *np;
	struct device_node *node = dev->of_node;
	int ret, domain;
	u16 val;

	imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
	if (!imx_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx_pcie->pci = pci;
	imx_pcie->drvdata = of_device_get_match_data(dev);

	mutex_init(&imx_pcie->lock);

	if (imx_pcie->drvdata->ops)
		pci->pp.ops = imx_pcie->drvdata->ops;
	else
		pci->pp.ops = &imx_pcie_host_dw_pme_ops;

	/* Find the PHY if one is defined, only imx7d uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx_pcie->phy_base))
			return PTR_ERR(imx_pcie->phy_base);
	}

	/* Fetch GPIOs */
	imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(imx_pcie->reset_gpiod))
		return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
				     "unable to get reset gpio\n");
	gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");

	/* Fetch clocks */
	imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
	if (imx_pcie->num_clks < 0)
		return dev_err_probe(dev, imx_pcie->num_clks,
				     "failed to get clocks\n");

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
		imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
		if (IS_ERR(imx_pcie->phy))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
					     "failed to get pcie phy\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
		imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
		if (IS_ERR(imx_pcie->apps_reset))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
					     "failed to get pcie apps reset control\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
		imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
		if (IS_ERR(imx_pcie->pciephy_reset))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
					     "Failed to get PCIEPHY reset control\n");
	}

	switch (imx_pcie->drvdata->variant) {
	case IMX8MQ:
	case IMX8MQ_EP:
		/* i.MX8MQ has two controllers, told apart by PCI domain */
		domain = of_get_pci_domain_nr(node);
		if (domain < 0 || domain > 1)
			return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");

		imx_pcie->controller_id = domain;
		break;
	default:
		break;
	}

	if (imx_pcie->drvdata->gpr) {
		/* Grab GPR config register range */
		imx_pcie->iomuxc_gpr =
			 syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
		if (IS_ERR(imx_pcie->iomuxc_gpr))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
					     "unable to find iomuxc registers\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
		void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");

		if
		   (IS_ERR(off))
			return dev_err_probe(dev, PTR_ERR(off),
					     "unable to find serdes registers\n");

		static const struct regmap_config regmap_config = {
			.reg_bits = 32,
			.val_bits = 32,
			.reg_stride = 4,
		};

		/* Serdes parts have no syscon GPR; wrap "app" MMIO instead */
		imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
		if (IS_ERR(imx_pcie->iomuxc_gpr))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
					     "unable to find iomuxc registers\n");
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx_pcie->tx_deemph_gen1))
		imx_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx_pcie->tx_deemph_gen2_3p5db))
		imx_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx_pcie->tx_deemph_gen2_6db))
		imx_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx_pcie->tx_swing_full))
		imx_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx_pcie->tx_swing_low))
		imx_pcie->tx_swing_low = 127;

	/* Limit link speed */
	pci->max_link_speed = 1;
	of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);

	/* Both regulators are optional; -ENODEV just means "not wired" */
	imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx_pcie->vpcie)) {
		if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx_pcie->vpcie);
		imx_pcie->vpcie = NULL;
	}

	imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
	if (IS_ERR(imx_pcie->vph)) {
		if (PTR_ERR(imx_pcie->vph) != -ENODEV)
			return PTR_ERR(imx_pcie->vph);
		imx_pcie->vph = NULL;
	}

	platform_set_drvdata(pdev, imx_pcie);

	ret = imx_pcie_attach_pd(dev);
	if (ret)
		return ret;

	pci->use_parent_dt_ranges = true;
	if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
		ret = imx_add_pcie_ep(imx_pcie, pdev);
		if (ret < 0)
			return ret;
	} else {
		pci->pp.use_atu_msg = true;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			return ret;

		if (pci_msi_enabled()) {
			u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);

			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
			val |= PCI_MSI_FLAGS_ENABLE;
			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
		}
	}

	return 0;
}

/* Put the core back in reset so the bootloader sees a clean state. */
static void imx_pcie_shutdown(struct platform_device *pdev)
{
	struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx_pcie_assert_core_reset(imx_pcie);
}

/* Per-SoC configuration, indexed by enum imx_pcie_variants. */
static const struct imx_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_BROKEN_SUSPEND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		/* DBI region is 0x200 bytes; see imx_pcie_quirk() */
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6q_pcie_core_reset,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx6sx_pcie_init_phy,
		.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
		.core_reset = imx6sx_pcie_core_reset,
		/* Needs the GPR-based PME_Turn_Off host ops */
		.ops = &imx_pcie_host_ops,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6qp_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.gpr = "fsl,imx7d-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx7d_pcie_enable_ref_clk,
		.core_reset = imx7d_pcie_core_reset,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		/* Index [1] selects the second controller (controller_id 1) */
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM] = {
		.variant = IMX8MM,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP] = {
		.variant = IMX8MP,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q] = {
		.variant = IMX8Q,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX95] = {
		.variant = IMX95,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_HAS_LUT |
			 IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.core_reset = imx95_pcie_core_reset,
		.init_phy = imx95_pcie_init_phy,
		.wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
	},
	[IMX8MQ_EP] = {
		.variant = IMX8MQ_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM_EP] = {
		.variant = IMX8MM_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP_EP] = {
		.variant = IMX8MP_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q_EP] = {
		.variant = IMX8Q_EP,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.epc_features = &imx8q_pcie_epc_features,
	},
	[IMX95_EP] = {
		.variant = IMX95_EP,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
			 IMX_PCIE_FLAG_SUPPORT_64BIT,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.init_phy = imx95_pcie_init_phy,
		.core_reset = imx95_pcie_core_reset,
		.wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
		.epc_features = &imx95_pcie_epc_features,
		.mode = DW_PCIE_EP_TYPE,
	},
};

static const struct of_device_id imx_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
	{ .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
	{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
	{ .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
	{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
	{},
};

static struct platform_driver imx_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = imx_pcie_probe,
.shutdown = imx_pcie_shutdown, 1992 }; 1993 1994 static void imx_pcie_quirk(struct pci_dev *dev) 1995 { 1996 struct pci_bus *bus = dev->bus; 1997 struct dw_pcie_rp *pp = bus->sysdata; 1998 1999 /* Bus parent is the PCI bridge, its parent is this platform driver */ 2000 if (!bus->dev.parent || !bus->dev.parent->parent) 2001 return; 2002 2003 /* Make sure we only quirk devices associated with this driver */ 2004 if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver) 2005 return; 2006 2007 if (pci_is_root_bus(bus)) { 2008 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 2009 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 2010 2011 /* 2012 * Limit config length to avoid the kernel reading beyond 2013 * the register set and causing an abort on i.MX 6Quad 2014 */ 2015 if (imx_pcie->drvdata->dbi_length) { 2016 dev->cfg_size = imx_pcie->drvdata->dbi_length; 2017 dev_info(&dev->dev, "Limiting cfg_size to %d\n", 2018 dev->cfg_size); 2019 } 2020 } 2021 } 2022 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, 2023 PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk); 2024 2025 static int __init imx_pcie_init(void) 2026 { 2027 #ifdef CONFIG_ARM 2028 struct device_node *np; 2029 2030 np = of_find_matching_node(NULL, imx_pcie_of_match); 2031 if (!np) 2032 return -ENODEV; 2033 of_node_put(np); 2034 2035 /* 2036 * Since probe() can be deferred we need to make sure that 2037 * hook_fault_code is not called after __init memory is freed 2038 * by kernel and since imx6q_pcie_abort_handler() is a no-op, 2039 * we can install the handler here without risking it 2040 * accessing some uninitialized driver state. 2041 */ 2042 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, 2043 "external abort on non-linefetch"); 2044 #endif 2045 2046 return platform_driver_register(&imx_pcie_driver); 2047 } 2048 device_initcall(imx_pcie_init); 2049