// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * https://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "pcie-designware.h"

/* i.MX8M IOMUXC GPR bits (written through the iomuxc_gpr regmap) */
#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS		BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000

/* i.MX95 HSIO block-control registers (also accessed via regmap) */
#define IMX95_PCIE_PHY_GEN_CTRL			0x0
#define IMX95_PCIE_REF_USE_PAD			BIT(17)

#define IMX95_PCIE_SS_RW_REG_0			0xf0
#define IMX95_PCIE_REF_CLKEN			BIT(23)
#define IMX95_PCIE_PHY_CR_PARA_SEL		BIT(9)

#define IMX95_PE0_GEN_CTRL_1			0x1050
#define IMX95_PCIE_DEVICE_TYPE			GENMASK(3, 0)

#define IMX95_PE0_GEN_CTRL_3			0x1058
#define IMX95_PCIE_LTSSM_EN			BIT(0)

/* The struct imx_pcie is stored as the dw_pcie device's drvdata */
#define to_imx_pcie(x)	dev_get_drvdata((x)->dev)

/* Supported SoC variants; *_EP entries are the endpoint-mode flavours */
enum imx_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
	IMX8MM,
	IMX8MP,
	IMX8Q,
	IMX95,
	IMX8MQ_EP,
	IMX8MM_EP,
	IMX8MP_EP,
	IMX95_EP,
};

/* Per-variant capability/quirk flags kept in imx_pcie_drvdata::flags */
#define IMX_PCIE_FLAG_IMX_PHY			BIT(0)
#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE		BIT(1)
#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
#define IMX_PCIE_FLAG_HAS_PHYDRV		BIT(3)
#define IMX_PCIE_FLAG_HAS_APP_RESET		BIT(4)
#define IMX_PCIE_FLAG_HAS_PHY_RESET		BIT(5)
#define IMX_PCIE_FLAG_HAS_SERDES		BIT(6)
#define IMX_PCIE_FLAG_SUPPORT_64BIT		BIT(7)
#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP		BIT(8)
/*
 * Because of ERR005723 (PCIe does not support L2 power down) we need to
 * workaround suspend resume on some devices which are affected by this errata.
 */
#define IMX_PCIE_FLAG_BROKEN_SUSPEND		BIT(9)

#define imx_check_flag(pci, val)	(pci->drvdata->flags & val)

#define IMX_PCIE_MAX_CLKS	6
#define IMX_PCIE_MAX_INSTANCES	2

struct imx_pcie;

/*
 * Per-variant constant data: mode/LTSSM register layout, clock names and
 * the optional PHY-init / refclk / core-reset callbacks.
 */
struct imx_pcie_drvdata {
	enum imx_pcie_variants variant;
	enum dw_pcie_device_mode mode;
	u32 flags;
	int dbi_length;
	const char *gpr;
	const char * const *clk_names;
	const u32 clks_cnt;
	const u32 ltssm_off;
	const u32 ltssm_mask;
	const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
	const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
	const struct pci_epc_features *epc_features;
	int (*init_phy)(struct imx_pcie *pcie);
	int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
	int (*core_reset)(struct imx_pcie *pcie, bool assert);
};

/* Run-time state for one controller instance */
struct imx_pcie {
	struct dw_pcie *pci;
	struct gpio_desc *reset_gpiod;
	bool link_is_up;
	struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
	struct regmap *iomuxc_gpr;
	u16 msi_ctrl;
	u32 controller_id;
	struct reset_control *pciephy_reset;
	struct reset_control *apps_reset;
	struct reset_control *turnoff_reset;
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	struct regulator *vpcie;
	struct regulator *vph;
	void __iomem *phy_base;

	/* power domain for pcie */
	struct device *pd_pcie;
	/* power domain for pcie phy */
	struct device *pd_pcie_phy;
	struct phy *phy;
	const struct imx_pcie_drvdata *drvdata;
};

/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)	FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR	BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT	BIT(17)
#define PCIE_PHY_CTRL_WR	BIT(18)
#define PCIE_PHY_CTRL_RD	BIT(19)

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK	BIT(16)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define PCIE_PHY_ATEOVRD_EN			BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15	        0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)

/* Pick the GPR register holding this controller's bits (i.MX8M variants only) */
static unsigned int
imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
	/* Only the i.MX8M family routes per-controller bits to GPR14/GPR16 */
	WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
		imx_pcie->drvdata->variant != IMX8MQ_EP &&
		imx_pcie->drvdata->variant != IMX8MM &&
		imx_pcie->drvdata->variant != IMX8MM_EP &&
		imx_pcie->drvdata->variant != IMX8MP &&
		imx_pcie->drvdata->variant != IMX8MP_EP);
	return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

/* i.MX95: select CR parallel interface and internal reference clock */
static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_PHY_CR_PARA_SEL,
			   IMX95_PCIE_PHY_CR_PARA_SEL);

	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_PHY_GEN_CTRL,
			   IMX95_PCIE_REF_USE_PAD, 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_REF_CLKEN,
			   IMX95_PCIE_REF_CLKEN);

	return 0;
}

/* Program RC vs. EP device type into the variant's mode GPR field */
static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	unsigned int mask, val, mode, id;

	if (drvdata->mode == DW_PCIE_EP_TYPE)
		mode = PCI_EXP_TYPE_ENDPOINT;
	else
		mode = PCI_EXP_TYPE_ROOT_PORT;

	id = imx_pcie->controller_id;

	/* If mode_mask is 0, then generic PHY driver is used to set the mode */
	if (!drvdata->mode_mask[0])
		return;

	/* If mode_mask[id] is zero, means each controller have its individual gpr */
	if (!drvdata->mode_mask[id])
		id = 0;

	mask = drvdata->mode_mask[id];
	/* shift the type value to the LSB of the mask field */
	val = mode << (ffs(mask) - 1);

	regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}

/*
 * Busy-wait (up to ~10us) for the PHY control-bus ACK bit to reach
 * exp_val.  Returns 0 on match, -ETIMEDOUT otherwise.
 */
static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

/* Latch a PHY register address on the control bus (cap-addr handshake) */
static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/* Write a 16-bit PCIe PHY control register via the same handshake bus */
static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci,
			   PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

/* i.MX8MQ: select the PAD reference clock and fix up VREG bypass for 3.3V VPH */
static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	/* TODO: Currently this code assumes external oscillator is being used */
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   imx_pcie_grp_offset(imx_pcie),
			   IMX8MQ_GPR_PCIE_REF_USE_PAD,
			   IMX8MQ_GPR_PCIE_REF_USE_PAD);
	/*
	 * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is
	 * supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
	 */
	if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
		regmap_update_bits(imx_pcie->iomuxc_gpr,
				   imx_pcie_grp_offset(imx_pcie),
				   IMX8MQ_GPR_PCIE_VREG_BYPASS,
				   0);

	return 0;
}

/* i.MX7D: use the internal PHY reference clock */
static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);

	return 0;
}

/* i.MX6Q baseline: program TX de-emphasis/swing tuning values from DT */
static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
			   imx_pcie->tx_deemph_gen1 << 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
			   imx_pcie->tx_deemph_gen2_3p5db << 6);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
			   imx_pcie->tx_deemph_gen2_6db << 12);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_FULL,
			   imx_pcie->tx_swing_full << 18);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_LOW,
			   imx_pcie->tx_swing_low << 25);
	return 0;
}

/* i.MX6SX: set RX equalizer, then fall through to the common i.MX6 init */
static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);

	return imx_pcie_init_phy(imx_pcie);
}

/* Poll GPR22 until the i.MX7 PHY PLL reports lock (logs on timeout) */
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}

/*
 * Reprogram the PHY MPLL multiplier/divider to match the actual
 * "pcie_phy" reference clock rate (only on variants with the
 * internal i.MX PHY; 125MHz needs no change).
 */
static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
	unsigned long phy_rate = 0;
	int mult, div;
	u16 val;
	int i;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return 0;

	for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
		if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0)
			phy_rate = clk_get_rate(imx_pcie->clks[i].clk);

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

/* Pulse the RX override bits to reset the internal i.MX PHY receiver */
static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
	u16 tmp;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return;

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

#ifdef CONFIG_ARM
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		/* byte load gets 0xff, word load gets all-ones */
		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	/* halfword load: also fake an all-ones result and skip it */
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
#endif

/*
 * Attach the "pcie" and "pcie_phy" power domains with runtime-PM
 * device links when the device sits in more than one domain.
 */
static int imx_pcie_attach_pd(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx_pcie->pd_pcie))
		return PTR_ERR(imx_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx_pcie->pd_pcie_phy))
		return PTR_ERR(imx_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return
	       0;
}

/* i.MX6SX: power the PHY back up when enabling the reference clock */
static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	if (enable)
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				  IMX6SX_GPR12_PCIE_TEST_POWERDOWN);

	return 0;
}

/* i.MX6Q/QP: gate the PHY power-down and reference clock via GPR1 */
static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	if (enable) {
		/* power up core phy and enable ref clock */
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
		/*
		 * the async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * add one ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
	} else {
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	}

	return 0;
}

/* i.MX8MM/MP: override CLKREQ# so the reference clock is always requested */
static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	int offset = imx_pcie_grp_offset(imx_pcie);

	if (enable) {
		regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
		regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
	}

	return 0;
}

/* i.MX7D: on disable, switch the PHY back to the external refclk input */
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	if (!enable)
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
	return 0;
}

/* Enable the bulk clocks, then the variant's reference clock hook */
static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
	if (ret)
		return ret;

	if (imx_pcie->drvdata->enable_ref_clk) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret) {
			dev_err(dev, "Failed to enable PCIe REFCLK\n");
			goto err_ref_clk;
		}
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);
	return 0;

err_ref_clk:
	clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);

	return ret;
}

/* Reverse of imx_pcie_clk_enable(): refclk hook first, then bulk clocks */
static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
	if (imx_pcie->drvdata->enable_ref_clk)
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
}

static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (assert)
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_TEST_POWERDOWN);

	/* Force PCIe PHY reset */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
			   assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
	return 0;
}

static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
			   assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
	/* let the deasserted reset settle */
	if (!assert)
		usleep_range(200, 500);

	return 0;
}

/* i.MX6Q has no SW reset bit; assert is emulated by powering the PHY down */
static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (!assert)
		return 0;

	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);

	return 0;
}

static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;

	if (assert)
		return 0;

	/*
	 * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
	 *
	 * PCIe: PLL may fail to lock under corner conditions.
	 *
	 * Initial VCO oscillation may fail under corner conditions such as
	 * cold temperature which will cause the PCIe PLL fail to lock in the
	 * initialization phase.
	 *
	 * The Duty-cycle Corrector calibration must be disabled.
	 *
	 * 1. De-assert the G_RST signal by clearing
	 *    SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
	 * 2. De-assert DCC_FB_EN by writing data “0x29” to the register
	 *    address 0x306d0014 (PCIE_PHY_CMN_REG4).
	 * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register
	 *    address 0x306d0090 (PCIE_PHY_CMN_REG24).
	 * 4. Assert ATT_MODE by writing data “0xbc” to the register
	 *    address 0x306d0098 (PCIE_PHY_CMN_REG26).
	 * 5. De-assert the CMN_RST signal by clearing register bit
	 *    SRC_PCIEPHY_RCR[PCIEPHY_BTN]
	 */

	if (likely(imx_pcie->phy_base)) {
		/* De-assert DCC_FB_EN */
		writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
		/* Assert RX_EQS and RX_EQS_SEL */
		writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
		       imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
		/* Assert ATT_MODE */
		writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
	} else {
		dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
	}
	imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
	return 0;
}

/* Put PHY/app logic in reset and drive PERST# low (where a GPIO exists) */
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_assert(imx_pcie->pciephy_reset);
	reset_control_assert(imx_pcie->apps_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, true);

	/* Some boards don't have PCIe reset GPIO. */
	gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
}

/* Release the PHY reset and deassert PERST# with the mandated delays */
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_deassert(imx_pcie->pciephy_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, false);

	/* Some boards don't have PCIe reset GPIO. */
	if (imx_pcie->reset_gpiod) {
		msleep(100);
		gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
		msleep(100);
	}

	return 0;
}

/* Poll until the HW clears PORT_LOGIC_SPEED_CHANGE (directed speed change done) */
static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -ETIMEDOUT;
}

/* Tell the PHY the advertised max speed, then kick the LTSSM */
static void imx_pcie_ltssm_enable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
	u32 tmp;

	tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
	phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
				   drvdata->ltssm_mask);

	reset_control_deassert(imx_pcie->apps_reset);
}

/* Stop the LTSSM and park the PHY (speed 0) */
static void imx_pcie_ltssm_disable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;

	phy_set_speed(imx_pcie->phy, 0);
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
				   drvdata->ltssm_mask, 0);

	reset_control_assert(imx_pcie->apps_reset);
}

/*
 * Bring the link up: train at Gen1 first, then (if allowed) raise the
 * advertised speed and perform a directed speed change.  A failed link
 * is not fatal: the PHY is reset and 0 is returned with link_is_up
 * left false.
 */
static int imx_pcie_start_link(struct dw_pcie *pci)
{
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	struct device *dev = pci->dev;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	dw_pcie_dbi_ro_wr_en(pci);
	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	tmp &= ~PCI_EXP_LNKCAP_SLS;
	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Start LTSSM. */
	imx_pcie_ltssm_enable(dev);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto err_reset_phy;

	if (pci->max_link_speed > 1) {
		/* Allow faster modes after the link is up */
		dw_pcie_dbi_ro_wr_en(pci);
		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		tmp &= ~PCI_EXP_LNKCAP_SLS;
		tmp |= pci->max_link_speed;
		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
		dw_pcie_dbi_ro_wr_dis(pci);

		if (imx_pcie->drvdata->flags &
		    IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx_pcie_wait_for_speed_change(imx_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = dw_pcie_wait_for_link(pci);
		if (ret)
			goto err_reset_phy;
	} else {
		dev_info(dev, "Link: Only Gen1 is enabled\n");
	}

	imx_pcie->link_is_up = true;
	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
	return 0;

err_reset_phy:
	imx_pcie->link_is_up = false;
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx_pcie_reset_phy(imx_pcie);
	return 0;
}

static void imx_pcie_stop_link(struct dw_pcie *pci)
{
	struct device *dev = pci->dev;

	/* Turn off PCIe LTSSM */
	imx_pcie_ltssm_disable(dev);
}

/*
 * Host bring-up: regulator -> reset asserted -> PHY init -> mode GPRs
 * -> clocks -> generic PHY -> reset deasserted -> MPLL setup.  The
 * teardown labels unwind in exact reverse order.
 */
static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	int ret;

	if (imx_pcie->vpcie) {
		ret = regulator_enable(imx_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return ret;
		}
	}

	imx_pcie_assert_core_reset(imx_pcie);

	if (imx_pcie->drvdata->init_phy)
		imx_pcie->drvdata->init_phy(imx_pcie);

	imx_pcie_configure_type(imx_pcie);

	ret = imx_pcie_clk_enable(imx_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
		goto err_reg_disable;
	}

	if (imx_pcie->phy) {
		ret = phy_init(imx_pcie->phy);
		if (ret) {
			dev_err(dev, "pcie PHY power up failed\n");
			goto err_clk_disable;
		}

		ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
		if (ret) {
			dev_err(dev, "unable to set PCIe PHY mode\n");
			goto err_phy_exit;
		}

		ret = phy_power_on(imx_pcie->phy);
		if (ret) {
			dev_err(dev, "waiting for PHY ready timeout!\n");
			goto err_phy_exit;
		}
	}

	ret = imx_pcie_deassert_core_reset(imx_pcie);
	if (ret < 0) {
		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
		goto err_phy_off;
	}

	imx_setup_phy_mpll(imx_pcie);

	return 0;

err_phy_off:
	phy_power_off(imx_pcie->phy);
err_phy_exit:
	phy_exit(imx_pcie->phy);
err_clk_disable:
	imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
	return ret;
}

/* Undo imx_pcie_host_init(): PHY off/exit, clocks off, regulator off */
static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	if (imx_pcie->phy) {
		if (phy_power_off(imx_pcie->phy))
			dev_err(pci->dev, "unable to power off PHY\n");
		phy_exit(imx_pcie->phy);
	}
	imx_pcie_clk_disable(imx_pcie);

	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
}

/*
 * On variants flagged CPU_ADDR_FIXUP, translate a CPU address to the
 * bus view by subtracting the first MEM window's offset.
 */
static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
{
	struct imx_pcie *imx_pcie = to_imx_pcie(pcie);
	struct dw_pcie_rp *pp = &pcie->pp;
	struct resource_entry *entry;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP))
		return cpu_addr;

	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (!entry)
		return cpu_addr;

	return cpu_addr - entry->offset;
}

static const struct dw_pcie_host_ops imx_pcie_host_ops = {
	.init = imx_pcie_host_init,
	.deinit = imx_pcie_host_exit,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link =
		      imx_pcie_start_link,
	.stop_link = imx_pcie_stop_link,
	.cpu_addr_fixup = imx_pcie_cpu_addr_fixup,
};

/* Endpoint-mode init: clear all BAR setups so the EPF drivers start clean */
static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	enum pci_barno bar;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}

/* Dispatch an EP-initiated interrupt via the matching DWC helper */
static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_IRQ_INTX:
		return dw_pcie_ep_raise_intx_irq(ep, func_no);
	case PCI_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_IRQ_MSIX:
		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct pci_epc_features imx8m_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.align = SZ_64K,
};

/*
 * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
 * ================================================================================================
 * BAR0 | Enable             | 64-bit           | 1 MB             | Programmable Size
 * BAR1 | Disable            | 32-bit           | 64 KB            | Fixed Size
 *        BAR1 should be disabled if BAR0 is 64bit.
 * BAR2 | Enable             | 32-bit           | 1 MB             | Programmable Size
 * BAR3 | Enable             | 32-bit           | 64 KB            | Programmable Size
 * BAR4 | Enable             | 32-bit           | 1M               | Programmable Size
 * BAR5 | Enable             | 32-bit           | 64 KB            | Programmable Size
 */
static const struct pci_epc_features imx95_pcie_epc_features = {
	.msi_capable = true,
	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
	.align = SZ_4K,
};

static const struct pci_epc_features*
imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	return imx_pcie->drvdata->epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.init = imx_pcie_ep_init,
	.raise_irq = imx_pcie_ep_raise_irq,
	.get_features = imx_pcie_ep_get_features,
};

/*
 * Register this controller as a DWC endpoint: reuse the host init for
 * clock/reset/PHY bring-up, locate dbi2, then initialize the EP core
 * and start the LTSSM.
 */
static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
			   struct platform_device *pdev)
{
	int ret;
	unsigned int pcie_dbi2_offset;
	struct dw_pcie_ep *ep;
	struct dw_pcie *pci = imx_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = pci->dev;

	imx_pcie_host_init(pp);
	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	switch (imx_pcie->drvdata->variant) {
	case IMX8MQ_EP:
	case IMX8MM_EP:
	case IMX8MP_EP:
		pcie_dbi2_offset = SZ_1M;
		break;
	default:
		pcie_dbi2_offset = SZ_4K;
		break;
	}

	pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;

	/*
	 * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining
	 * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
	 * core code can fetch that from DT. But once all platform DTs were fixed, this and the
	 * above "dbi_base2" setting should be removed.
	 */
	if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
		pci->dbi_base2 = NULL;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	ep->page_size = imx_pcie->drvdata->epc_features->align;

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	ret = dw_pcie_ep_init_registers(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC endpoint registers\n");
		dw_pcie_ep_deinit(ep);
		return ret;
	}

	pci_epc_init_notify(ep->epc);

	/* Start LTSSM. */
	imx_pcie_ltssm_enable(dev);

	return 0;
}

/* Broadcast PME_Turn_Off towards downstream devices before suspend */
static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie)
{
	struct device *dev = imx_pcie->pci->dev;

	/* Some variants have a turnoff reset in DT */
	if (imx_pcie->turnoff_reset) {
		reset_control_assert(imx_pcie->turnoff_reset);
		reset_control_deassert(imx_pcie->turnoff_reset);
		goto pm_turnoff_sleep;
	}

	/* Others poke directly at IOMUXC registers */
	switch (imx_pcie->drvdata->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF);
		regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
		break;
	default:
		dev_err(dev, "PME_Turn_Off not implemented\n");
		return;
	}

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
1211 */ 1212 pm_turnoff_sleep: 1213 usleep_range(1000, 10000); 1214 } 1215 1216 static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) 1217 { 1218 u8 offset; 1219 u16 val; 1220 struct dw_pcie *pci = imx_pcie->pci; 1221 1222 if (pci_msi_enabled()) { 1223 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1224 if (save) { 1225 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1226 imx_pcie->msi_ctrl = val; 1227 } else { 1228 dw_pcie_dbi_ro_wr_en(pci); 1229 val = imx_pcie->msi_ctrl; 1230 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1231 dw_pcie_dbi_ro_wr_dis(pci); 1232 } 1233 } 1234 } 1235 1236 static int imx_pcie_suspend_noirq(struct device *dev) 1237 { 1238 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 1239 struct dw_pcie_rp *pp = &imx_pcie->pci->pp; 1240 1241 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) 1242 return 0; 1243 1244 imx_pcie_msi_save_restore(imx_pcie, true); 1245 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { 1246 /* 1247 * The minimum for a workaround would be to set PERST# and to 1248 * set the PCIE_TEST_PD flag. However, we can also disable the 1249 * clock which saves some power. 
1250 */ 1251 imx_pcie_assert_core_reset(imx_pcie); 1252 imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); 1253 } else { 1254 imx_pcie_pm_turnoff(imx_pcie); 1255 imx_pcie_stop_link(imx_pcie->pci); 1256 imx_pcie_host_exit(pp); 1257 } 1258 1259 return 0; 1260 } 1261 1262 static int imx_pcie_resume_noirq(struct device *dev) 1263 { 1264 int ret; 1265 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 1266 struct dw_pcie_rp *pp = &imx_pcie->pci->pp; 1267 1268 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) 1269 return 0; 1270 1271 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { 1272 ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); 1273 if (ret) 1274 return ret; 1275 ret = imx_pcie_deassert_core_reset(imx_pcie); 1276 if (ret) 1277 return ret; 1278 /* 1279 * Using PCIE_TEST_PD seems to disable MSI and powers down the 1280 * root complex. This is why we have to setup the rc again and 1281 * why we have to restore the MSI register. 1282 */ 1283 ret = dw_pcie_setup_rc(&imx_pcie->pci->pp); 1284 if (ret) 1285 return ret; 1286 imx_pcie_msi_save_restore(imx_pcie, false); 1287 } else { 1288 ret = imx_pcie_host_init(pp); 1289 if (ret) 1290 return ret; 1291 imx_pcie_msi_save_restore(imx_pcie, false); 1292 dw_pcie_setup_rc(pp); 1293 1294 if (imx_pcie->link_is_up) 1295 imx_pcie_start_link(imx_pcie->pci); 1296 } 1297 1298 return 0; 1299 } 1300 1301 static const struct dev_pm_ops imx_pcie_pm_ops = { 1302 NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq, 1303 imx_pcie_resume_noirq) 1304 }; 1305 1306 static int imx_pcie_probe(struct platform_device *pdev) 1307 { 1308 struct device *dev = &pdev->dev; 1309 struct dw_pcie *pci; 1310 struct imx_pcie *imx_pcie; 1311 struct device_node *np; 1312 struct resource *dbi_base; 1313 struct device_node *node = dev->of_node; 1314 int ret; 1315 u16 val; 1316 int i; 1317 1318 imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL); 1319 if (!imx_pcie) 1320 return -ENOMEM; 1321 1322 pci = devm_kzalloc(dev, 
sizeof(*pci), GFP_KERNEL); 1323 if (!pci) 1324 return -ENOMEM; 1325 1326 pci->dev = dev; 1327 pci->ops = &dw_pcie_ops; 1328 pci->pp.ops = &imx_pcie_host_ops; 1329 1330 imx_pcie->pci = pci; 1331 imx_pcie->drvdata = of_device_get_match_data(dev); 1332 1333 /* Find the PHY if one is defined, only imx7d uses it */ 1334 np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); 1335 if (np) { 1336 struct resource res; 1337 1338 ret = of_address_to_resource(np, 0, &res); 1339 if (ret) { 1340 dev_err(dev, "Unable to map PCIe PHY\n"); 1341 return ret; 1342 } 1343 imx_pcie->phy_base = devm_ioremap_resource(dev, &res); 1344 if (IS_ERR(imx_pcie->phy_base)) 1345 return PTR_ERR(imx_pcie->phy_base); 1346 } 1347 1348 pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); 1349 if (IS_ERR(pci->dbi_base)) 1350 return PTR_ERR(pci->dbi_base); 1351 1352 /* Fetch GPIOs */ 1353 imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 1354 if (IS_ERR(imx_pcie->reset_gpiod)) 1355 return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod), 1356 "unable to get reset gpio\n"); 1357 gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); 1358 1359 if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) 1360 return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); 1361 1362 for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) 1363 imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; 1364 1365 /* Fetch clocks */ 1366 ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 1367 if (ret) 1368 return ret; 1369 1370 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) { 1371 imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); 1372 if (IS_ERR(imx_pcie->phy)) 1373 return dev_err_probe(dev, PTR_ERR(imx_pcie->phy), 1374 "failed to get pcie phy\n"); 1375 } 1376 1377 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) { 1378 imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); 1379 if (IS_ERR(imx_pcie->apps_reset)) 1380 
return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset), 1381 "failed to get pcie apps reset control\n"); 1382 } 1383 1384 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) { 1385 imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); 1386 if (IS_ERR(imx_pcie->pciephy_reset)) 1387 return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset), 1388 "Failed to get PCIEPHY reset control\n"); 1389 } 1390 1391 switch (imx_pcie->drvdata->variant) { 1392 case IMX8MQ: 1393 case IMX8MQ_EP: 1394 case IMX7D: 1395 if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) 1396 imx_pcie->controller_id = 1; 1397 break; 1398 default: 1399 break; 1400 } 1401 1402 /* Grab turnoff reset */ 1403 imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); 1404 if (IS_ERR(imx_pcie->turnoff_reset)) { 1405 dev_err(dev, "Failed to get TURNOFF reset control\n"); 1406 return PTR_ERR(imx_pcie->turnoff_reset); 1407 } 1408 1409 if (imx_pcie->drvdata->gpr) { 1410 /* Grab GPR config register range */ 1411 imx_pcie->iomuxc_gpr = 1412 syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr); 1413 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1414 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1415 "unable to find iomuxc registers\n"); 1416 } 1417 1418 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) { 1419 void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app"); 1420 1421 if (IS_ERR(off)) 1422 return dev_err_probe(dev, PTR_ERR(off), 1423 "unable to find serdes registers\n"); 1424 1425 static const struct regmap_config regmap_config = { 1426 .reg_bits = 32, 1427 .val_bits = 32, 1428 .reg_stride = 4, 1429 }; 1430 1431 imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, ®map_config); 1432 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1433 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1434 "unable to find iomuxc registers\n"); 1435 } 1436 1437 /* Grab PCIe PHY Tx Settings */ 1438 if (of_property_read_u32(node, "fsl,tx-deemph-gen1", 1439 
&imx_pcie->tx_deemph_gen1)) 1440 imx_pcie->tx_deemph_gen1 = 0; 1441 1442 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", 1443 &imx_pcie->tx_deemph_gen2_3p5db)) 1444 imx_pcie->tx_deemph_gen2_3p5db = 0; 1445 1446 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", 1447 &imx_pcie->tx_deemph_gen2_6db)) 1448 imx_pcie->tx_deemph_gen2_6db = 20; 1449 1450 if (of_property_read_u32(node, "fsl,tx-swing-full", 1451 &imx_pcie->tx_swing_full)) 1452 imx_pcie->tx_swing_full = 127; 1453 1454 if (of_property_read_u32(node, "fsl,tx-swing-low", 1455 &imx_pcie->tx_swing_low)) 1456 imx_pcie->tx_swing_low = 127; 1457 1458 /* Limit link speed */ 1459 pci->max_link_speed = 1; 1460 of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed); 1461 1462 imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); 1463 if (IS_ERR(imx_pcie->vpcie)) { 1464 if (PTR_ERR(imx_pcie->vpcie) != -ENODEV) 1465 return PTR_ERR(imx_pcie->vpcie); 1466 imx_pcie->vpcie = NULL; 1467 } 1468 1469 imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); 1470 if (IS_ERR(imx_pcie->vph)) { 1471 if (PTR_ERR(imx_pcie->vph) != -ENODEV) 1472 return PTR_ERR(imx_pcie->vph); 1473 imx_pcie->vph = NULL; 1474 } 1475 1476 platform_set_drvdata(pdev, imx_pcie); 1477 1478 ret = imx_pcie_attach_pd(dev); 1479 if (ret) 1480 return ret; 1481 1482 if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { 1483 ret = imx_add_pcie_ep(imx_pcie, pdev); 1484 if (ret < 0) 1485 return ret; 1486 } else { 1487 ret = dw_pcie_host_init(&pci->pp); 1488 if (ret < 0) 1489 return ret; 1490 1491 if (pci_msi_enabled()) { 1492 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1493 1494 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1495 val |= PCI_MSI_FLAGS_ENABLE; 1496 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1497 } 1498 } 1499 1500 return 0; 1501 } 1502 1503 static void imx_pcie_shutdown(struct platform_device *pdev) 1504 { 1505 struct imx_pcie *imx_pcie = platform_get_drvdata(pdev); 1506 
1507 /* bring down link, so bootloader gets clean state in case of reboot */ 1508 imx_pcie_assert_core_reset(imx_pcie); 1509 } 1510 1511 static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"}; 1512 static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"}; 1513 static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"}; 1514 static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"}; 1515 static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"}; 1516 1517 static const struct imx_pcie_drvdata drvdata[] = { 1518 [IMX6Q] = { 1519 .variant = IMX6Q, 1520 .flags = IMX_PCIE_FLAG_IMX_PHY | 1521 IMX_PCIE_FLAG_IMX_SPEED_CHANGE | 1522 IMX_PCIE_FLAG_BROKEN_SUSPEND | 1523 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, 1524 .dbi_length = 0x200, 1525 .gpr = "fsl,imx6q-iomuxc-gpr", 1526 .clk_names = imx6q_clks, 1527 .clks_cnt = ARRAY_SIZE(imx6q_clks), 1528 .ltssm_off = IOMUXC_GPR12, 1529 .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, 1530 .mode_off[0] = IOMUXC_GPR12, 1531 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1532 .init_phy = imx_pcie_init_phy, 1533 .enable_ref_clk = imx6q_pcie_enable_ref_clk, 1534 .core_reset = imx6q_pcie_core_reset, 1535 }, 1536 [IMX6SX] = { 1537 .variant = IMX6SX, 1538 .flags = IMX_PCIE_FLAG_IMX_PHY | 1539 IMX_PCIE_FLAG_IMX_SPEED_CHANGE | 1540 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, 1541 .gpr = "fsl,imx6q-iomuxc-gpr", 1542 .clk_names = imx6sx_clks, 1543 .clks_cnt = ARRAY_SIZE(imx6sx_clks), 1544 .ltssm_off = IOMUXC_GPR12, 1545 .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, 1546 .mode_off[0] = IOMUXC_GPR12, 1547 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1548 .init_phy = imx6sx_pcie_init_phy, 1549 .enable_ref_clk = imx6sx_pcie_enable_ref_clk, 1550 .core_reset = imx6sx_pcie_core_reset, 1551 }, 1552 [IMX6QP] = { 1553 .variant = IMX6QP, 1554 .flags = IMX_PCIE_FLAG_IMX_PHY | 1555 IMX_PCIE_FLAG_IMX_SPEED_CHANGE | 1556 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, 1557 .dbi_length = 0x200, 1558 .gpr = 
"fsl,imx6q-iomuxc-gpr", 1559 .clk_names = imx6q_clks, 1560 .clks_cnt = ARRAY_SIZE(imx6q_clks), 1561 .ltssm_off = IOMUXC_GPR12, 1562 .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, 1563 .mode_off[0] = IOMUXC_GPR12, 1564 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1565 .init_phy = imx_pcie_init_phy, 1566 .enable_ref_clk = imx6q_pcie_enable_ref_clk, 1567 .core_reset = imx6qp_pcie_core_reset, 1568 }, 1569 [IMX7D] = { 1570 .variant = IMX7D, 1571 .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | 1572 IMX_PCIE_FLAG_HAS_APP_RESET | 1573 IMX_PCIE_FLAG_HAS_PHY_RESET, 1574 .gpr = "fsl,imx7d-iomuxc-gpr", 1575 .clk_names = imx6q_clks, 1576 .clks_cnt = ARRAY_SIZE(imx6q_clks), 1577 .mode_off[0] = IOMUXC_GPR12, 1578 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1579 .init_phy = imx7d_pcie_init_phy, 1580 .enable_ref_clk = imx7d_pcie_enable_ref_clk, 1581 .core_reset = imx7d_pcie_core_reset, 1582 }, 1583 [IMX8MQ] = { 1584 .variant = IMX8MQ, 1585 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1586 IMX_PCIE_FLAG_HAS_PHY_RESET, 1587 .gpr = "fsl,imx8mq-iomuxc-gpr", 1588 .clk_names = imx8mq_clks, 1589 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1590 .mode_off[0] = IOMUXC_GPR12, 1591 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1592 .mode_off[1] = IOMUXC_GPR12, 1593 .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, 1594 .init_phy = imx8mq_pcie_init_phy, 1595 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1596 }, 1597 [IMX8MM] = { 1598 .variant = IMX8MM, 1599 .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | 1600 IMX_PCIE_FLAG_HAS_PHYDRV | 1601 IMX_PCIE_FLAG_HAS_APP_RESET, 1602 .gpr = "fsl,imx8mm-iomuxc-gpr", 1603 .clk_names = imx8mm_clks, 1604 .clks_cnt = ARRAY_SIZE(imx8mm_clks), 1605 .mode_off[0] = IOMUXC_GPR12, 1606 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1607 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1608 }, 1609 [IMX8MP] = { 1610 .variant = IMX8MP, 1611 .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | 1612 IMX_PCIE_FLAG_HAS_PHYDRV | 1613 IMX_PCIE_FLAG_HAS_APP_RESET, 1614 .gpr = "fsl,imx8mp-iomuxc-gpr", 1615 .clk_names = imx8mm_clks, 1616 
.clks_cnt = ARRAY_SIZE(imx8mm_clks), 1617 .mode_off[0] = IOMUXC_GPR12, 1618 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1619 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1620 }, 1621 [IMX8Q] = { 1622 .variant = IMX8Q, 1623 .flags = IMX_PCIE_FLAG_HAS_PHYDRV | 1624 IMX_PCIE_FLAG_CPU_ADDR_FIXUP, 1625 .clk_names = imx8q_clks, 1626 .clks_cnt = ARRAY_SIZE(imx8q_clks), 1627 }, 1628 [IMX95] = { 1629 .variant = IMX95, 1630 .flags = IMX_PCIE_FLAG_HAS_SERDES, 1631 .clk_names = imx8mq_clks, 1632 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1633 .ltssm_off = IMX95_PE0_GEN_CTRL_3, 1634 .ltssm_mask = IMX95_PCIE_LTSSM_EN, 1635 .mode_off[0] = IMX95_PE0_GEN_CTRL_1, 1636 .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, 1637 .init_phy = imx95_pcie_init_phy, 1638 }, 1639 [IMX8MQ_EP] = { 1640 .variant = IMX8MQ_EP, 1641 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1642 IMX_PCIE_FLAG_HAS_PHY_RESET, 1643 .mode = DW_PCIE_EP_TYPE, 1644 .gpr = "fsl,imx8mq-iomuxc-gpr", 1645 .clk_names = imx8mq_clks, 1646 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1647 .mode_off[0] = IOMUXC_GPR12, 1648 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1649 .mode_off[1] = IOMUXC_GPR12, 1650 .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, 1651 .epc_features = &imx8m_pcie_epc_features, 1652 .init_phy = imx8mq_pcie_init_phy, 1653 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1654 }, 1655 [IMX8MM_EP] = { 1656 .variant = IMX8MM_EP, 1657 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1658 IMX_PCIE_FLAG_HAS_PHYDRV, 1659 .mode = DW_PCIE_EP_TYPE, 1660 .gpr = "fsl,imx8mm-iomuxc-gpr", 1661 .clk_names = imx8mm_clks, 1662 .clks_cnt = ARRAY_SIZE(imx8mm_clks), 1663 .mode_off[0] = IOMUXC_GPR12, 1664 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1665 .epc_features = &imx8m_pcie_epc_features, 1666 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1667 }, 1668 [IMX8MP_EP] = { 1669 .variant = IMX8MP_EP, 1670 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1671 IMX_PCIE_FLAG_HAS_PHYDRV, 1672 .mode = DW_PCIE_EP_TYPE, 1673 .gpr = "fsl,imx8mp-iomuxc-gpr", 1674 .clk_names = imx8mm_clks, 1675 
.clks_cnt = ARRAY_SIZE(imx8mm_clks), 1676 .mode_off[0] = IOMUXC_GPR12, 1677 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1678 .epc_features = &imx8m_pcie_epc_features, 1679 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1680 }, 1681 [IMX95_EP] = { 1682 .variant = IMX95_EP, 1683 .flags = IMX_PCIE_FLAG_HAS_SERDES | 1684 IMX_PCIE_FLAG_SUPPORT_64BIT, 1685 .clk_names = imx8mq_clks, 1686 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1687 .ltssm_off = IMX95_PE0_GEN_CTRL_3, 1688 .ltssm_mask = IMX95_PCIE_LTSSM_EN, 1689 .mode_off[0] = IMX95_PE0_GEN_CTRL_1, 1690 .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, 1691 .init_phy = imx95_pcie_init_phy, 1692 .epc_features = &imx95_pcie_epc_features, 1693 .mode = DW_PCIE_EP_TYPE, 1694 }, 1695 }; 1696 1697 static const struct of_device_id imx_pcie_of_match[] = { 1698 { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], }, 1699 { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], }, 1700 { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], }, 1701 { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], }, 1702 { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], }, 1703 { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], }, 1704 { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], }, 1705 { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], }, 1706 { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], }, 1707 { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], }, 1708 { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], }, 1709 { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], }, 1710 { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], }, 1711 {}, 1712 }; 1713 1714 static struct platform_driver imx_pcie_driver = { 1715 .driver = { 1716 .name = "imx6q-pcie", 1717 .of_match_table = imx_pcie_of_match, 1718 .suppress_bind_attrs = true, 1719 .pm = &imx_pcie_pm_ops, 1720 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1721 }, 1722 .probe = 
imx_pcie_probe, 1723 .shutdown = imx_pcie_shutdown, 1724 }; 1725 1726 static void imx_pcie_quirk(struct pci_dev *dev) 1727 { 1728 struct pci_bus *bus = dev->bus; 1729 struct dw_pcie_rp *pp = bus->sysdata; 1730 1731 /* Bus parent is the PCI bridge, its parent is this platform driver */ 1732 if (!bus->dev.parent || !bus->dev.parent->parent) 1733 return; 1734 1735 /* Make sure we only quirk devices associated with this driver */ 1736 if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver) 1737 return; 1738 1739 if (pci_is_root_bus(bus)) { 1740 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1741 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1742 1743 /* 1744 * Limit config length to avoid the kernel reading beyond 1745 * the register set and causing an abort on i.MX 6Quad 1746 */ 1747 if (imx_pcie->drvdata->dbi_length) { 1748 dev->cfg_size = imx_pcie->drvdata->dbi_length; 1749 dev_info(&dev->dev, "Limiting cfg_size to %d\n", 1750 dev->cfg_size); 1751 } 1752 } 1753 } 1754 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, 1755 PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk); 1756 1757 static int __init imx_pcie_init(void) 1758 { 1759 #ifdef CONFIG_ARM 1760 struct device_node *np; 1761 1762 np = of_find_matching_node(NULL, imx_pcie_of_match); 1763 if (!np) 1764 return -ENODEV; 1765 of_node_put(np); 1766 1767 /* 1768 * Since probe() can be deferred we need to make sure that 1769 * hook_fault_code is not called after __init memory is freed 1770 * by kernel and since imx6q_pcie_abort_handler() is a no-op, 1771 * we can install the handler here without risking it 1772 * accessing some uninitialized driver state. 1773 */ 1774 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, 1775 "external abort on non-linefetch"); 1776 #endif 1777 1778 return platform_driver_register(&imx_pcie_driver); 1779 } 1780 device_initcall(imx_pcie_init); 1781