1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * PCIe host controller driver for Freescale i.MX6 SoCs 4 * 5 * Copyright (C) 2013 Kosagi 6 * https://www.kosagi.com 7 * 8 * Author: Sean Cross <xobs@kosagi.com> 9 */ 10 11 #include <linux/bitfield.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/gpio/consumer.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 18 #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> 19 #include <linux/module.h> 20 #include <linux/of.h> 21 #include <linux/of_address.h> 22 #include <linux/pci.h> 23 #include <linux/platform_device.h> 24 #include <linux/regmap.h> 25 #include <linux/regulator/consumer.h> 26 #include <linux/resource.h> 27 #include <linux/signal.h> 28 #include <linux/types.h> 29 #include <linux/interrupt.h> 30 #include <linux/reset.h> 31 #include <linux/phy/pcie.h> 32 #include <linux/phy/phy.h> 33 #include <linux/pm_domain.h> 34 #include <linux/pm_runtime.h> 35 36 #include "pcie-designware.h" 37 38 #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) 39 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) 40 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) 41 #define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12) 42 #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) 43 #define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 44 45 #define IMX95_PCIE_PHY_GEN_CTRL 0x0 46 #define IMX95_PCIE_REF_USE_PAD BIT(17) 47 48 #define IMX95_PCIE_SS_RW_REG_0 0xf0 49 #define IMX95_PCIE_REF_CLKEN BIT(23) 50 #define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9) 51 52 #define IMX95_PE0_GEN_CTRL_1 0x1050 53 #define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0) 54 55 #define IMX95_PE0_GEN_CTRL_3 0x1058 56 #define IMX95_PCIE_LTSSM_EN BIT(0) 57 58 #define to_imx_pcie(x) dev_get_drvdata((x)->dev) 59 60 enum imx_pcie_variants { 61 IMX6Q, 62 IMX6SX, 63 IMX6QP, 64 IMX7D, 65 IMX8MQ, 66 IMX8MM, 67 IMX8MP, 68 IMX8Q, 69 IMX95, 70 IMX8MQ_EP, 71 IMX8MM_EP, 72 IMX8MP_EP, 73 IMX95_EP, 74 }; 75 76 #define IMX_PCIE_FLAG_IMX_PHY BIT(0) 77 #define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1) 78 #define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) 79 #define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3) 80 #define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4) 81 #define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5) 82 #define IMX_PCIE_FLAG_HAS_SERDES BIT(6) 83 #define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7) 84 #define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8) 85 86 #define imx_check_flag(pci, val) (pci->drvdata->flags & val) 87 88 #define IMX_PCIE_MAX_CLKS 6 89 #define IMX_PCIE_MAX_INSTANCES 2 90 91 struct imx_pcie; 92 93 struct imx_pcie_drvdata { 94 enum imx_pcie_variants variant; 95 enum dw_pcie_device_mode mode; 96 u32 flags; 97 int dbi_length; 98 const char *gpr; 99 const char * const *clk_names; 100 const u32 clks_cnt; 101 const u32 ltssm_off; 102 const u32 ltssm_mask; 103 const u32 mode_off[IMX_PCIE_MAX_INSTANCES]; 104 const u32 mode_mask[IMX_PCIE_MAX_INSTANCES]; 105 const struct pci_epc_features *epc_features; 106 int (*init_phy)(struct imx_pcie *pcie); 107 int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable); 108 int (*core_reset)(struct imx_pcie *pcie, bool assert); 109 }; 110 111 struct imx_pcie { 112 struct dw_pcie *pci; 113 struct gpio_desc *reset_gpiod; 114 bool link_is_up; 115 struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS]; 116 struct regmap *iomuxc_gpr; 117 u16 msi_ctrl; 118 u32 controller_id; 119 struct reset_control *pciephy_reset; 120 struct reset_control *apps_reset; 121 struct reset_control *turnoff_reset; 122 u32 tx_deemph_gen1; 123 u32 tx_deemph_gen2_3p5db; 124 u32 tx_deemph_gen2_6db; 125 u32 tx_swing_full; 126 u32 tx_swing_low; 127 struct regulator *vpcie; 128 struct regulator *vph; 129 void __iomem *phy_base; 130 131 /* power domain for pcie */ 132 struct device *pd_pcie; 133 /* power domain for pcie phy */ 134 struct device *pd_pcie_phy; 135 struct phy *phy; 136 const struct imx_pcie_drvdata *drvdata; 137 }; 138 139 /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ 140 #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 141 #define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX) 142 143 /* PCIe Port Logic registers (memory-mapped) */ 144 #define PL_OFFSET 0x700 145 146 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) 147 #define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x)) 148 #define PCIE_PHY_CTRL_CAP_ADR BIT(16) 149 #define PCIE_PHY_CTRL_CAP_DAT BIT(17) 150 #define PCIE_PHY_CTRL_WR BIT(18) 151 #define PCIE_PHY_CTRL_RD BIT(19) 152 153 #define PCIE_PHY_STAT (PL_OFFSET + 0x110) 154 #define PCIE_PHY_STAT_ACK BIT(16) 155 156 /* PHY registers (not memory-mapped) */ 157 #define PCIE_PHY_ATEOVRD 0x10 158 #define PCIE_PHY_ATEOVRD_EN BIT(2) 159 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 160 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 161 162 #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 163 #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 164 #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f 165 #define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9) 166 167 #define PCIE_PHY_RX_ASIC_OUT 0x100D 168 #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) 169 170 /* iMX7 PCIe PHY registers */ 171 #define PCIE_PHY_CMN_REG4 0x14 172 /* These are probably the bits that *aren't* DCC_FB_EN */ 173 #define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29 174 175 #define PCIE_PHY_CMN_REG15 0x54 176 #define PCIE_PHY_CMN_REG15_DLY_4 BIT(2) 177 #define PCIE_PHY_CMN_REG15_PLL_PD BIT(5) 178 #define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7) 179 180 #define PCIE_PHY_CMN_REG24 0x90 181 #define PCIE_PHY_CMN_REG24_RX_EQ BIT(6) 182 #define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3) 183 184 #define PCIE_PHY_CMN_REG26 0x98 185 #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC 186 187 #define PHY_RX_OVRD_IN_LO 0x1005 188 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5) 189 #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3) 190 191 static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie) 192 { 193 WARN_ON(imx_pcie->drvdata->variant != IMX8MQ && 194 imx_pcie->drvdata->variant != IMX8MQ_EP && 195 imx_pcie->drvdata->variant != IMX8MM && 196 imx_pcie->drvdata->variant != IMX8MM_EP && 197 imx_pcie->drvdata->variant != IMX8MP && 198 imx_pcie->drvdata->variant != IMX8MP_EP); 199 return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14; 200 } 201 202 static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie) 203 { 204 regmap_update_bits(imx_pcie->iomuxc_gpr, 205 IMX95_PCIE_SS_RW_REG_0, 206 IMX95_PCIE_PHY_CR_PARA_SEL, 207 IMX95_PCIE_PHY_CR_PARA_SEL); 208 209 regmap_update_bits(imx_pcie->iomuxc_gpr, 210 IMX95_PCIE_PHY_GEN_CTRL, 211 IMX95_PCIE_REF_USE_PAD, 0); 212 regmap_update_bits(imx_pcie->iomuxc_gpr, 213 IMX95_PCIE_SS_RW_REG_0, 214 IMX95_PCIE_REF_CLKEN, 215 IMX95_PCIE_REF_CLKEN); 216 217 return 0; 218 } 219 220 static void imx_pcie_configure_type(struct imx_pcie *imx_pcie) 221 { 222 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 223 unsigned int mask, val, mode, id; 224 225 if (drvdata->mode == DW_PCIE_EP_TYPE) 226 mode = PCI_EXP_TYPE_ENDPOINT; 227 else 228 mode = PCI_EXP_TYPE_ROOT_PORT; 229 230 id = imx_pcie->controller_id; 231 232 /* If mode_mask is 0, then generic PHY driver is used to set the mode */ 233 if (!drvdata->mode_mask[0]) 234 return; 235 236 /* If mode_mask[id] is zero, means each controller have its individual gpr */ 237 if (!drvdata->mode_mask[id]) 238 id = 0; 239 240 mask = drvdata->mode_mask[id]; 241 val = mode << (ffs(mask) - 1); 242 243 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val); 244 } 245 246 static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val) 247 { 248 struct dw_pcie *pci = imx_pcie->pci; 249 bool val; 250 u32 max_iterations = 10; 251 u32 wait_counter = 0; 252 253 do { 254 val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) & 255 PCIE_PHY_STAT_ACK; 256 wait_counter++; 257 258 if (val == exp_val) 259 return 0; 260 261 udelay(1); 262 } while (wait_counter < max_iterations); 263 264 return -ETIMEDOUT; 265 } 266 267 static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr) 268 { 269 struct dw_pcie *pci = imx_pcie->pci; 270 u32 val; 271 int ret; 272 273 val = PCIE_PHY_CTRL_DATA(addr); 274 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); 275 276 val |= PCIE_PHY_CTRL_CAP_ADR; 277 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); 278 279 ret = pcie_phy_poll_ack(imx_pcie, true); 280 if (ret) 281 return ret; 282 283 val = PCIE_PHY_CTRL_DATA(addr); 284 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); 285 286 return pcie_phy_poll_ack(imx_pcie, false); 287 } 288 289 /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ 290 static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data) 291 { 292 struct dw_pcie *pci = imx_pcie->pci; 293 u32 phy_ctl; 294 int ret; 295 296 ret = pcie_phy_wait_ack(imx_pcie, addr); 297 if (ret) 298 return ret; 299 300 /* assert Read signal */ 301 phy_ctl = PCIE_PHY_CTRL_RD; 302 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); 303 304 ret = pcie_phy_poll_ack(imx_pcie, true); 305 if (ret) 306 return ret; 307 308 *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); 309 310 /* deassert Read signal */ 311 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); 312 313 return pcie_phy_poll_ack(imx_pcie, false); 314 } 315 316 static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data) 317 { 318 struct dw_pcie *pci = imx_pcie->pci; 319 u32 var; 320 int ret; 321 322 /* write addr */ 323 /* cap addr */ 324 ret = pcie_phy_wait_ack(imx_pcie, addr); 325 if (ret) 326 return ret; 327 328 var = PCIE_PHY_CTRL_DATA(data); 329 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 330 331 /* capture data */ 332 var |= PCIE_PHY_CTRL_CAP_DAT; 333 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 334 335 ret = pcie_phy_poll_ack(imx_pcie, true); 336 if (ret) 337 return ret; 338 339 /* deassert cap data */ 340 var = PCIE_PHY_CTRL_DATA(data); 341 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 342 343 /* wait for ack de-assertion */ 344 ret = pcie_phy_poll_ack(imx_pcie, false); 345 if (ret) 346 return ret; 347 348 /* assert wr signal */ 349 var = PCIE_PHY_CTRL_WR; 350 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 351 352 /* wait for ack */ 353 ret = pcie_phy_poll_ack(imx_pcie, true); 354 if (ret) 355 return ret; 356 357 /* deassert wr signal */ 358 var = PCIE_PHY_CTRL_DATA(data); 359 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 360 361 /* wait for ack de-assertion */ 362 ret = pcie_phy_poll_ack(imx_pcie, false); 363 if (ret) 364 return ret; 365 366 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); 367 368 return 0; 369 } 370 371 static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) 372 { 373 /* TODO: Currently this code assumes external oscillator is being used */ 374 regmap_update_bits(imx_pcie->iomuxc_gpr, 375 imx_pcie_grp_offset(imx_pcie), 376 IMX8MQ_GPR_PCIE_REF_USE_PAD, 377 IMX8MQ_GPR_PCIE_REF_USE_PAD); 378 /* 379 * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is 380 * supplied by 3.3V, the VREG_BYPASS should be cleared to zero. 381 */ 382 if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000) 383 regmap_update_bits(imx_pcie->iomuxc_gpr, 384 imx_pcie_grp_offset(imx_pcie), 385 IMX8MQ_GPR_PCIE_VREG_BYPASS, 386 0); 387 388 return 0; 389 } 390 391 static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie) 392 { 393 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); 394 395 return 0; 396 } 397 398 static int imx_pcie_init_phy(struct imx_pcie *imx_pcie) 399 { 400 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 401 IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); 402 403 /* configure constant input signal to the pcie ctrl and phy */ 404 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 405 IMX6Q_GPR12_LOS_LEVEL, 9 << 4); 406 407 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 408 IMX6Q_GPR8_TX_DEEMPH_GEN1, 409 imx_pcie->tx_deemph_gen1 << 0); 410 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 411 IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 412 imx_pcie->tx_deemph_gen2_3p5db << 6); 413 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 414 IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 415 imx_pcie->tx_deemph_gen2_6db << 12); 416 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 417 IMX6Q_GPR8_TX_SWING_FULL, 418 imx_pcie->tx_swing_full << 18); 419 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 420 IMX6Q_GPR8_TX_SWING_LOW, 421 imx_pcie->tx_swing_low << 25); 422 return 0; 423 } 424 425 static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie) 426 { 427 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 428 IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2); 429 430 return imx_pcie_init_phy(imx_pcie); 431 } 432 433 static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie) 434 { 435 u32 val; 436 struct device *dev = imx_pcie->pci->dev; 437 438 if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr, 439 IOMUXC_GPR22, val, 440 val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, 441 PHY_PLL_LOCK_WAIT_USLEEP_MAX, 442 PHY_PLL_LOCK_WAIT_TIMEOUT)) 443 dev_err(dev, "PCIe PLL lock timeout\n"); 444 } 445 446 static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie) 447 { 448 unsigned long phy_rate = 0; 449 int mult, div; 450 u16 val; 451 int i; 452 453 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) 454 return 0; 455 456 for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) 457 if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0) 458 phy_rate = clk_get_rate(imx_pcie->clks[i].clk); 459 460 switch (phy_rate) { 461 case 125000000: 462 /* 463 * The default settings of the MPLL are for a 125MHz input 464 * clock, so no need to reconfigure anything in that case. 465 */ 466 return 0; 467 case 100000000: 468 mult = 25; 469 div = 0; 470 break; 471 case 200000000: 472 mult = 25; 473 div = 1; 474 break; 475 default: 476 dev_err(imx_pcie->pci->dev, 477 "Unsupported PHY reference clock rate %lu\n", phy_rate); 478 return -EINVAL; 479 } 480 481 pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); 482 val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << 483 PCIE_PHY_MPLL_MULTIPLIER_SHIFT); 484 val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; 485 val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; 486 pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); 487 488 pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val); 489 val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << 490 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); 491 val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; 492 val |= PCIE_PHY_ATEOVRD_EN; 493 pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val); 494 495 return 0; 496 } 497 498 static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie) 499 { 500 u16 tmp; 501 502 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) 503 return; 504 505 pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); 506 tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | 507 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 508 pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); 509 510 usleep_range(2000, 3000); 511 512 pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); 513 tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | 514 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 515 pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); 516 } 517 518 #ifdef CONFIG_ARM 519 /* Added for PCI abort handling */ 520 static int imx6q_pcie_abort_handler(unsigned long addr, 521 unsigned int fsr, struct pt_regs *regs) 522 { 523 unsigned long pc = instruction_pointer(regs); 524 unsigned long instr = *(unsigned long *)pc; 525 int reg = (instr >> 12) & 15; 526 527 /* 528 * If the instruction being executed was a read, 529 * make it look like it read all-ones. 530 */ 531 if ((instr & 0x0c100000) == 0x04100000) { 532 unsigned long val; 533 534 if (instr & 0x00400000) 535 val = 255; 536 else 537 val = -1; 538 539 regs->uregs[reg] = val; 540 regs->ARM_pc += 4; 541 return 0; 542 } 543 544 if ((instr & 0x0e100090) == 0x00100090) { 545 regs->uregs[reg] = -1; 546 regs->ARM_pc += 4; 547 return 0; 548 } 549 550 return 1; 551 } 552 #endif 553 554 static int imx_pcie_attach_pd(struct device *dev) 555 { 556 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 557 struct device_link *link; 558 559 /* Do nothing when in a single power domain */ 560 if (dev->pm_domain) 561 return 0; 562 563 imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); 564 if (IS_ERR(imx_pcie->pd_pcie)) 565 return PTR_ERR(imx_pcie->pd_pcie); 566 /* Do nothing when power domain missing */ 567 if (!imx_pcie->pd_pcie) 568 return 0; 569 link = device_link_add(dev, imx_pcie->pd_pcie, 570 DL_FLAG_STATELESS | 571 DL_FLAG_PM_RUNTIME | 572 DL_FLAG_RPM_ACTIVE); 573 if (!link) { 574 dev_err(dev, "Failed to add device_link to pcie pd.\n"); 575 return -EINVAL; 576 } 577 578 imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); 579 if (IS_ERR(imx_pcie->pd_pcie_phy)) 580 return PTR_ERR(imx_pcie->pd_pcie_phy); 581 582 link = device_link_add(dev, imx_pcie->pd_pcie_phy, 583 DL_FLAG_STATELESS | 584 DL_FLAG_PM_RUNTIME | 585 DL_FLAG_RPM_ACTIVE); 586 if (!link) { 587 dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); 588 return -EINVAL; 589 } 590 591 return 0; 592 } 593 594 static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 595 { 596 if (enable) 597 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 598 IMX6SX_GPR12_PCIE_TEST_POWERDOWN); 599 600 return 0; 601 } 602 603 static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 604 { 605 if (enable) { 606 /* power up core phy and enable ref clock */ 607 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 608 /* 609 * the async reset input need ref clock to sync internally, 610 * when the ref clock comes after reset, internal synced 611 * reset time is too short, cannot meet the requirement. 612 * add one ~10us delay here. 613 */ 614 usleep_range(10, 100); 615 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 616 } else { 617 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 618 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 619 } 620 621 return 0; 622 } 623 624 static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 625 { 626 int offset = imx_pcie_grp_offset(imx_pcie); 627 628 if (enable) { 629 regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); 630 regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); 631 } 632 633 return 0; 634 } 635 636 static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 637 { 638 if (!enable) 639 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 640 IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); 641 return 0; 642 } 643 644 static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie) 645 { 646 struct dw_pcie *pci = imx_pcie->pci; 647 struct device *dev = pci->dev; 648 int ret; 649 650 ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 651 if (ret) 652 return ret; 653 654 if (imx_pcie->drvdata->enable_ref_clk) { 655 ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); 656 if (ret) { 657 dev_err(dev, "Failed to enable PCIe REFCLK\n"); 658 goto err_ref_clk; 659 } 660 } 661 662 /* allow the clocks to stabilize */ 663 usleep_range(200, 500); 664 return 0; 665 666 err_ref_clk: 667 clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 668 669 return ret; 670 } 671 672 static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie) 673 { 674 if (imx_pcie->drvdata->enable_ref_clk) 675 imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); 676 clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 677 } 678 679 static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 680 { 681 if (assert) 682 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 683 IMX6SX_GPR12_PCIE_TEST_POWERDOWN); 684 685 /* Force PCIe PHY reset */ 686 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET, 687 assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0); 688 return 0; 689 } 690 691 static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 692 { 693 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST, 694 assert ? IMX6Q_GPR1_PCIE_SW_RST : 0); 695 if (!assert) 696 usleep_range(200, 500); 697 698 return 0; 699 } 700 701 static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 702 { 703 if (!assert) 704 return 0; 705 706 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 707 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 708 709 return 0; 710 } 711 712 static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 713 { 714 struct dw_pcie *pci = imx_pcie->pci; 715 struct device *dev = pci->dev; 716 717 if (assert) 718 return 0; 719 720 /* 721 * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023): 722 * 723 * PCIe: PLL may fail to lock under corner conditions. 724 * 725 * Initial VCO oscillation may fail under corner conditions such as 726 * cold temperature which will cause the PCIe PLL fail to lock in the 727 * initialization phase. 728 * 729 * The Duty-cycle Corrector calibration must be disabled. 730 * 731 * 1. De-assert the G_RST signal by clearing 732 * SRC_PCIEPHY_RCR[PCIEPHY_G_RST]. 733 * 2. De-assert DCC_FB_EN by writing data “0x29” to the register 734 * address 0x306d0014 (PCIE_PHY_CMN_REG4). 735 * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register 736 * address 0x306d0090 (PCIE_PHY_CMN_REG24). 737 * 4. Assert ATT_MODE by writing data “0xbc” to the register 738 * address 0x306d0098 (PCIE_PHY_CMN_REG26). 739 * 5. De-assert the CMN_RST signal by clearing register bit 740 * SRC_PCIEPHY_RCR[PCIEPHY_BTN] 741 */ 742 743 if (likely(imx_pcie->phy_base)) { 744 /* De-assert DCC_FB_EN */ 745 writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4); 746 /* Assert RX_EQS and RX_EQS_SEL */ 747 writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ, 748 imx_pcie->phy_base + PCIE_PHY_CMN_REG24); 749 /* Assert ATT_MODE */ 750 writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26); 751 } else { 752 dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n"); 753 } 754 imx7d_pcie_wait_for_phy_pll_lock(imx_pcie); 755 return 0; 756 } 757 758 static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) 759 { 760 reset_control_assert(imx_pcie->pciephy_reset); 761 reset_control_assert(imx_pcie->apps_reset); 762 763 if (imx_pcie->drvdata->core_reset) 764 imx_pcie->drvdata->core_reset(imx_pcie, true); 765 766 /* Some boards don't have PCIe reset GPIO. */ 767 gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1); 768 } 769 770 static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie) 771 { 772 reset_control_deassert(imx_pcie->pciephy_reset); 773 774 if (imx_pcie->drvdata->core_reset) 775 imx_pcie->drvdata->core_reset(imx_pcie, false); 776 777 /* Some boards don't have PCIe reset GPIO. */ 778 if (imx_pcie->reset_gpiod) { 779 msleep(100); 780 gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0); 781 /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ 782 msleep(100); 783 } 784 785 return 0; 786 } 787 788 static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie) 789 { 790 struct dw_pcie *pci = imx_pcie->pci; 791 struct device *dev = pci->dev; 792 u32 tmp; 793 unsigned int retries; 794 795 for (retries = 0; retries < 200; retries++) { 796 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 797 /* Test if the speed change finished. */ 798 if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) 799 return 0; 800 usleep_range(100, 1000); 801 } 802 803 dev_err(dev, "Speed change timeout\n"); 804 return -ETIMEDOUT; 805 } 806 807 static void imx_pcie_ltssm_enable(struct device *dev) 808 { 809 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 810 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 811 u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP); 812 u32 tmp; 813 814 tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP); 815 phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp)); 816 if (drvdata->ltssm_mask) 817 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, 818 drvdata->ltssm_mask); 819 820 reset_control_deassert(imx_pcie->apps_reset); 821 } 822 823 static void imx_pcie_ltssm_disable(struct device *dev) 824 { 825 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 826 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 827 828 phy_set_speed(imx_pcie->phy, 0); 829 if (drvdata->ltssm_mask) 830 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, 831 drvdata->ltssm_mask, 0); 832 833 reset_control_assert(imx_pcie->apps_reset); 834 } 835 836 static int imx_pcie_start_link(struct dw_pcie *pci) 837 { 838 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 839 struct device *dev = pci->dev; 840 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 841 u32 tmp; 842 int ret; 843 844 /* 845 * Force Gen1 operation when starting the link. In case the link is 846 * started in Gen2 mode, there is a possibility the devices on the 847 * bus will not be detected at all. This happens with PCIe switches. 848 */ 849 dw_pcie_dbi_ro_wr_en(pci); 850 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); 851 tmp &= ~PCI_EXP_LNKCAP_SLS; 852 tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; 853 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); 854 dw_pcie_dbi_ro_wr_dis(pci); 855 856 /* Start LTSSM. */ 857 imx_pcie_ltssm_enable(dev); 858 859 ret = dw_pcie_wait_for_link(pci); 860 if (ret) 861 goto err_reset_phy; 862 863 if (pci->max_link_speed > 1) { 864 /* Allow faster modes after the link is up */ 865 dw_pcie_dbi_ro_wr_en(pci); 866 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); 867 tmp &= ~PCI_EXP_LNKCAP_SLS; 868 tmp |= pci->max_link_speed; 869 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); 870 871 /* 872 * Start Directed Speed Change so the best possible 873 * speed both link partners support can be negotiated. 874 */ 875 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 876 tmp |= PORT_LOGIC_SPEED_CHANGE; 877 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); 878 dw_pcie_dbi_ro_wr_dis(pci); 879 880 if (imx_pcie->drvdata->flags & 881 IMX_PCIE_FLAG_IMX_SPEED_CHANGE) { 882 /* 883 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently 884 * from i.MX6 family when no link speed transition 885 * occurs and we go Gen1 -> yep, Gen1. The difference 886 * is that, in such case, it will not be cleared by HW 887 * which will cause the following code to report false 888 * failure. 889 */ 890 891 ret = imx_pcie_wait_for_speed_change(imx_pcie); 892 if (ret) { 893 dev_err(dev, "Failed to bring link up!\n"); 894 goto err_reset_phy; 895 } 896 } 897 898 /* Make sure link training is finished as well! */ 899 ret = dw_pcie_wait_for_link(pci); 900 if (ret) 901 goto err_reset_phy; 902 } else { 903 dev_info(dev, "Link: Only Gen1 is enabled\n"); 904 } 905 906 imx_pcie->link_is_up = true; 907 tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); 908 dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS); 909 return 0; 910 911 err_reset_phy: 912 imx_pcie->link_is_up = false; 913 dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", 914 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), 915 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); 916 imx_pcie_reset_phy(imx_pcie); 917 return 0; 918 } 919 920 static void imx_pcie_stop_link(struct dw_pcie *pci) 921 { 922 struct device *dev = pci->dev; 923 924 /* Turn off PCIe LTSSM */ 925 imx_pcie_ltssm_disable(dev); 926 } 927 928 static int imx_pcie_host_init(struct dw_pcie_rp *pp) 929 { 930 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 931 struct device *dev = pci->dev; 932 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 933 int ret; 934 935 if (imx_pcie->vpcie) { 936 ret = regulator_enable(imx_pcie->vpcie); 937 if (ret) { 938 dev_err(dev, "failed to enable vpcie regulator: %d\n", 939 ret); 940 return ret; 941 } 942 } 943 944 imx_pcie_assert_core_reset(imx_pcie); 945 946 if (imx_pcie->drvdata->init_phy) 947 imx_pcie->drvdata->init_phy(imx_pcie); 948 949 imx_pcie_configure_type(imx_pcie); 950 951 ret = imx_pcie_clk_enable(imx_pcie); 952 if (ret) { 953 dev_err(dev, "unable to enable pcie clocks: %d\n", ret); 954 goto err_reg_disable; 955 } 956 957 if (imx_pcie->phy) { 958 ret = phy_init(imx_pcie->phy); 959 if (ret) { 960 dev_err(dev, "pcie PHY power up failed\n"); 961 goto err_clk_disable; 962 } 963 964 ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); 965 if (ret) { 966 dev_err(dev, "unable to set PCIe PHY mode\n"); 967 goto err_phy_exit; 968 } 969 970 ret = phy_power_on(imx_pcie->phy); 971 if (ret) { 972 dev_err(dev, "waiting for PHY ready timeout!\n"); 973 goto err_phy_exit; 974 } 975 } 976 977 ret = imx_pcie_deassert_core_reset(imx_pcie); 978 if (ret < 0) { 979 dev_err(dev, "pcie deassert core reset failed: %d\n", ret); 980 goto err_phy_off; 981 } 982 983 imx_setup_phy_mpll(imx_pcie); 984 985 return 0; 986 987 err_phy_off: 988 phy_power_off(imx_pcie->phy); 989 err_phy_exit: 990 phy_exit(imx_pcie->phy); 991 err_clk_disable: 992 imx_pcie_clk_disable(imx_pcie); 993 err_reg_disable: 994 if (imx_pcie->vpcie) 995 regulator_disable(imx_pcie->vpcie); 996 return ret; 997 } 998 999 static void imx_pcie_host_exit(struct dw_pcie_rp *pp) 1000 { 1001 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1002 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1003 1004 if (imx_pcie->phy) { 1005 if (phy_power_off(imx_pcie->phy)) 1006 dev_err(pci->dev, "unable to power off PHY\n"); 1007 phy_exit(imx_pcie->phy); 1008 } 1009 imx_pcie_clk_disable(imx_pcie); 1010 1011 if (imx_pcie->vpcie) 1012 regulator_disable(imx_pcie->vpcie); 1013 } 1014 1015 static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr) 1016 { 1017 struct imx_pcie *imx_pcie = to_imx_pcie(pcie); 1018 struct dw_pcie_rp *pp = &pcie->pp; 1019 struct resource_entry *entry; 1020 1021 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP)) 1022 return cpu_addr; 1023 1024 entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); 1025 if (!entry) 1026 return cpu_addr; 1027 1028 return cpu_addr - entry->offset; 1029 } 1030 1031 static const struct dw_pcie_host_ops imx_pcie_host_ops = { 1032 .init = imx_pcie_host_init, 1033 .deinit = imx_pcie_host_exit, 1034 }; 1035 1036 static const struct dw_pcie_ops dw_pcie_ops = { 1037 .start_link = imx_pcie_start_link, 1038 .stop_link = imx_pcie_stop_link, 1039 .cpu_addr_fixup = imx_pcie_cpu_addr_fixup, 1040 }; 1041 1042 static void imx_pcie_ep_init(struct dw_pcie_ep *ep) 1043 { 1044 enum pci_barno bar; 1045 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1046 1047 for (bar = BAR_0; bar <= BAR_5; bar++) 1048 dw_pcie_ep_reset_bar(pci, bar); 1049 } 1050 1051 static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 1052 unsigned int type, u16 interrupt_num) 1053 { 1054 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1055 1056 switch (type) { 1057 case PCI_IRQ_INTX: 1058 return dw_pcie_ep_raise_intx_irq(ep, func_no); 1059 case PCI_IRQ_MSI: 1060 return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); 1061 case PCI_IRQ_MSIX: 1062 return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); 1063 default: 1064 dev_err(pci->dev, "UNKNOWN IRQ type\n"); 1065 return -EINVAL; 1066 } 1067 1068 return 0; 1069 } 1070 1071 static const struct pci_epc_features imx8m_pcie_epc_features = { 1072 .linkup_notifier = false, 1073 .msi_capable = true, 1074 .msix_capable = false, 1075 .bar[BAR_1] = { .type = BAR_RESERVED, }, 1076 .bar[BAR_3] = { .type = BAR_RESERVED, }, 1077 .align = SZ_64K, 1078 }; 1079 1080 /* 1081 * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme 1082 * ================================================================================================ 1083 * BAR0 | Enable | 64-bit | 1 MB | Programmable Size 1084 * BAR1 | Disable | 32-bit | 64 KB | Fixed Size 1085 * BAR1 should be disabled if BAR0 is 64bit. 1086 * BAR2 | Enable | 32-bit | 1 MB | Programmable Size 1087 * BAR3 | Enable | 32-bit | 64 KB | Programmable Size 1088 * BAR4 | Enable | 32-bit | 1M | Programmable Size 1089 * BAR5 | Enable | 32-bit | 64 KB | Programmable Size 1090 */ 1091 static const struct pci_epc_features imx95_pcie_epc_features = { 1092 .msi_capable = true, 1093 .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, }, 1094 .align = SZ_4K, 1095 }; 1096 1097 static const struct pci_epc_features* 1098 imx_pcie_ep_get_features(struct dw_pcie_ep *ep) 1099 { 1100 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1101 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1102 1103 return imx_pcie->drvdata->epc_features; 1104 } 1105 1106 static const struct dw_pcie_ep_ops pcie_ep_ops = { 1107 .init = imx_pcie_ep_init, 1108 .raise_irq = imx_pcie_ep_raise_irq, 1109 .get_features = imx_pcie_ep_get_features, 1110 }; 1111 1112 static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, 1113 struct platform_device *pdev) 1114 { 1115 int ret; 1116 unsigned int pcie_dbi2_offset; 1117 struct dw_pcie_ep *ep; 1118 struct dw_pcie *pci = imx_pcie->pci; 1119 struct dw_pcie_rp *pp = &pci->pp; 1120 struct device *dev = pci->dev; 1121 1122 imx_pcie_host_init(pp); 1123 ep = &pci->ep; 1124 ep->ops = &pcie_ep_ops; 1125 1126 switch (imx_pcie->drvdata->variant) { 1127 case IMX8MQ_EP: 1128 case IMX8MM_EP: 1129 case IMX8MP_EP: 1130 pcie_dbi2_offset = SZ_1M; 1131 break; 1132 default: 1133 pcie_dbi2_offset = SZ_4K; 1134 break; 1135 } 1136 1137 pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset; 1138 1139 /* 1140 * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining 1141 * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC 1142 * core code can fetch that from DT. But once all platform DTs were fixed, this and the 1143 * above "dbi_base2" setting should be removed. 1144 */ 1145 if (device_property_match_string(dev, "reg-names", "dbi2") >= 0) 1146 pci->dbi_base2 = NULL; 1147 1148 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT)) 1149 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 1150 1151 ep->page_size = imx_pcie->drvdata->epc_features->align; 1152 1153 ret = dw_pcie_ep_init(ep); 1154 if (ret) { 1155 dev_err(dev, "failed to initialize endpoint\n"); 1156 return ret; 1157 } 1158 1159 ret = dw_pcie_ep_init_registers(ep); 1160 if (ret) { 1161 dev_err(dev, "Failed to initialize DWC endpoint registers\n"); 1162 dw_pcie_ep_deinit(ep); 1163 return ret; 1164 } 1165 1166 pci_epc_init_notify(ep->epc); 1167 1168 /* Start LTSSM. */ 1169 imx_pcie_ltssm_enable(dev); 1170 1171 return 0; 1172 } 1173 1174 static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie) 1175 { 1176 struct device *dev = imx_pcie->pci->dev; 1177 1178 /* Some variants have a turnoff reset in DT */ 1179 if (imx_pcie->turnoff_reset) { 1180 reset_control_assert(imx_pcie->turnoff_reset); 1181 reset_control_deassert(imx_pcie->turnoff_reset); 1182 goto pm_turnoff_sleep; 1183 } 1184 1185 /* Others poke directly at IOMUXC registers */ 1186 switch (imx_pcie->drvdata->variant) { 1187 case IMX6SX: 1188 case IMX6QP: 1189 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 1190 IMX6SX_GPR12_PCIE_PM_TURN_OFF, 1191 IMX6SX_GPR12_PCIE_PM_TURN_OFF); 1192 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 1193 IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0); 1194 break; 1195 default: 1196 dev_err(dev, "PME_Turn_Off not implemented\n"); 1197 return; 1198 } 1199 1200 /* 1201 * Components with an upstream port must respond to 1202 * PME_Turn_Off with PME_TO_Ack but we can't check. 1203 * 1204 * The standard recommends a 1-10ms timeout after which to 1205 * proceed anyway as if acks were received. 1206 */ 1207 pm_turnoff_sleep: 1208 usleep_range(1000, 10000); 1209 } 1210 1211 static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) 1212 { 1213 u8 offset; 1214 u16 val; 1215 struct dw_pcie *pci = imx_pcie->pci; 1216 1217 if (pci_msi_enabled()) { 1218 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1219 if (save) { 1220 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1221 imx_pcie->msi_ctrl = val; 1222 } else { 1223 dw_pcie_dbi_ro_wr_en(pci); 1224 val = imx_pcie->msi_ctrl; 1225 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1226 dw_pcie_dbi_ro_wr_dis(pci); 1227 } 1228 } 1229 } 1230 1231 static int imx_pcie_suspend_noirq(struct device *dev) 1232 { 1233 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 1234 struct dw_pcie_rp *pp = &imx_pcie->pci->pp; 1235 1236 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) 1237 return 0; 1238 1239 imx_pcie_msi_save_restore(imx_pcie, true); 1240 imx_pcie_pm_turnoff(imx_pcie); 1241 imx_pcie_stop_link(imx_pcie->pci); 1242 imx_pcie_host_exit(pp); 1243 1244 return 0; 1245 } 1246 1247 static int imx_pcie_resume_noirq(struct device *dev) 1248 { 1249 int ret; 1250 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 1251 struct dw_pcie_rp *pp = &imx_pcie->pci->pp; 1252 1253 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) 1254 return 0; 1255 1256 ret = imx_pcie_host_init(pp); 1257 if (ret) 1258 return ret; 1259 imx_pcie_msi_save_restore(imx_pcie, false); 1260 dw_pcie_setup_rc(pp); 1261 1262 if (imx_pcie->link_is_up) 1263 imx_pcie_start_link(imx_pcie->pci); 1264 1265 return 0; 1266 } 1267 1268 static const struct dev_pm_ops imx_pcie_pm_ops = { 1269 NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq, 1270 imx_pcie_resume_noirq) 1271 }; 1272 1273 static int imx_pcie_probe(struct platform_device *pdev) 1274 { 1275 struct device *dev = &pdev->dev; 1276 struct dw_pcie *pci; 1277 struct imx_pcie *imx_pcie; 1278 struct device_node *np; 1279 struct resource *dbi_base; 1280 struct device_node *node = dev->of_node; 1281 int ret; 1282 u16 val; 1283 int i; 1284 1285 imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL); 1286 if (!imx_pcie) 1287 return -ENOMEM; 1288 1289 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1290 if (!pci) 1291 return -ENOMEM; 1292 1293 pci->dev = dev; 1294 pci->ops = &dw_pcie_ops; 1295 pci->pp.ops = &imx_pcie_host_ops; 1296 1297 imx_pcie->pci = pci; 1298 imx_pcie->drvdata = of_device_get_match_data(dev); 1299 1300 /* Find the PHY if one is defined, only imx7d uses it */ 1301 np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); 1302 if (np) { 1303 struct resource res; 1304 1305 ret = of_address_to_resource(np, 0, &res); 1306 if (ret) { 1307 dev_err(dev, "Unable to map PCIe PHY\n"); 1308 return ret; 1309 } 1310 imx_pcie->phy_base = devm_ioremap_resource(dev, &res); 1311 if (IS_ERR(imx_pcie->phy_base)) 1312 return PTR_ERR(imx_pcie->phy_base); 1313 } 1314 1315 pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); 1316 if (IS_ERR(pci->dbi_base)) 1317 return PTR_ERR(pci->dbi_base); 1318 1319 /* Fetch GPIOs */ 1320 imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 1321 if (IS_ERR(imx_pcie->reset_gpiod)) 1322 return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod), 1323 "unable to get reset gpio\n"); 1324 gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); 1325 1326 if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) 1327 return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); 1328 1329 for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) 1330 imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; 1331 1332 /* Fetch clocks */ 1333 ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 1334 if (ret) 1335 return ret; 1336 1337 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) { 1338 imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); 1339 if (IS_ERR(imx_pcie->phy)) 1340 return dev_err_probe(dev, PTR_ERR(imx_pcie->phy), 1341 "failed to get pcie phy\n"); 1342 } 1343 1344 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) { 1345 imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); 1346 if (IS_ERR(imx_pcie->apps_reset)) 1347 return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset), 1348 "failed to get pcie apps reset control\n"); 1349 } 1350 1351 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) { 1352 imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); 1353 if (IS_ERR(imx_pcie->pciephy_reset)) 1354 return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset), 1355 "Failed to get PCIEPHY reset control\n"); 1356 } 1357 1358 switch (imx_pcie->drvdata->variant) { 1359 case IMX8MQ: 1360 case IMX8MQ_EP: 1361 case IMX7D: 1362 if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) 1363 imx_pcie->controller_id = 1; 1364 break; 1365 default: 1366 break; 1367 } 1368 1369 /* Grab turnoff reset */ 1370 imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); 1371 if (IS_ERR(imx_pcie->turnoff_reset)) { 1372 dev_err(dev, "Failed to get TURNOFF reset control\n"); 1373 return PTR_ERR(imx_pcie->turnoff_reset); 1374 } 1375 1376 if (imx_pcie->drvdata->gpr) { 1377 /* Grab GPR config register range */ 1378 imx_pcie->iomuxc_gpr = 1379 syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr); 1380 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1381 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1382 "unable to find iomuxc registers\n"); 1383 } 1384 1385 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) { 1386 void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app"); 1387 1388 if (IS_ERR(off)) 1389 return dev_err_probe(dev, PTR_ERR(off), 1390 "unable to find serdes registers\n"); 1391 1392 static const struct regmap_config regmap_config = { 1393 .reg_bits = 32, 1394 .val_bits = 32, 1395 .reg_stride = 4, 1396 }; 1397 1398 imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, ®map_config); 1399 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1400 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1401 "unable to find iomuxc registers\n"); 1402 } 1403 1404 /* Grab PCIe PHY Tx Settings */ 1405 if (of_property_read_u32(node, "fsl,tx-deemph-gen1", 1406 &imx_pcie->tx_deemph_gen1)) 1407 imx_pcie->tx_deemph_gen1 = 0; 1408 1409 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", 1410 &imx_pcie->tx_deemph_gen2_3p5db)) 1411 imx_pcie->tx_deemph_gen2_3p5db = 0; 1412 1413 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", 1414 &imx_pcie->tx_deemph_gen2_6db)) 1415 imx_pcie->tx_deemph_gen2_6db = 20; 1416 1417 if (of_property_read_u32(node, "fsl,tx-swing-full", 1418 &imx_pcie->tx_swing_full)) 1419 imx_pcie->tx_swing_full = 127; 1420 1421 if (of_property_read_u32(node, "fsl,tx-swing-low", 1422 &imx_pcie->tx_swing_low)) 1423 imx_pcie->tx_swing_low = 127; 1424 1425 /* Limit link speed */ 1426 pci->max_link_speed = 1; 1427 of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed); 1428 1429 imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); 1430 if (IS_ERR(imx_pcie->vpcie)) { 1431 if (PTR_ERR(imx_pcie->vpcie) != -ENODEV) 1432 return PTR_ERR(imx_pcie->vpcie); 1433 imx_pcie->vpcie = NULL; 1434 } 1435 1436 imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); 1437 if (IS_ERR(imx_pcie->vph)) { 1438 if (PTR_ERR(imx_pcie->vph) != -ENODEV) 1439 return PTR_ERR(imx_pcie->vph); 1440 imx_pcie->vph = NULL; 1441 } 1442 1443 platform_set_drvdata(pdev, imx_pcie); 1444 1445 ret = imx_pcie_attach_pd(dev); 1446 if (ret) 1447 return ret; 1448 1449 if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { 1450 ret = imx_add_pcie_ep(imx_pcie, pdev); 1451 if (ret < 0) 1452 return ret; 1453 } else { 1454 ret = dw_pcie_host_init(&pci->pp); 1455 if (ret < 0) 1456 return ret; 1457 1458 if (pci_msi_enabled()) { 1459 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1460 1461 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1462 val |= PCI_MSI_FLAGS_ENABLE; 1463 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1464 } 1465 } 1466 1467 return 0; 1468 } 1469 1470 static void imx_pcie_shutdown(struct platform_device *pdev) 1471 { 1472 struct imx_pcie *imx_pcie = platform_get_drvdata(pdev); 1473 1474 /* bring down link, so bootloader gets clean state in case of reboot */ 1475 imx_pcie_assert_core_reset(imx_pcie); 1476 } 1477 1478 static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"}; 1479 static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"}; 1480 static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"}; 1481 static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"}; 1482 static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"}; 1483 1484 static const struct imx_pcie_drvdata drvdata[] = { 1485 [IMX6Q] = { 1486 .variant = IMX6Q, 1487 .flags = IMX_PCIE_FLAG_IMX_PHY | 1488 IMX_PCIE_FLAG_IMX_SPEED_CHANGE, 1489 .dbi_length = 0x200, 1490 .gpr = "fsl,imx6q-iomuxc-gpr", 1491 .clk_names = imx6q_clks, 1492 .clks_cnt = ARRAY_SIZE(imx6q_clks), 1493 .ltssm_off = IOMUXC_GPR12, 1494 .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, 1495 .mode_off[0] = IOMUXC_GPR12, 1496 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1497 .init_phy = imx_pcie_init_phy, 1498 .enable_ref_clk = imx6q_pcie_enable_ref_clk, 1499 .core_reset = imx6q_pcie_core_reset, 1500 }, 1501 [IMX6SX] = { 1502 .variant = IMX6SX, 1503 .flags = IMX_PCIE_FLAG_IMX_PHY | 1504 IMX_PCIE_FLAG_IMX_SPEED_CHANGE | 1505 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, 1506 .gpr = "fsl,imx6q-iomuxc-gpr", 1507 .clk_names = imx6sx_clks, 1508 .clks_cnt = ARRAY_SIZE(imx6sx_clks), 1509 .ltssm_off = IOMUXC_GPR12, 1510 .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, 1511 .mode_off[0] = IOMUXC_GPR12, 1512 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1513 .init_phy = imx6sx_pcie_init_phy, 1514 .enable_ref_clk = imx6sx_pcie_enable_ref_clk, 1515 .core_reset = imx6sx_pcie_core_reset, 1516 }, 1517 [IMX6QP] = { 1518 .variant = IMX6QP, 1519 .flags = IMX_PCIE_FLAG_IMX_PHY | 1520 IMX_PCIE_FLAG_IMX_SPEED_CHANGE | 1521 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, 1522 .dbi_length = 0x200, 1523 .gpr = "fsl,imx6q-iomuxc-gpr", 1524 .clk_names = imx6q_clks, 1525 .clks_cnt = ARRAY_SIZE(imx6q_clks), 1526 .ltssm_off = IOMUXC_GPR12, 1527 .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, 1528 .mode_off[0] = IOMUXC_GPR12, 1529 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1530 .init_phy = imx_pcie_init_phy, 1531 .enable_ref_clk = imx6q_pcie_enable_ref_clk, 1532 .core_reset = imx6qp_pcie_core_reset, 1533 }, 1534 [IMX7D] = { 1535 .variant = IMX7D, 1536 .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | 1537 IMX_PCIE_FLAG_HAS_APP_RESET | 1538 IMX_PCIE_FLAG_HAS_PHY_RESET, 1539 .gpr = "fsl,imx7d-iomuxc-gpr", 1540 .clk_names = imx6q_clks, 1541 .clks_cnt = ARRAY_SIZE(imx6q_clks), 1542 .mode_off[0] = IOMUXC_GPR12, 1543 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1544 .init_phy = imx7d_pcie_init_phy, 1545 .enable_ref_clk = imx7d_pcie_enable_ref_clk, 1546 .core_reset = imx7d_pcie_core_reset, 1547 }, 1548 [IMX8MQ] = { 1549 .variant = IMX8MQ, 1550 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1551 IMX_PCIE_FLAG_HAS_PHY_RESET, 1552 .gpr = "fsl,imx8mq-iomuxc-gpr", 1553 .clk_names = imx8mq_clks, 1554 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1555 .mode_off[0] = IOMUXC_GPR12, 1556 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1557 .mode_off[1] = IOMUXC_GPR12, 1558 .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, 1559 .init_phy = imx8mq_pcie_init_phy, 1560 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1561 }, 1562 [IMX8MM] = { 1563 .variant = IMX8MM, 1564 .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | 1565 IMX_PCIE_FLAG_HAS_PHYDRV | 1566 IMX_PCIE_FLAG_HAS_APP_RESET, 1567 .gpr = "fsl,imx8mm-iomuxc-gpr", 1568 .clk_names = imx8mm_clks, 1569 .clks_cnt = ARRAY_SIZE(imx8mm_clks), 1570 .mode_off[0] = IOMUXC_GPR12, 1571 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1572 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1573 }, 1574 [IMX8MP] = { 1575 .variant = IMX8MP, 1576 .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | 1577 IMX_PCIE_FLAG_HAS_PHYDRV | 1578 IMX_PCIE_FLAG_HAS_APP_RESET, 1579 .gpr = "fsl,imx8mp-iomuxc-gpr", 1580 .clk_names = imx8mm_clks, 1581 .clks_cnt = ARRAY_SIZE(imx8mm_clks), 1582 .mode_off[0] = IOMUXC_GPR12, 1583 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1584 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1585 }, 1586 [IMX8Q] = { 1587 .variant = IMX8Q, 1588 .flags = IMX_PCIE_FLAG_HAS_PHYDRV | 1589 IMX_PCIE_FLAG_CPU_ADDR_FIXUP, 1590 .clk_names = imx8q_clks, 1591 .clks_cnt = ARRAY_SIZE(imx8q_clks), 1592 }, 1593 [IMX95] = { 1594 .variant = IMX95, 1595 .flags = IMX_PCIE_FLAG_HAS_SERDES, 1596 .clk_names = imx8mq_clks, 1597 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1598 .ltssm_off = IMX95_PE0_GEN_CTRL_3, 1599 .ltssm_mask = IMX95_PCIE_LTSSM_EN, 1600 .mode_off[0] = IMX95_PE0_GEN_CTRL_1, 1601 .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, 1602 .init_phy = imx95_pcie_init_phy, 1603 }, 1604 [IMX8MQ_EP] = { 1605 .variant = IMX8MQ_EP, 1606 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1607 IMX_PCIE_FLAG_HAS_PHY_RESET, 1608 .mode = DW_PCIE_EP_TYPE, 1609 .gpr = "fsl,imx8mq-iomuxc-gpr", 1610 .clk_names = imx8mq_clks, 1611 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1612 .mode_off[0] = IOMUXC_GPR12, 1613 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1614 .mode_off[1] = IOMUXC_GPR12, 1615 .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, 1616 .epc_features = &imx8m_pcie_epc_features, 1617 .init_phy = imx8mq_pcie_init_phy, 1618 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1619 }, 1620 [IMX8MM_EP] = { 1621 .variant = IMX8MM_EP, 1622 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1623 IMX_PCIE_FLAG_HAS_PHYDRV, 1624 .mode = DW_PCIE_EP_TYPE, 1625 .gpr = "fsl,imx8mm-iomuxc-gpr", 1626 .clk_names = imx8mm_clks, 1627 .clks_cnt = ARRAY_SIZE(imx8mm_clks), 1628 .mode_off[0] = IOMUXC_GPR12, 1629 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1630 .epc_features = &imx8m_pcie_epc_features, 1631 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1632 }, 1633 [IMX8MP_EP] = { 1634 .variant = IMX8MP_EP, 1635 .flags = IMX_PCIE_FLAG_HAS_APP_RESET | 1636 IMX_PCIE_FLAG_HAS_PHYDRV, 1637 .mode = DW_PCIE_EP_TYPE, 1638 .gpr = "fsl,imx8mp-iomuxc-gpr", 1639 .clk_names = imx8mm_clks, 1640 .clks_cnt = ARRAY_SIZE(imx8mm_clks), 1641 .mode_off[0] = IOMUXC_GPR12, 1642 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1643 .epc_features = &imx8m_pcie_epc_features, 1644 .enable_ref_clk = imx8mm_pcie_enable_ref_clk, 1645 }, 1646 [IMX95_EP] = { 1647 .variant = IMX95_EP, 1648 .flags = IMX_PCIE_FLAG_HAS_SERDES | 1649 IMX_PCIE_FLAG_SUPPORT_64BIT, 1650 .clk_names = imx8mq_clks, 1651 .clks_cnt = ARRAY_SIZE(imx8mq_clks), 1652 .ltssm_off = IMX95_PE0_GEN_CTRL_3, 1653 .ltssm_mask = IMX95_PCIE_LTSSM_EN, 1654 .mode_off[0] = IMX95_PE0_GEN_CTRL_1, 1655 .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, 1656 .init_phy = imx95_pcie_init_phy, 1657 .epc_features = &imx95_pcie_epc_features, 1658 .mode = DW_PCIE_EP_TYPE, 1659 }, 1660 }; 1661 1662 static const struct of_device_id imx_pcie_of_match[] = { 1663 { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], }, 1664 { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], }, 1665 { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], }, 1666 { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], }, 1667 { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], }, 1668 { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], }, 1669 { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], }, 1670 { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], }, 1671 { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], }, 1672 { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], }, 1673 { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], }, 1674 { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], }, 1675 { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], }, 1676 {}, 1677 }; 1678 1679 static struct platform_driver imx_pcie_driver = { 1680 .driver = { 1681 .name = "imx6q-pcie", 1682 .of_match_table = imx_pcie_of_match, 1683 .suppress_bind_attrs = true, 1684 .pm = &imx_pcie_pm_ops, 1685 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1686 }, 1687 .probe = imx_pcie_probe, 1688 .shutdown = imx_pcie_shutdown, 1689 }; 1690 1691 static void imx_pcie_quirk(struct pci_dev *dev) 1692 { 1693 struct pci_bus *bus = dev->bus; 1694 struct dw_pcie_rp *pp = bus->sysdata; 1695 1696 /* Bus parent is the PCI bridge, its parent is this platform driver */ 1697 if (!bus->dev.parent || !bus->dev.parent->parent) 1698 return; 1699 1700 /* Make sure we only quirk devices associated with this driver */ 1701 if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver) 1702 return; 1703 1704 if (pci_is_root_bus(bus)) { 1705 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1706 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1707 1708 /* 1709 * Limit config length to avoid the kernel reading beyond 1710 * the register set and causing an abort on i.MX 6Quad 1711 */ 1712 if (imx_pcie->drvdata->dbi_length) { 1713 dev->cfg_size = imx_pcie->drvdata->dbi_length; 1714 dev_info(&dev->dev, "Limiting cfg_size to %d\n", 1715 dev->cfg_size); 1716 } 1717 } 1718 } 1719 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, 1720 PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk); 1721 1722 static int __init imx_pcie_init(void) 1723 { 1724 #ifdef CONFIG_ARM 1725 struct device_node *np; 1726 1727 np = of_find_matching_node(NULL, imx_pcie_of_match); 1728 if (!np) 1729 return -ENODEV; 1730 of_node_put(np); 1731 1732 /* 1733 * Since probe() can be deferred we need to make sure that 1734 * hook_fault_code is not called after __init memory is freed 1735 * by kernel and since imx6q_pcie_abort_handler() is a no-op, 1736 * we can install the handler here without risking it 1737 * accessing some uninitialized driver state. 1738 */ 1739 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, 1740 "external abort on non-linefetch"); 1741 #endif 1742 1743 return platform_driver_register(&imx_pcie_driver); 1744 } 1745 device_initcall(imx_pcie_init); 1746