// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * https://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS		BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)

#define IMX95_PCIE_PHY_GEN_CTRL			0x0
#define IMX95_PCIE_REF_USE_PAD			BIT(17)

#define IMX95_PCIE_SS_RW_REG_0			0xf0
#define IMX95_PCIE_REF_CLKEN			BIT(23)
#define IMX95_PCIE_PHY_CR_PARA_SEL		BIT(9)

#define IMX95_PE0_GEN_CTRL_1			0x1050
#define IMX95_PCIE_DEVICE_TYPE			GENMASK(3, 0)

#define IMX95_PE0_GEN_CTRL_3			0x1058
#define IMX95_PCIE_LTSSM_EN			BIT(0)

#define IMX95_PE0_LUT_ACSCTRL			0x1008
#define IMX95_PE0_LUT_RWA			BIT(16)
#define IMX95_PE0_LUT_ENLOC			GENMASK(4, 0)

#define IMX95_PE0_LUT_DATA1			0x100c
#define IMX95_PE0_LUT_VLD			BIT(31)
#define IMX95_PE0_LUT_DAC_ID			GENMASK(10, 8)
#define IMX95_PE0_LUT_STREAM_ID			GENMASK(5, 0)

#define IMX95_PE0_LUT_DATA2			0x1010
#define IMX95_PE0_LUT_REQID			GENMASK(31, 16)
#define IMX95_PE0_LUT_MASK			GENMASK(15, 0)

#define IMX95_SID_MASK				GENMASK(5, 0)
#define IMX95_MAX_LUT				32

#define to_imx_pcie(x)	dev_get_drvdata((x)->dev)

enum imx_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
	IMX8MM,
	IMX8MP,
	IMX8Q,
	IMX95,
	IMX8MQ_EP,
	IMX8MM_EP,
	IMX8MP_EP,
	IMX8Q_EP,
	IMX95_EP,
};

#define IMX_PCIE_FLAG_IMX_PHY			BIT(0)
#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE		BIT(1)
#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
#define IMX_PCIE_FLAG_HAS_PHYDRV		BIT(3)
#define IMX_PCIE_FLAG_HAS_APP_RESET		BIT(4)
#define IMX_PCIE_FLAG_HAS_PHY_RESET		BIT(5)
#define IMX_PCIE_FLAG_HAS_SERDES		BIT(6)
#define IMX_PCIE_FLAG_SUPPORT_64BIT		BIT(7)
#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP		BIT(8)
/*
 * Because of ERR005723 (PCIe does not support L2 power down) we need to
 * work around suspend/resume on the devices affected by this erratum.
 */
#define IMX_PCIE_FLAG_BROKEN_SUSPEND		BIT(9)
#define IMX_PCIE_FLAG_HAS_LUT			BIT(10)

#define imx_check_flag(pci, val)	((pci)->drvdata->flags & (val))

#define IMX_PCIE_MAX_INSTANCES			2

struct imx_pcie;

struct imx_pcie_drvdata {
	enum imx_pcie_variants variant;
	enum dw_pcie_device_mode mode;
	u32 flags;
	int dbi_length;
	const char *gpr;
	const u32 ltssm_off;
	const u32 ltssm_mask;
	const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
	const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
	const struct pci_epc_features *epc_features;
	int (*init_phy)(struct imx_pcie *pcie);
	int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
	int (*core_reset)(struct imx_pcie *pcie, bool assert);
	const struct dw_pcie_host_ops *ops;
};

struct imx_pcie {
	struct dw_pcie *pci;
	struct gpio_desc *reset_gpiod;
	struct clk_bulk_data *clks;
	int num_clks;
	struct regmap *iomuxc_gpr;
	u16 msi_ctrl;
	u32 controller_id;
	struct reset_control *pciephy_reset;
	struct reset_control *apps_reset;
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	struct regulator *vpcie;
	struct regulator *vph;
	void __iomem *phy_base;

	/* power domain for pcie */
	struct device *pd_pcie;
	/* power domain for pcie phy */
	struct device *pd_pcie_phy;
	struct phy *phy;
	const struct imx_pcie_drvdata *drvdata;

	/* Ensure that only one device's LUT is configured at any given time */
	struct mutex lock;
};

/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
#define PCIE_PHY_CTRL_WR		BIT(18)
#define PCIE_PHY_CTRL_RD		BIT(19)

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK		BIT(16)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define PCIE_PHY_ATEOVRD_EN			BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15		0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)
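/*
 * Pick the per-controller GPR block: controller 1 (the second i.MX8MQ
 * controller) uses IOMUXC_GPR16, everything else uses IOMUXC_GPR14.
 * Only the i.MX8M family variants have these per-controller GPR bits,
 * hence the WARN_ON below.
 */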
static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
	WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
		imx_pcie->drvdata->variant != IMX8MQ_EP &&
		imx_pcie->drvdata->variant != IMX8MM &&
		imx_pcie->drvdata->variant != IMX8MM_EP &&
		imx_pcie->drvdata->variant != IMX8MP &&
		imx_pcie->drvdata->variant != IMX8MP_EP);
	return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_PHY_CR_PARA_SEL,
			   IMX95_PCIE_PHY_CR_PARA_SEL);

	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_PHY_GEN_CTRL,
			   IMX95_PCIE_REF_USE_PAD, 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_REF_CLKEN,
			   IMX95_PCIE_REF_CLKEN);

	return 0;
}

static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	unsigned int mask, val, mode, id;

	if (drvdata->mode == DW_PCIE_EP_TYPE)
		mode = PCI_EXP_TYPE_ENDPOINT;
	else
		mode = PCI_EXP_TYPE_ROOT_PORT;

	id = imx_pcie->controller_id;

	/* If mode_mask is 0, the generic PHY driver is used to set the mode */
	if (!drvdata->mode_mask[0])
		return;

	/* If mode_mask[id] is 0, each controller has its individual GPR */
	if (!drvdata->mode_mask[id])
		id = 0;

	mask = drvdata->mode_mask[id];
	val = mode << (ffs(mask) - 1);

	regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}

static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx_pcie, false);
}
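/*
 * Write to the 16-bit PCIe PHY control registers (not memory-mapped).
 * Each transaction is a handshake over PCIE_PHY_CTRL: place the
 * address or data on the DATA field, pulse the capture (CAP_ADR /
 * CAP_DAT) or strobe (WR/RD) bit, then wait for PCIE_PHY_STAT_ACK to
 * assert and de-assert again before starting the next phase.
 */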
static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	/* TODO: This code assumes an external oscillator is being used */
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   imx_pcie_grp_offset(imx_pcie),
			   IMX8MQ_GPR_PCIE_REF_USE_PAD,
			   IMX8MQ_GPR_PCIE_REF_USE_PAD);
	/*
	 * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
	 * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
	 * to zero.
	 */
	if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
		regmap_update_bits(imx_pcie->iomuxc_gpr,
				   imx_pcie_grp_offset(imx_pcie),
				   IMX8MQ_GPR_PCIE_VREG_BYPASS,
				   0);

	return 0;
}

static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
			   imx_pcie->tx_deemph_gen1 << 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
			   imx_pcie->tx_deemph_gen2_3p5db << 6);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
			   imx_pcie->tx_deemph_gen2_6db << 12);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_FULL,
			   imx_pcie->tx_swing_full << 18);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_LOW,
			   imx_pcie->tx_swing_low << 25);
	return 0;
}

static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);

	return imx_pcie_init_phy(imx_pcie);
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}
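/*
 * MPLL override settings per reference clock; a sketch of the intent,
 * assuming the usual DWC PHY relationship rate = ref / (div + 1) * mult:
 *
 *   ref clock   mult   div   effective rate
 *   125 MHz      -      -    reset default, nothing to do
 *   100 MHz     25      0    100 MHz * 25     = 2.5 GHz
 *   200 MHz     25      1    200 MHz / 2 * 25 = 2.5 GHz
 */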
static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
	unsigned long phy_rate = 0;
	int mult, div;
	u16 val;
	int i;
	struct clk_bulk_data *clks = imx_pcie->clks;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return 0;

	for (i = 0; i < imx_pcie->num_clks; i++)
		if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
			phy_rate = clk_get_rate(clks[i].clk);

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
	u16 tmp;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return;

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
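/*
 * On i.MX6, a config access to a device that doesn't respond typically
 * ends in an external abort rather than the all-ones completion the PCI
 * core expects. The handler below decodes the faulting ARM load (word,
 * byte, or halfword forms) just far enough to fake that all-ones result
 * and step past the instruction.
 */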
#ifdef CONFIG_ARM
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		/* ARM single data transfer, load (LDR/LDRB) */
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;	/* byte load (LDRB) reads as 0xff */
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	if ((instr & 0x0e100090) == 0x00100090) {
		/* halfword/signed loads (LDRH and friends) */
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
#endif

static int imx_pcie_attach_pd(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx_pcie->pd_pcie))
		return PTR_ERR(imx_pcie->pd_pcie);
	/* Do nothing when the power domain is missing */
	if (!imx_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd\n");
		return -EINVAL;
	}

	imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx_pcie->pd_pcie_phy))
		return PTR_ERR(imx_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
		return -EINVAL;
	}

	return 0;
}

static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
			   enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
	return 0;
}

static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	if (enable) {
		/* power up the core PHY and enable the ref clock */
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally. When the ref clock only comes up after reset,
		 * the internally synced reset time is too short to meet the
		 * requirement, so add a ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
	} else {
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	}

	return 0;
}

static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	int offset = imx_pcie_grp_offset(imx_pcie);

	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
			   enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
			   enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
	return 0;
}
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
			   enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
	return 0;
}

static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
	if (ret)
		return ret;

	if (imx_pcie->drvdata->enable_ref_clk) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret) {
			dev_err(dev, "Failed to enable PCIe REFCLK\n");
			goto err_ref_clk;
		}
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);
	return 0;

err_ref_clk:
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);

	return ret;
}

static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
	if (imx_pcie->drvdata->enable_ref_clk)
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}

static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (assert)
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_TEST_POWERDOWN);

	/* Force PCIe PHY reset */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
			   assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
	return 0;
}

static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
			   assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
	if (!assert)
		usleep_range(200, 500);

	return 0;
}

static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (!assert)
		return 0;

	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);

	return 0;
}
static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;

	if (assert)
		return 0;

	/*
	 * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
	 *
	 * PCIe: PLL may fail to lock under corner conditions.
	 *
	 * Initial VCO oscillation may fail under corner conditions such as
	 * cold temperature, which will cause the PCIe PLL to fail to lock
	 * in the initialization phase.
	 *
	 * The Duty-cycle Corrector calibration must be disabled:
	 *
	 * 1. De-assert the G_RST signal by clearing
	 *    SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
	 * 2. De-assert DCC_FB_EN by writing data “0x29” to the register
	 *    address 0x306d0014 (PCIE_PHY_CMN_REG4).
	 * 3. Assert RX_EQ, RX_EQ_SEL by writing data “0x48” to the register
	 *    address 0x306d0090 (PCIE_PHY_CMN_REG24).
	 * 4. Assert ATT_MODE by writing data “0xbc” to the register
	 *    address 0x306d0098 (PCIE_PHY_CMN_REG26).
	 * 5. De-assert the CMN_RST signal by clearing register bit
	 *    SRC_PCIEPHY_RCR[PCIEPHY_BTN].
	 */

	if (likely(imx_pcie->phy_base)) {
		/* De-assert DCC_FB_EN */
		writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
		/* Assert RX_EQ and RX_EQ_SEL */
		writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
		       imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
		/* Assert ATT_MODE */
		writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
	} else {
		dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
	}
	imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
	return 0;
}

static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_assert(imx_pcie->pciephy_reset);
	reset_control_assert(imx_pcie->apps_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, true);

	/* Some boards don't have PCIe reset GPIO. */
	gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
}

static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_deassert(imx_pcie->pciephy_reset);
	reset_control_deassert(imx_pcie->apps_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, false);

	/* Some boards don't have PCIe reset GPIO. */
	if (imx_pcie->reset_gpiod) {
		msleep(100);
		gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
		msleep(100);
	}

	return 0;
}

static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -ETIMEDOUT;
}

static void imx_pcie_ltssm_enable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
	u32 tmp;

	tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
	phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
				   drvdata->ltssm_mask);

	reset_control_deassert(imx_pcie->apps_reset);
}

static void imx_pcie_ltssm_disable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;

	phy_set_speed(imx_pcie->phy, 0);
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
				   drvdata->ltssm_mask, 0);

	reset_control_assert(imx_pcie->apps_reset);
}
static int imx_pcie_start_link(struct dw_pcie *pci)
{
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	struct device *dev = pci->dev;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	dw_pcie_dbi_ro_wr_en(pci);
	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	tmp &= ~PCI_EXP_LNKCAP_SLS;
	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Start LTSSM. */
	imx_pcie_ltssm_enable(dev);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto err_reset_phy;

	if (pci->max_link_speed > 1) {
		/* Allow faster modes after the link is up */
		dw_pcie_dbi_ro_wr_en(pci);
		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		tmp &= ~PCI_EXP_LNKCAP_SLS;
		tmp |= pci->max_link_speed;
		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
		dw_pcie_dbi_ro_wr_dis(pci);

		if (imx_pcie->drvdata->flags &
		    IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from the i.MX6 family when no link speed transition
			 * occurs and the link stays at Gen1: in that case the
			 * bit is not cleared by the hardware, which would make
			 * the following check report a false failure.
			 */
			ret = imx_pcie_wait_for_speed_change(imx_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = dw_pcie_wait_for_link(pci);
		if (ret)
			goto err_reset_phy;
	} else {
		dev_info(dev, "Link: Only Gen1 is enabled\n");
	}

	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx_pcie_reset_phy(imx_pcie);
	/* Don't treat a missing link as fatal; the slot may simply be empty */
	return 0;
}

static void imx_pcie_stop_link(struct dw_pcie *pci)
{
	struct device *dev = pci->dev;

	/* Turn off PCIe LTSSM */
	imx_pcie_ltssm_disable(dev);
}
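/*
 * i.MX95 LUT handling: the glue logic provides IMX95_MAX_LUT (32)
 * entries, each translating a 16-bit PCI Requester ID into the 6-bit
 * streamID seen by the SMMU and the MSI controller. Entries are
 * programmed indirectly through the ACSCTRL/DATA1/DATA2 register window.
 */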
static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 data1, data2;
	int free = -1;
	int i;

	if (sid >= 64) {
		dev_err(dev, "Invalid SID for index %d\n", sid);
		return -EINVAL;
	}

	guard(mutex)(&imx_pcie->lock);

	/*
	 * Iterate through all LUT entries to check for duplicate RID and
	 * identify the first available entry. Configure this available entry
	 * immediately after verification to avoid rescanning it.
	 */
	for (i = 0; i < IMX95_MAX_LUT; i++) {
		regmap_write(imx_pcie->iomuxc_gpr,
			     IMX95_PE0_LUT_ACSCTRL, IMX95_PE0_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);

		if (!(data1 & IMX95_PE0_LUT_VLD)) {
			if (free < 0)
				free = i;
			continue;
		}

		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);

		/* Do not add duplicate RID */
		if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
			dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
			return 0;
		}
	}

	if (free < 0) {
		dev_err(dev, "LUT entry is not available\n");
		return -ENOSPC;
	}

	data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
	data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
	data1 |= IMX95_PE0_LUT_VLD;
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);

	data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
	data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);

	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);

	return 0;
}

static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
{
	u32 data2;
	int i;

	guard(mutex)(&imx_pcie->lock);

	for (i = 0; i < IMX95_MAX_LUT; i++) {
		regmap_write(imx_pcie->iomuxc_gpr,
			     IMX95_PE0_LUT_ACSCTRL, IMX95_PE0_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
		if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_DATA1, 0);
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_DATA2, 0);
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_ACSCTRL, i);

			break;
		}
	}
}
static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
				  struct pci_dev *pdev)
{
	struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
	u32 sid_i, sid_m, rid = pci_dev_id(pdev);
	struct device_node *target;
	struct device *dev;
	int err_i, err_m;
	u32 sid = 0;

	dev = imx_pcie->pci->dev;

	target = NULL;
	err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
			  &target, &sid_i);
	if (target) {
		of_node_put(target);
	} else {
		/*
		 * "target == NULL && err_i == 0" means the RID is out of the
		 * map range, which would require a 1:1 RID-to-streamID
		 * mapping. The hardware can't support that because the
		 * streamID is only 6 bits wide.
		 */
		err_i = -EINVAL;
	}

	target = NULL;
	err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
			  &target, &sid_m);

	/*
	 *   err_m    target
	 *      0      NULL    RID out of range. A 1:1 RID-to-streamID map
	 *                     would be needed, which the current hardware
	 *                     can't support, so return -EINVAL.
	 *   != 0      NULL    msi-map does not exist, use built-in MSI.
	 *      0   != NULL    Correct streamID found for the RID.
	 *   != 0   != NULL    Invalid combination.
	 */
	if (!err_m && !target)
		return -EINVAL;
	else if (target)
		of_node_put(target); /* Found a streamID map entry for the RID in msi-map */

	/*
	 * msi-map   iommu-map
	 *    N          N      DWC MSI Ctrl
	 *    Y          Y      ITS + SMMU, the same SID is required
	 *    Y          N      ITS
	 *    N          Y      DWC MSI Ctrl + SMMU
	 */
	if (err_i && err_m)
		return 0;

	if (!err_i && !err_m) {
		/*
		 *                      Glue Layer
		 *                     <==========>
		 * ┌─────┐                           ┌──────────┐
		 * │ LUT │   6-bit streamID          │          │
		 * │     │──────────────────────────►│   MSI    │
		 * └─────┘         2-bit ctrl ID     │          │
		 *               ┌──────────────────►│          │
		 *   (i.MX95)    │                   │          │
		 *   00 PCIe0    │                   │          │
		 *   01 ENETC    │                   │          │
		 *   10 PCIe1    │                   │          │
		 *               │                   └──────────┘
		 * The MSI glue layer automatically prepends a 2-bit
		 * controller ID to the streamID, so mask those 2 bits off to
		 * get the streamID. The IOMMU glue layer doesn't do that.
		 */
		if (sid_i != (sid_m & IMX95_SID_MASK)) {
			dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
			return -EINVAL;
		}
	}

	if (!err_i)
		sid = sid_i;
	else if (!err_m)
		sid = sid_m & IMX95_SID_MASK;

	return imx_pcie_add_lut(imx_pcie, rid, sid);
}

static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
				    struct pci_dev *pdev)
{
	struct imx_pcie *imx_pcie;

	imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
	imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
}
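/*
 * Bring-up order matters here: assert the core reset first, configure
 * the PHY and the RC/EP mode while the core is quiescent, enable the
 * clocks, power up the PHY, and only then release the reset and (on the
 * IMX_PCIE_FLAG_IMX_PHY variants) reprogram the MPLL.
 */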
static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	int ret;

	if (imx_pcie->vpcie) {
		ret = regulator_enable(imx_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return ret;
		}
	}

	if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
		pp->bridge->enable_device = imx_pcie_enable_device;
		pp->bridge->disable_device = imx_pcie_disable_device;
	}

	imx_pcie_assert_core_reset(imx_pcie);

	if (imx_pcie->drvdata->init_phy)
		imx_pcie->drvdata->init_phy(imx_pcie);

	imx_pcie_configure_type(imx_pcie);

	ret = imx_pcie_clk_enable(imx_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
		goto err_reg_disable;
	}

	if (imx_pcie->phy) {
		ret = phy_init(imx_pcie->phy);
		if (ret) {
			dev_err(dev, "pcie PHY power up failed\n");
			goto err_clk_disable;
		}

		ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
				       imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
						PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
		if (ret) {
			dev_err(dev, "unable to set PCIe PHY mode\n");
			goto err_phy_exit;
		}

		ret = phy_power_on(imx_pcie->phy);
		if (ret) {
			dev_err(dev, "waiting for PHY ready timeout!\n");
			goto err_phy_exit;
		}
	}

	ret = imx_pcie_deassert_core_reset(imx_pcie);
	if (ret < 0) {
		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
		goto err_phy_off;
	}

	imx_setup_phy_mpll(imx_pcie);

	return 0;

err_phy_off:
	phy_power_off(imx_pcie->phy);
err_phy_exit:
	phy_exit(imx_pcie->phy);
err_clk_disable:
	imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
	return ret;
}

static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	if (imx_pcie->phy) {
		if (phy_power_off(imx_pcie->phy))
			dev_err(pci->dev, "unable to power off PHY\n");
		phy_exit(imx_pcie->phy);
	}
	imx_pcie_clk_disable(imx_pcie);

	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
}

/*
 * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in the iATU Ctrl2
 * register is reserved, so the generic DWC implementation of sending the
 * PME_Turn_Off message using a dummy MMIO write cannot be used.
 */
static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
	regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);

	usleep_range(PCIE_PME_TO_L2_TIMEOUT_US / 10, PCIE_PME_TO_L2_TIMEOUT_US);
}

static const struct dw_pcie_host_ops imx_pcie_host_ops = {
	.init = imx_pcie_host_init,
	.deinit = imx_pcie_host_exit,
	.pme_turn_off = imx_pcie_pme_turn_off,
};

static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
	.init = imx_pcie_host_init,
	.deinit = imx_pcie_host_exit,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = imx_pcie_start_link,
	.stop_link = imx_pcie_stop_link,
};

static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	enum pci_barno bar;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}

static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_IRQ_INTX:
		return dw_pcie_ep_raise_intx_irq(ep, func_no);
	case PCI_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_IRQ_MSIX:
		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct pci_epc_features imx8m_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.align = SZ_64K,
};
static const struct pci_epc_features imx8q_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.bar[BAR_5] = { .type = BAR_RESERVED, },
	.align = SZ_64K,
};

/*
 *      | Default  | Default | Default | BAR Sizing
 * BAR# | Enable?  | Type    | Size    | Scheme
 * =======================================================
 * BAR0 | Enable   | 64-bit  |  1 MB   | Programmable Size
 * BAR1 | Disable  | 32-bit  | 64 KB   | Fixed Size
 *      (BAR1 should be disabled if BAR0 is 64-bit)
 * BAR2 | Enable   | 32-bit  |  1 MB   | Programmable Size
 * BAR3 | Enable   | 32-bit  | 64 KB   | Programmable Size
 * BAR4 | Enable   | 32-bit  |  1 MB   | Programmable Size
 * BAR5 | Enable   | 32-bit  | 64 KB   | Programmable Size
 */
static const struct pci_epc_features imx95_pcie_epc_features = {
	.msi_capable = true,
	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
	.align = SZ_4K,
};

static const struct pci_epc_features*
imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	return imx_pcie->drvdata->epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.init = imx_pcie_ep_init,
	.raise_irq = imx_pcie_ep_raise_irq,
	.get_features = imx_pcie_ep_get_features,
};

static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
			   struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct dw_pcie *pci = imx_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = pci->dev;

	imx_pcie_host_init(pp);
	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	ep->page_size = imx_pcie->drvdata->epc_features->align;

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	ret = dw_pcie_ep_init_registers(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC endpoint registers\n");
		dw_pcie_ep_deinit(ep);
		return ret;
	}

	pci_epc_init_notify(ep->epc);

	/* Start LTSSM. */
	imx_pcie_ltssm_enable(dev);

	return 0;
}
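/*
 * The MSI enable bit lives in dbi space, which loses its state when the
 * core is powered down over suspend (see the PCIE_TEST_PD note in
 * imx_pcie_resume_noirq()), so it has to be saved and restored by hand.
 */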
static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
	u8 offset;
	u16 val;
	struct dw_pcie *pci = imx_pcie->pci;

	if (pci_msi_enabled()) {
		offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
		if (save) {
			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
			imx_pcie->msi_ctrl = val;
		} else {
			dw_pcie_dbi_ro_wr_en(pci);
			val = imx_pcie->msi_ctrl;
			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
			dw_pcie_dbi_ro_wr_dis(pci);
		}
	}
}

static int imx_pcie_suspend_noirq(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx_pcie_msi_save_restore(imx_pcie, true);
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		/*
		 * The minimum for a workaround would be to set PERST# and to
		 * set the PCIE_TEST_PD flag. However, we can also disable the
		 * clock, which saves some power.
		 */
		imx_pcie_assert_core_reset(imx_pcie);
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	} else {
		return dw_pcie_suspend_noirq(imx_pcie->pci);
	}

	return 0;
}

static int imx_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret)
			return ret;
		ret = imx_pcie_deassert_core_reset(imx_pcie);
		if (ret)
			return ret;

		/*
		 * Using PCIE_TEST_PD seems to disable MSI and powers down the
		 * root complex. This is why we have to set up the rc again and
		 * why we have to restore the MSI register.
		 */
		ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
		if (ret)
			return ret;
	} else {
		ret = dw_pcie_resume_noirq(imx_pcie->pci);
		if (ret)
			return ret;
	}
	imx_pcie_msi_save_restore(imx_pcie, false);

	return 0;
}

static const struct dev_pm_ops imx_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
				  imx_pcie_resume_noirq)
};

static int imx_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx_pcie *imx_pcie;
	struct device_node *np;
	struct device_node *node = dev->of_node;
	int ret, domain;
	u16 val;

	imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
	if (!imx_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx_pcie->pci = pci;
	imx_pcie->drvdata = of_device_get_match_data(dev);

	mutex_init(&imx_pcie->lock);

	if (imx_pcie->drvdata->ops)
		pci->pp.ops = imx_pcie->drvdata->ops;
	else
		pci->pp.ops = &imx_pcie_host_dw_pme_ops;

	/* Find the PHY if one is defined, only imx7d uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx_pcie->phy_base))
			return PTR_ERR(imx_pcie->phy_base);
	}

	/* Fetch GPIOs */
	imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(imx_pcie->reset_gpiod))
		return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
				     "unable to get reset gpio\n");
	gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");

	/* Fetch clocks */
	imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
	if (imx_pcie->num_clks < 0)
		return dev_err_probe(dev, imx_pcie->num_clks,
				     "failed to get clocks\n");

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
		imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
		if (IS_ERR(imx_pcie->phy))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
					     "failed to get pcie phy\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
		imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
		if (IS_ERR(imx_pcie->apps_reset))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
					     "failed to get pcie apps reset control\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
		imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
		if (IS_ERR(imx_pcie->pciephy_reset))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
					     "Failed to get PCIEPHY reset control\n");
	}

	switch (imx_pcie->drvdata->variant) {
	case IMX8MQ:
	case IMX8MQ_EP:
		domain = of_get_pci_domain_nr(node);
		if (domain < 0 || domain > 1)
			return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");

		imx_pcie->controller_id = domain;
		break;
	default:
		break;
	}

	if (imx_pcie->drvdata->gpr) {
		/* Grab GPR config register range */
		imx_pcie->iomuxc_gpr =
			syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
		if (IS_ERR(imx_pcie->iomuxc_gpr))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
					     "unable to find iomuxc registers\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
		void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");

		if (IS_ERR(off))
			return dev_err_probe(dev, PTR_ERR(off),
					     "unable to find serdes registers\n");

		static const struct regmap_config regmap_config = {
			.reg_bits = 32,
			.val_bits = 32,
			.reg_stride = 4,
		};

		imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
		if (IS_ERR(imx_pcie->iomuxc_gpr))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
					     "unable to find iomuxc registers\n");
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx_pcie->tx_deemph_gen1))
		imx_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx_pcie->tx_deemph_gen2_3p5db))
		imx_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx_pcie->tx_deemph_gen2_6db))
		imx_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx_pcie->tx_swing_full))
		imx_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx_pcie->tx_swing_low))
		imx_pcie->tx_swing_low = 127;

	/* Limit link speed */
	pci->max_link_speed = 1;
	of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);

	imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx_pcie->vpcie)) {
		if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx_pcie->vpcie);
		imx_pcie->vpcie = NULL;
	}

	imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
	if (IS_ERR(imx_pcie->vph)) {
		if (PTR_ERR(imx_pcie->vph) != -ENODEV)
			return PTR_ERR(imx_pcie->vph);
		imx_pcie->vph = NULL;
	}

	platform_set_drvdata(pdev, imx_pcie);

	ret = imx_pcie_attach_pd(dev);
	if (ret)
		return ret;

	pci->use_parent_dt_ranges = true;
	if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
		ret = imx_add_pcie_ep(imx_pcie, pdev);
		if (ret < 0)
			return ret;
	} else {
		pci->pp.use_atu_msg = true;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			return ret;

		if (pci_msi_enabled()) {
			u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);

			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
			val |= PCI_MSI_FLAGS_ENABLE;
			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
		}
	}

	return 0;
}

static void imx_pcie_shutdown(struct platform_device *pdev)
{
	struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);

	/* bring down the link so the bootloader gets a clean state in case of reboot */
	imx_pcie_assert_core_reset(imx_pcie);
}
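/*
 * Per-variant configuration, indexed by enum imx_pcie_variants and
 * attached to the matching compatible via imx_pcie_of_match[] below.
 */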
static const struct imx_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
			 IMX_PCIE_FLAG_BROKEN_SUSPEND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6q_pcie_core_reset,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx6sx_pcie_init_phy,
		.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
		.core_reset = imx6sx_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6qp_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.gpr = "fsl,imx7d-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx7d_pcie_enable_ref_clk,
		.core_reset = imx7d_pcie_core_reset,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM] = {
		.variant = IMX8MM,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP] = {
		.variant = IMX8MP,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q] = {
		.variant = IMX8Q,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX95] = {
		.variant = IMX95,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_HAS_LUT |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.init_phy = imx95_pcie_init_phy,
	},
	[IMX8MQ_EP] = {
		.variant = IMX8MQ_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM_EP] = {
		.variant = IMX8MM_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP_EP] = {
		.variant = IMX8MP_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q_EP] = {
		.variant = IMX8Q_EP,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.epc_features = &imx8q_pcie_epc_features,
	},
	[IMX95_EP] = {
		.variant = IMX95_EP,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_SUPPORT_64BIT,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.init_phy = imx95_pcie_init_phy,
		.epc_features = &imx95_pcie_epc_features,
		.mode = DW_PCIE_EP_TYPE,
	},
};

static const struct of_device_id imx_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
	{ .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
	{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
	{ .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
	{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
	{},
};

static struct platform_driver imx_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = imx_pcie_probe,
	.shutdown = imx_pcie_shutdown,
};

static void imx_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct dw_pcie_rp *pp = bus->sysdata;

	/* Bus parent is the PCI bridge, its parent is this platform driver */
	if (!bus->dev.parent || !bus->dev.parent->parent)
		return;

	/* Make sure we only quirk devices associated with this driver */
	if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
		return;

	if (pci_is_root_bus(bus)) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct imx_pcie *imx_pcie = to_imx_pcie(pci);

		/*
		 * Limit config length to avoid the kernel reading beyond
		 * the register set and causing an abort on i.MX 6Quad
		 */
		if (imx_pcie->drvdata->dbi_length) {
			dev->cfg_size = imx_pcie->drvdata->dbi_length;
			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
				 dev->cfg_size);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
			       PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);

static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
	struct device_node *np;

	np = of_find_matching_node(NULL, imx_pcie_of_match);
	if (!np)
		return -ENODEV;
	of_node_put(np);

	/*
	 * Since probe() can be deferred, we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by the kernel. Since imx6q_pcie_abort_handler() does not
	 * depend on driver state, we can install the handler here
	 * without risking it accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx_pcie_driver);
}
device_initcall(imx_pcie_init);