1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * PCIe host controller driver for Freescale i.MX6 SoCs 4 * 5 * Copyright (C) 2013 Kosagi 6 * https://www.kosagi.com 7 * 8 * Author: Sean Cross <xobs@kosagi.com> 9 */ 10 11 #include <linux/bitfield.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/gpio/consumer.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 18 #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> 19 #include <linux/module.h> 20 #include <linux/of.h> 21 #include <linux/of_address.h> 22 #include <linux/pci.h> 23 #include <linux/platform_device.h> 24 #include <linux/regmap.h> 25 #include <linux/regulator/consumer.h> 26 #include <linux/resource.h> 27 #include <linux/signal.h> 28 #include <linux/types.h> 29 #include <linux/interrupt.h> 30 #include <linux/reset.h> 31 #include <linux/phy/pcie.h> 32 #include <linux/phy/phy.h> 33 #include <linux/pm_domain.h> 34 #include <linux/pm_runtime.h> 35 36 #include "../../pci.h" 37 #include "pcie-designware.h" 38 39 #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) 40 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) 41 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) 42 #define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12) 43 #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) 44 #define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 45 46 #define IMX95_PCIE_PHY_GEN_CTRL 0x0 47 #define IMX95_PCIE_REF_USE_PAD BIT(17) 48 49 #define IMX95_PCIE_SS_RW_REG_0 0xf0 50 #define IMX95_PCIE_REF_CLKEN BIT(23) 51 #define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9) 52 53 #define IMX95_PE0_GEN_CTRL_1 0x1050 54 #define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0) 55 56 #define IMX95_PE0_GEN_CTRL_3 0x1058 57 #define IMX95_PCIE_LTSSM_EN BIT(0) 58 59 #define IMX95_PE0_LUT_ACSCTRL 0x1008 60 #define IMX95_PEO_LUT_RWA BIT(16) 61 #define IMX95_PE0_LUT_ENLOC GENMASK(4, 0) 62 63 #define IMX95_PE0_LUT_DATA1 0x100c 64 #define IMX95_PE0_LUT_VLD BIT(31) 65 #define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8) 66 
#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0) 67 68 #define IMX95_PE0_LUT_DATA2 0x1010 69 #define IMX95_PE0_LUT_REQID GENMASK(31, 16) 70 #define IMX95_PE0_LUT_MASK GENMASK(15, 0) 71 72 #define IMX95_SID_MASK GENMASK(5, 0) 73 #define IMX95_MAX_LUT 32 74 75 #define to_imx_pcie(x) dev_get_drvdata((x)->dev) 76 77 enum imx_pcie_variants { 78 IMX6Q, 79 IMX6SX, 80 IMX6QP, 81 IMX7D, 82 IMX8MQ, 83 IMX8MM, 84 IMX8MP, 85 IMX8Q, 86 IMX95, 87 IMX8MQ_EP, 88 IMX8MM_EP, 89 IMX8MP_EP, 90 IMX8Q_EP, 91 IMX95_EP, 92 }; 93 94 #define IMX_PCIE_FLAG_IMX_PHY BIT(0) 95 #define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1) 96 #define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) 97 #define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3) 98 #define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4) 99 #define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5) 100 #define IMX_PCIE_FLAG_HAS_SERDES BIT(6) 101 #define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7) 102 #define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8) 103 /* 104 * Because of ERR005723 (PCIe does not support L2 power down) we need to 105 * workaround suspend resume on some devices which are affected by this errata. 
106 */ 107 #define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9) 108 #define IMX_PCIE_FLAG_HAS_LUT BIT(10) 109 110 #define imx_check_flag(pci, val) (pci->drvdata->flags & val) 111 112 #define IMX_PCIE_MAX_CLKS 6 113 #define IMX_PCIE_MAX_INSTANCES 2 114 115 struct imx_pcie; 116 117 struct imx_pcie_drvdata { 118 enum imx_pcie_variants variant; 119 enum dw_pcie_device_mode mode; 120 u32 flags; 121 int dbi_length; 122 const char *gpr; 123 const char * const *clk_names; 124 const u32 clks_cnt; 125 const u32 clks_optional_cnt; 126 const u32 ltssm_off; 127 const u32 ltssm_mask; 128 const u32 mode_off[IMX_PCIE_MAX_INSTANCES]; 129 const u32 mode_mask[IMX_PCIE_MAX_INSTANCES]; 130 const struct pci_epc_features *epc_features; 131 int (*init_phy)(struct imx_pcie *pcie); 132 int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable); 133 int (*core_reset)(struct imx_pcie *pcie, bool assert); 134 const struct dw_pcie_host_ops *ops; 135 }; 136 137 struct imx_pcie { 138 struct dw_pcie *pci; 139 struct gpio_desc *reset_gpiod; 140 struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS]; 141 struct regmap *iomuxc_gpr; 142 u16 msi_ctrl; 143 u32 controller_id; 144 struct reset_control *pciephy_reset; 145 struct reset_control *apps_reset; 146 u32 tx_deemph_gen1; 147 u32 tx_deemph_gen2_3p5db; 148 u32 tx_deemph_gen2_6db; 149 u32 tx_swing_full; 150 u32 tx_swing_low; 151 struct regulator *vpcie; 152 struct regulator *vph; 153 void __iomem *phy_base; 154 155 /* power domain for pcie */ 156 struct device *pd_pcie; 157 /* power domain for pcie phy */ 158 struct device *pd_pcie_phy; 159 struct phy *phy; 160 const struct imx_pcie_drvdata *drvdata; 161 162 /* Ensure that only one device's LUT is configured at any given time */ 163 struct mutex lock; 164 }; 165 166 /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ 167 #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 168 #define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX) 169 170 /* PCIe Port Logic registers (memory-mapped) */ 171 #define 
PL_OFFSET 0x700 172 173 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) 174 #define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x)) 175 #define PCIE_PHY_CTRL_CAP_ADR BIT(16) 176 #define PCIE_PHY_CTRL_CAP_DAT BIT(17) 177 #define PCIE_PHY_CTRL_WR BIT(18) 178 #define PCIE_PHY_CTRL_RD BIT(19) 179 180 #define PCIE_PHY_STAT (PL_OFFSET + 0x110) 181 #define PCIE_PHY_STAT_ACK BIT(16) 182 183 /* PHY registers (not memory-mapped) */ 184 #define PCIE_PHY_ATEOVRD 0x10 185 #define PCIE_PHY_ATEOVRD_EN BIT(2) 186 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 187 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 188 189 #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 190 #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 191 #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f 192 #define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9) 193 194 #define PCIE_PHY_RX_ASIC_OUT 0x100D 195 #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) 196 197 /* iMX7 PCIe PHY registers */ 198 #define PCIE_PHY_CMN_REG4 0x14 199 /* These are probably the bits that *aren't* DCC_FB_EN */ 200 #define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29 201 202 #define PCIE_PHY_CMN_REG15 0x54 203 #define PCIE_PHY_CMN_REG15_DLY_4 BIT(2) 204 #define PCIE_PHY_CMN_REG15_PLL_PD BIT(5) 205 #define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7) 206 207 #define PCIE_PHY_CMN_REG24 0x90 208 #define PCIE_PHY_CMN_REG24_RX_EQ BIT(6) 209 #define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3) 210 211 #define PCIE_PHY_CMN_REG26 0x98 212 #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC 213 214 #define PHY_RX_OVRD_IN_LO 0x1005 215 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5) 216 #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3) 217 218 static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie) 219 { 220 WARN_ON(imx_pcie->drvdata->variant != IMX8MQ && 221 imx_pcie->drvdata->variant != IMX8MQ_EP && 222 imx_pcie->drvdata->variant != IMX8MM && 223 imx_pcie->drvdata->variant != IMX8MM_EP && 224 imx_pcie->drvdata->variant != IMX8MP && 225 imx_pcie->drvdata->variant != IMX8MP_EP); 226 return imx_pcie->controller_id == 1 ? 
IOMUXC_GPR16 : IOMUXC_GPR14; 227 } 228 229 static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie) 230 { 231 regmap_update_bits(imx_pcie->iomuxc_gpr, 232 IMX95_PCIE_SS_RW_REG_0, 233 IMX95_PCIE_PHY_CR_PARA_SEL, 234 IMX95_PCIE_PHY_CR_PARA_SEL); 235 236 regmap_update_bits(imx_pcie->iomuxc_gpr, 237 IMX95_PCIE_PHY_GEN_CTRL, 238 IMX95_PCIE_REF_USE_PAD, 0); 239 regmap_update_bits(imx_pcie->iomuxc_gpr, 240 IMX95_PCIE_SS_RW_REG_0, 241 IMX95_PCIE_REF_CLKEN, 242 IMX95_PCIE_REF_CLKEN); 243 244 return 0; 245 } 246 247 static void imx_pcie_configure_type(struct imx_pcie *imx_pcie) 248 { 249 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 250 unsigned int mask, val, mode, id; 251 252 if (drvdata->mode == DW_PCIE_EP_TYPE) 253 mode = PCI_EXP_TYPE_ENDPOINT; 254 else 255 mode = PCI_EXP_TYPE_ROOT_PORT; 256 257 id = imx_pcie->controller_id; 258 259 /* If mode_mask is 0, generic PHY driver is used to set the mode */ 260 if (!drvdata->mode_mask[0]) 261 return; 262 263 /* If mode_mask[id] is 0, each controller has its individual GPR */ 264 if (!drvdata->mode_mask[id]) 265 id = 0; 266 267 mask = drvdata->mode_mask[id]; 268 val = mode << (ffs(mask) - 1); 269 270 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val); 271 } 272 273 static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val) 274 { 275 struct dw_pcie *pci = imx_pcie->pci; 276 bool val; 277 u32 max_iterations = 10; 278 u32 wait_counter = 0; 279 280 do { 281 val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) & 282 PCIE_PHY_STAT_ACK; 283 wait_counter++; 284 285 if (val == exp_val) 286 return 0; 287 288 udelay(1); 289 } while (wait_counter < max_iterations); 290 291 return -ETIMEDOUT; 292 } 293 294 static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr) 295 { 296 struct dw_pcie *pci = imx_pcie->pci; 297 u32 val; 298 int ret; 299 300 val = PCIE_PHY_CTRL_DATA(addr); 301 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); 302 303 val |= PCIE_PHY_CTRL_CAP_ADR; 304 dw_pcie_writel_dbi(pci, 
PCIE_PHY_CTRL, val); 305 306 ret = pcie_phy_poll_ack(imx_pcie, true); 307 if (ret) 308 return ret; 309 310 val = PCIE_PHY_CTRL_DATA(addr); 311 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); 312 313 return pcie_phy_poll_ack(imx_pcie, false); 314 } 315 316 /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ 317 static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data) 318 { 319 struct dw_pcie *pci = imx_pcie->pci; 320 u32 phy_ctl; 321 int ret; 322 323 ret = pcie_phy_wait_ack(imx_pcie, addr); 324 if (ret) 325 return ret; 326 327 /* assert Read signal */ 328 phy_ctl = PCIE_PHY_CTRL_RD; 329 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); 330 331 ret = pcie_phy_poll_ack(imx_pcie, true); 332 if (ret) 333 return ret; 334 335 *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); 336 337 /* deassert Read signal */ 338 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); 339 340 return pcie_phy_poll_ack(imx_pcie, false); 341 } 342 343 static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data) 344 { 345 struct dw_pcie *pci = imx_pcie->pci; 346 u32 var; 347 int ret; 348 349 /* write addr */ 350 /* cap addr */ 351 ret = pcie_phy_wait_ack(imx_pcie, addr); 352 if (ret) 353 return ret; 354 355 var = PCIE_PHY_CTRL_DATA(data); 356 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 357 358 /* capture data */ 359 var |= PCIE_PHY_CTRL_CAP_DAT; 360 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 361 362 ret = pcie_phy_poll_ack(imx_pcie, true); 363 if (ret) 364 return ret; 365 366 /* deassert cap data */ 367 var = PCIE_PHY_CTRL_DATA(data); 368 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 369 370 /* wait for ack de-assertion */ 371 ret = pcie_phy_poll_ack(imx_pcie, false); 372 if (ret) 373 return ret; 374 375 /* assert wr signal */ 376 var = PCIE_PHY_CTRL_WR; 377 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 378 379 /* wait for ack */ 380 ret = pcie_phy_poll_ack(imx_pcie, true); 381 if (ret) 382 return ret; 383 384 /* deassert wr signal */ 385 var = 
PCIE_PHY_CTRL_DATA(data); 386 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); 387 388 /* wait for ack de-assertion */ 389 ret = pcie_phy_poll_ack(imx_pcie, false); 390 if (ret) 391 return ret; 392 393 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); 394 395 return 0; 396 } 397 398 static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) 399 { 400 /* TODO: This code assumes external oscillator is being used */ 401 regmap_update_bits(imx_pcie->iomuxc_gpr, 402 imx_pcie_grp_offset(imx_pcie), 403 IMX8MQ_GPR_PCIE_REF_USE_PAD, 404 IMX8MQ_GPR_PCIE_REF_USE_PAD); 405 /* 406 * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the 407 * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared 408 * to zero. 409 */ 410 if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000) 411 regmap_update_bits(imx_pcie->iomuxc_gpr, 412 imx_pcie_grp_offset(imx_pcie), 413 IMX8MQ_GPR_PCIE_VREG_BYPASS, 414 0); 415 416 return 0; 417 } 418 419 static int imx_pcie_init_phy(struct imx_pcie *imx_pcie) 420 { 421 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 422 IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); 423 424 /* configure constant input signal to the pcie ctrl and phy */ 425 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 426 IMX6Q_GPR12_LOS_LEVEL, 9 << 4); 427 428 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 429 IMX6Q_GPR8_TX_DEEMPH_GEN1, 430 imx_pcie->tx_deemph_gen1 << 0); 431 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 432 IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 433 imx_pcie->tx_deemph_gen2_3p5db << 6); 434 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 435 IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 436 imx_pcie->tx_deemph_gen2_6db << 12); 437 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 438 IMX6Q_GPR8_TX_SWING_FULL, 439 imx_pcie->tx_swing_full << 18); 440 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, 441 IMX6Q_GPR8_TX_SWING_LOW, 442 imx_pcie->tx_swing_low << 25); 443 return 0; 444 } 445 446 static int imx6sx_pcie_init_phy(struct imx_pcie 
*imx_pcie) 447 { 448 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 449 IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2); 450 451 return imx_pcie_init_phy(imx_pcie); 452 } 453 454 static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie) 455 { 456 u32 val; 457 struct device *dev = imx_pcie->pci->dev; 458 459 if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr, 460 IOMUXC_GPR22, val, 461 val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, 462 PHY_PLL_LOCK_WAIT_USLEEP_MAX, 463 PHY_PLL_LOCK_WAIT_TIMEOUT)) 464 dev_err(dev, "PCIe PLL lock timeout\n"); 465 } 466 467 static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie) 468 { 469 unsigned long phy_rate = 0; 470 int mult, div; 471 u16 val; 472 int i; 473 474 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) 475 return 0; 476 477 for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) 478 if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0) 479 phy_rate = clk_get_rate(imx_pcie->clks[i].clk); 480 481 switch (phy_rate) { 482 case 125000000: 483 /* 484 * The default settings of the MPLL are for a 125MHz input 485 * clock, so no need to reconfigure anything in that case. 
486 */ 487 return 0; 488 case 100000000: 489 mult = 25; 490 div = 0; 491 break; 492 case 200000000: 493 mult = 25; 494 div = 1; 495 break; 496 default: 497 dev_err(imx_pcie->pci->dev, 498 "Unsupported PHY reference clock rate %lu\n", phy_rate); 499 return -EINVAL; 500 } 501 502 pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); 503 val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << 504 PCIE_PHY_MPLL_MULTIPLIER_SHIFT); 505 val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; 506 val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; 507 pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); 508 509 pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val); 510 val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << 511 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); 512 val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; 513 val |= PCIE_PHY_ATEOVRD_EN; 514 pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val); 515 516 return 0; 517 } 518 519 static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie) 520 { 521 u16 tmp; 522 523 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) 524 return; 525 526 pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); 527 tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | 528 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 529 pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); 530 531 usleep_range(2000, 3000); 532 533 pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); 534 tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | 535 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 536 pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); 537 } 538 539 #ifdef CONFIG_ARM 540 /* Added for PCI abort handling */ 541 static int imx6q_pcie_abort_handler(unsigned long addr, 542 unsigned int fsr, struct pt_regs *regs) 543 { 544 unsigned long pc = instruction_pointer(regs); 545 unsigned long instr = *(unsigned long *)pc; 546 int reg = (instr >> 12) & 15; 547 548 /* 549 * If the instruction being executed was a read, 550 * make it look like it read all-ones. 
551 */ 552 if ((instr & 0x0c100000) == 0x04100000) { 553 unsigned long val; 554 555 if (instr & 0x00400000) 556 val = 255; 557 else 558 val = -1; 559 560 regs->uregs[reg] = val; 561 regs->ARM_pc += 4; 562 return 0; 563 } 564 565 if ((instr & 0x0e100090) == 0x00100090) { 566 regs->uregs[reg] = -1; 567 regs->ARM_pc += 4; 568 return 0; 569 } 570 571 return 1; 572 } 573 #endif 574 575 static int imx_pcie_attach_pd(struct device *dev) 576 { 577 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 578 struct device_link *link; 579 580 /* Do nothing when in a single power domain */ 581 if (dev->pm_domain) 582 return 0; 583 584 imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); 585 if (IS_ERR(imx_pcie->pd_pcie)) 586 return PTR_ERR(imx_pcie->pd_pcie); 587 /* Do nothing when power domain missing */ 588 if (!imx_pcie->pd_pcie) 589 return 0; 590 link = device_link_add(dev, imx_pcie->pd_pcie, 591 DL_FLAG_STATELESS | 592 DL_FLAG_PM_RUNTIME | 593 DL_FLAG_RPM_ACTIVE); 594 if (!link) { 595 dev_err(dev, "Failed to add device_link to pcie pd\n"); 596 return -EINVAL; 597 } 598 599 imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); 600 if (IS_ERR(imx_pcie->pd_pcie_phy)) 601 return PTR_ERR(imx_pcie->pd_pcie_phy); 602 603 link = device_link_add(dev, imx_pcie->pd_pcie_phy, 604 DL_FLAG_STATELESS | 605 DL_FLAG_PM_RUNTIME | 606 DL_FLAG_RPM_ACTIVE); 607 if (!link) { 608 dev_err(dev, "Failed to add device_link to pcie_phy pd\n"); 609 return -EINVAL; 610 } 611 612 return 0; 613 } 614 615 static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 616 { 617 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 618 IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 619 enable ? 
0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN); 620 return 0; 621 } 622 623 static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 624 { 625 if (enable) { 626 /* power up core phy and enable ref clock */ 627 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 628 /* 629 * The async reset input need ref clock to sync internally, 630 * when the ref clock comes after reset, internal synced 631 * reset time is too short, cannot meet the requirement. 632 * Add a ~10us delay here. 633 */ 634 usleep_range(10, 100); 635 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 636 } else { 637 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 638 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 639 } 640 641 return 0; 642 } 643 644 static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 645 { 646 int offset = imx_pcie_grp_offset(imx_pcie); 647 648 regmap_update_bits(imx_pcie->iomuxc_gpr, offset, 649 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, 650 enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); 651 regmap_update_bits(imx_pcie->iomuxc_gpr, offset, 652 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, 653 enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0); 654 return 0; 655 } 656 657 static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) 658 { 659 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 660 IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 661 enable ? 
0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); 662 return 0; 663 } 664 665 static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie) 666 { 667 struct dw_pcie *pci = imx_pcie->pci; 668 struct device *dev = pci->dev; 669 int ret; 670 671 ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 672 if (ret) 673 return ret; 674 675 if (imx_pcie->drvdata->enable_ref_clk) { 676 ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); 677 if (ret) { 678 dev_err(dev, "Failed to enable PCIe REFCLK\n"); 679 goto err_ref_clk; 680 } 681 } 682 683 /* allow the clocks to stabilize */ 684 usleep_range(200, 500); 685 return 0; 686 687 err_ref_clk: 688 clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 689 690 return ret; 691 } 692 693 static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie) 694 { 695 if (imx_pcie->drvdata->enable_ref_clk) 696 imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); 697 clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); 698 } 699 700 static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 701 { 702 if (assert) 703 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, 704 IMX6SX_GPR12_PCIE_TEST_POWERDOWN); 705 706 /* Force PCIe PHY reset */ 707 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET, 708 assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0); 709 return 0; 710 } 711 712 static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 713 { 714 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST, 715 assert ? 
IMX6Q_GPR1_PCIE_SW_RST : 0); 716 if (!assert) 717 usleep_range(200, 500); 718 719 return 0; 720 } 721 722 static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 723 { 724 if (!assert) 725 return 0; 726 727 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); 728 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); 729 730 return 0; 731 } 732 733 static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) 734 { 735 struct dw_pcie *pci = imx_pcie->pci; 736 struct device *dev = pci->dev; 737 738 if (assert) 739 return 0; 740 741 /* 742 * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023): 743 * 744 * PCIe: PLL may fail to lock under corner conditions. 745 * 746 * Initial VCO oscillation may fail under corner conditions such as 747 * cold temperature which will cause the PCIe PLL fail to lock in the 748 * initialization phase. 749 * 750 * The Duty-cycle Corrector calibration must be disabled. 751 * 752 * 1. De-assert the G_RST signal by clearing 753 * SRC_PCIEPHY_RCR[PCIEPHY_G_RST]. 754 * 2. De-assert DCC_FB_EN by writing data “0x29” to the register 755 * address 0x306d0014 (PCIE_PHY_CMN_REG4). 756 * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register 757 * address 0x306d0090 (PCIE_PHY_CMN_REG24). 758 * 4. Assert ATT_MODE by writing data “0xbc” to the register 759 * address 0x306d0098 (PCIE_PHY_CMN_REG26). 760 * 5. 
De-assert the CMN_RST signal by clearing register bit 761 * SRC_PCIEPHY_RCR[PCIEPHY_BTN] 762 */ 763 764 if (likely(imx_pcie->phy_base)) { 765 /* De-assert DCC_FB_EN */ 766 writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4); 767 /* Assert RX_EQS and RX_EQS_SEL */ 768 writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ, 769 imx_pcie->phy_base + PCIE_PHY_CMN_REG24); 770 /* Assert ATT_MODE */ 771 writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26); 772 } else { 773 dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n"); 774 } 775 imx7d_pcie_wait_for_phy_pll_lock(imx_pcie); 776 return 0; 777 } 778 779 static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) 780 { 781 reset_control_assert(imx_pcie->pciephy_reset); 782 reset_control_assert(imx_pcie->apps_reset); 783 784 if (imx_pcie->drvdata->core_reset) 785 imx_pcie->drvdata->core_reset(imx_pcie, true); 786 787 /* Some boards don't have PCIe reset GPIO. */ 788 gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1); 789 } 790 791 static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie) 792 { 793 reset_control_deassert(imx_pcie->pciephy_reset); 794 reset_control_deassert(imx_pcie->apps_reset); 795 796 if (imx_pcie->drvdata->core_reset) 797 imx_pcie->drvdata->core_reset(imx_pcie, false); 798 799 /* Some boards don't have PCIe reset GPIO. 
*/ 800 if (imx_pcie->reset_gpiod) { 801 msleep(100); 802 gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0); 803 /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ 804 msleep(100); 805 } 806 807 return 0; 808 } 809 810 static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie) 811 { 812 struct dw_pcie *pci = imx_pcie->pci; 813 struct device *dev = pci->dev; 814 u32 tmp; 815 unsigned int retries; 816 817 for (retries = 0; retries < 200; retries++) { 818 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 819 /* Test if the speed change finished. */ 820 if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) 821 return 0; 822 usleep_range(100, 1000); 823 } 824 825 dev_err(dev, "Speed change timeout\n"); 826 return -ETIMEDOUT; 827 } 828 829 static void imx_pcie_ltssm_enable(struct device *dev) 830 { 831 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 832 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 833 u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP); 834 u32 tmp; 835 836 tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP); 837 phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp)); 838 if (drvdata->ltssm_mask) 839 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, 840 drvdata->ltssm_mask); 841 842 reset_control_deassert(imx_pcie->apps_reset); 843 } 844 845 static void imx_pcie_ltssm_disable(struct device *dev) 846 { 847 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 848 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; 849 850 phy_set_speed(imx_pcie->phy, 0); 851 if (drvdata->ltssm_mask) 852 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, 853 drvdata->ltssm_mask, 0); 854 855 reset_control_assert(imx_pcie->apps_reset); 856 } 857 858 static int imx_pcie_start_link(struct dw_pcie *pci) 859 { 860 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 861 struct device *dev = pci->dev; 862 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 863 u32 
tmp; 864 int ret; 865 866 /* 867 * Force Gen1 operation when starting the link. In case the link is 868 * started in Gen2 mode, there is a possibility the devices on the 869 * bus will not be detected at all. This happens with PCIe switches. 870 */ 871 dw_pcie_dbi_ro_wr_en(pci); 872 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); 873 tmp &= ~PCI_EXP_LNKCAP_SLS; 874 tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; 875 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); 876 dw_pcie_dbi_ro_wr_dis(pci); 877 878 /* Start LTSSM. */ 879 imx_pcie_ltssm_enable(dev); 880 881 ret = dw_pcie_wait_for_link(pci); 882 if (ret) 883 goto err_reset_phy; 884 885 if (pci->max_link_speed > 1) { 886 /* Allow faster modes after the link is up */ 887 dw_pcie_dbi_ro_wr_en(pci); 888 tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); 889 tmp &= ~PCI_EXP_LNKCAP_SLS; 890 tmp |= pci->max_link_speed; 891 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); 892 893 /* 894 * Start Directed Speed Change so the best possible 895 * speed both link partners support can be negotiated. 896 */ 897 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 898 tmp |= PORT_LOGIC_SPEED_CHANGE; 899 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); 900 dw_pcie_dbi_ro_wr_dis(pci); 901 902 if (imx_pcie->drvdata->flags & 903 IMX_PCIE_FLAG_IMX_SPEED_CHANGE) { 904 905 /* 906 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently 907 * from i.MX6 family when no link speed transition 908 * occurs and we go Gen1 -> yep, Gen1. The difference 909 * is that, in such case, it will not be cleared by HW 910 * which will cause the following code to report false 911 * failure. 912 */ 913 ret = imx_pcie_wait_for_speed_change(imx_pcie); 914 if (ret) { 915 dev_err(dev, "Failed to bring link up!\n"); 916 goto err_reset_phy; 917 } 918 } 919 920 /* Make sure link training is finished as well! 
*/ 921 ret = dw_pcie_wait_for_link(pci); 922 if (ret) 923 goto err_reset_phy; 924 } else { 925 dev_info(dev, "Link: Only Gen1 is enabled\n"); 926 } 927 928 tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); 929 dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS); 930 return 0; 931 932 err_reset_phy: 933 dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", 934 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), 935 dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); 936 imx_pcie_reset_phy(imx_pcie); 937 return 0; 938 } 939 940 static void imx_pcie_stop_link(struct dw_pcie *pci) 941 { 942 struct device *dev = pci->dev; 943 944 /* Turn off PCIe LTSSM */ 945 imx_pcie_ltssm_disable(dev); 946 } 947 948 static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid) 949 { 950 struct dw_pcie *pci = imx_pcie->pci; 951 struct device *dev = pci->dev; 952 u32 data1, data2; 953 int free = -1; 954 int i; 955 956 if (sid >= 64) { 957 dev_err(dev, "Invalid SID for index %d\n", sid); 958 return -EINVAL; 959 } 960 961 guard(mutex)(&imx_pcie->lock); 962 963 /* 964 * Iterate through all LUT entries to check for duplicate RID and 965 * identify the first available entry. Configure this available entry 966 * immediately after verification to avoid rescanning it. 
967 */ 968 for (i = 0; i < IMX95_MAX_LUT; i++) { 969 regmap_write(imx_pcie->iomuxc_gpr, 970 IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); 971 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1); 972 973 if (!(data1 & IMX95_PE0_LUT_VLD)) { 974 if (free < 0) 975 free = i; 976 continue; 977 } 978 979 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); 980 981 /* Do not add duplicate RID */ 982 if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) { 983 dev_warn(dev, "Existing LUT entry available for RID (%d)", rid); 984 return 0; 985 } 986 } 987 988 if (free < 0) { 989 dev_err(dev, "LUT entry is not available\n"); 990 return -ENOSPC; 991 } 992 993 data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0); 994 data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid); 995 data1 |= IMX95_PE0_LUT_VLD; 996 regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1); 997 998 data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ 999 data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid); 1000 regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2); 1001 1002 regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free); 1003 1004 return 0; 1005 } 1006 1007 static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid) 1008 { 1009 u32 data2; 1010 int i; 1011 1012 guard(mutex)(&imx_pcie->lock); 1013 1014 for (i = 0; i < IMX95_MAX_LUT; i++) { 1015 regmap_write(imx_pcie->iomuxc_gpr, 1016 IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); 1017 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); 1018 if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) { 1019 regmap_write(imx_pcie->iomuxc_gpr, 1020 IMX95_PE0_LUT_DATA1, 0); 1021 regmap_write(imx_pcie->iomuxc_gpr, 1022 IMX95_PE0_LUT_DATA2, 0); 1023 regmap_write(imx_pcie->iomuxc_gpr, 1024 IMX95_PE0_LUT_ACSCTRL, i); 1025 1026 break; 1027 } 1028 } 1029 } 1030 1031 static int imx_pcie_enable_device(struct pci_host_bridge *bridge, 1032 struct pci_dev *pdev) 1033 { 1034 struct imx_pcie *imx_pcie = 
to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); 1035 u32 sid_i, sid_m, rid = pci_dev_id(pdev); 1036 struct device_node *target; 1037 struct device *dev; 1038 int err_i, err_m; 1039 u32 sid = 0; 1040 1041 dev = imx_pcie->pci->dev; 1042 1043 target = NULL; 1044 err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask", 1045 &target, &sid_i); 1046 if (target) { 1047 of_node_put(target); 1048 } else { 1049 /* 1050 * "target == NULL && err_i == 0" means RID out of map range. 1051 * Use 1:1 map RID to streamID. Hardware can't support this 1052 * because the streamID is only 6 bits 1053 */ 1054 err_i = -EINVAL; 1055 } 1056 1057 target = NULL; 1058 err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask", 1059 &target, &sid_m); 1060 1061 /* 1062 * err_m target 1063 * 0 NULL RID out of range. Use 1:1 map RID to 1064 * streamID, Current hardware can't 1065 * support it, so return -EINVAL. 1066 * != 0 NULL msi-map does not exist, use built-in MSI 1067 * 0 != NULL Get correct streamID from RID 1068 * != 0 != NULL Invalid combination 1069 */ 1070 if (!err_m && !target) 1071 return -EINVAL; 1072 else if (target) 1073 of_node_put(target); /* Find streamID map entry for RID in msi-map */ 1074 1075 /* 1076 * msi-map iommu-map 1077 * N N DWC MSI Ctrl 1078 * Y Y ITS + SMMU, require the same SID 1079 * Y N ITS 1080 * N Y DWC MSI Ctrl + SMMU 1081 */ 1082 if (err_i && err_m) 1083 return 0; 1084 1085 if (!err_i && !err_m) { 1086 /* 1087 * Glue Layer 1088 * <==========> 1089 * ┌─────┐ ┌──────────┐ 1090 * │ LUT │ 6-bit streamID │ │ 1091 * │ │─────────────────►│ MSI │ 1092 * └─────┘ 2-bit ctrl ID │ │ 1093 * ┌───────────►│ │ 1094 * (i.MX95) │ │ │ 1095 * 00 PCIe0 │ │ │ 1096 * 01 ENETC │ │ │ 1097 * 10 PCIe1 │ │ │ 1098 * │ └──────────┘ 1099 * The MSI glue layer auto adds 2 bits controller ID ahead of 1100 * streamID, so mask these 2 bits to get streamID. The 1101 * IOMMU glue layer doesn't do that. 
1102 */ 1103 if (sid_i != (sid_m & IMX95_SID_MASK)) { 1104 dev_err(dev, "iommu-map and msi-map entries mismatch!\n"); 1105 return -EINVAL; 1106 } 1107 } 1108 1109 if (!err_i) 1110 sid = sid_i; 1111 else if (!err_m) 1112 sid = sid_m & IMX95_SID_MASK; 1113 1114 return imx_pcie_add_lut(imx_pcie, rid, sid); 1115 } 1116 1117 static void imx_pcie_disable_device(struct pci_host_bridge *bridge, 1118 struct pci_dev *pdev) 1119 { 1120 struct imx_pcie *imx_pcie; 1121 1122 imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); 1123 imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev)); 1124 } 1125 1126 static int imx_pcie_host_init(struct dw_pcie_rp *pp) 1127 { 1128 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1129 struct device *dev = pci->dev; 1130 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1131 int ret; 1132 1133 if (imx_pcie->vpcie) { 1134 ret = regulator_enable(imx_pcie->vpcie); 1135 if (ret) { 1136 dev_err(dev, "failed to enable vpcie regulator: %d\n", 1137 ret); 1138 return ret; 1139 } 1140 } 1141 1142 if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) { 1143 pp->bridge->enable_device = imx_pcie_enable_device; 1144 pp->bridge->disable_device = imx_pcie_disable_device; 1145 } 1146 1147 imx_pcie_assert_core_reset(imx_pcie); 1148 1149 if (imx_pcie->drvdata->init_phy) 1150 imx_pcie->drvdata->init_phy(imx_pcie); 1151 1152 imx_pcie_configure_type(imx_pcie); 1153 1154 ret = imx_pcie_clk_enable(imx_pcie); 1155 if (ret) { 1156 dev_err(dev, "unable to enable pcie clocks: %d\n", ret); 1157 goto err_reg_disable; 1158 } 1159 1160 if (imx_pcie->phy) { 1161 ret = phy_init(imx_pcie->phy); 1162 if (ret) { 1163 dev_err(dev, "pcie PHY power up failed\n"); 1164 goto err_clk_disable; 1165 } 1166 1167 ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, 1168 imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ? 
1169 PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC); 1170 if (ret) { 1171 dev_err(dev, "unable to set PCIe PHY mode\n"); 1172 goto err_phy_exit; 1173 } 1174 1175 ret = phy_power_on(imx_pcie->phy); 1176 if (ret) { 1177 dev_err(dev, "waiting for PHY ready timeout!\n"); 1178 goto err_phy_exit; 1179 } 1180 } 1181 1182 ret = imx_pcie_deassert_core_reset(imx_pcie); 1183 if (ret < 0) { 1184 dev_err(dev, "pcie deassert core reset failed: %d\n", ret); 1185 goto err_phy_off; 1186 } 1187 1188 imx_setup_phy_mpll(imx_pcie); 1189 1190 return 0; 1191 1192 err_phy_off: 1193 phy_power_off(imx_pcie->phy); 1194 err_phy_exit: 1195 phy_exit(imx_pcie->phy); 1196 err_clk_disable: 1197 imx_pcie_clk_disable(imx_pcie); 1198 err_reg_disable: 1199 if (imx_pcie->vpcie) 1200 regulator_disable(imx_pcie->vpcie); 1201 return ret; 1202 } 1203 1204 static void imx_pcie_host_exit(struct dw_pcie_rp *pp) 1205 { 1206 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1207 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1208 1209 if (imx_pcie->phy) { 1210 if (phy_power_off(imx_pcie->phy)) 1211 dev_err(pci->dev, "unable to power off PHY\n"); 1212 phy_exit(imx_pcie->phy); 1213 } 1214 imx_pcie_clk_disable(imx_pcie); 1215 1216 if (imx_pcie->vpcie) 1217 regulator_disable(imx_pcie->vpcie); 1218 } 1219 1220 static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr) 1221 { 1222 struct imx_pcie *imx_pcie = to_imx_pcie(pcie); 1223 struct dw_pcie_rp *pp = &pcie->pp; 1224 struct resource_entry *entry; 1225 1226 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP)) 1227 return cpu_addr; 1228 1229 entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); 1230 if (!entry) 1231 return cpu_addr; 1232 1233 return cpu_addr - entry->offset; 1234 } 1235 1236 /* 1237 * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2 1238 * register is reserved, so the generic DWC implementation of sending the 1239 * PME_Turn_Off message using a dummy MMIO write cannot be used. 
1240 */ 1241 static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp) 1242 { 1243 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1244 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1245 1246 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); 1247 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); 1248 1249 usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); 1250 } 1251 1252 static const struct dw_pcie_host_ops imx_pcie_host_ops = { 1253 .init = imx_pcie_host_init, 1254 .deinit = imx_pcie_host_exit, 1255 .pme_turn_off = imx_pcie_pme_turn_off, 1256 }; 1257 1258 static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = { 1259 .init = imx_pcie_host_init, 1260 .deinit = imx_pcie_host_exit, 1261 }; 1262 1263 static const struct dw_pcie_ops dw_pcie_ops = { 1264 .start_link = imx_pcie_start_link, 1265 .stop_link = imx_pcie_stop_link, 1266 .cpu_addr_fixup = imx_pcie_cpu_addr_fixup, 1267 }; 1268 1269 static void imx_pcie_ep_init(struct dw_pcie_ep *ep) 1270 { 1271 enum pci_barno bar; 1272 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1273 1274 for (bar = BAR_0; bar <= BAR_5; bar++) 1275 dw_pcie_ep_reset_bar(pci, bar); 1276 } 1277 1278 static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 1279 unsigned int type, u16 interrupt_num) 1280 { 1281 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1282 1283 switch (type) { 1284 case PCI_IRQ_INTX: 1285 return dw_pcie_ep_raise_intx_irq(ep, func_no); 1286 case PCI_IRQ_MSI: 1287 return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); 1288 case PCI_IRQ_MSIX: 1289 return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); 1290 default: 1291 dev_err(pci->dev, "UNKNOWN IRQ type\n"); 1292 return -EINVAL; 1293 } 1294 1295 return 0; 1296 } 1297 1298 static const struct pci_epc_features imx8m_pcie_epc_features = { 1299 .linkup_notifier = false, 1300 .msi_capable = true, 1301 .msix_capable = false, 1302 .bar[BAR_1] = { .type = 
BAR_RESERVED, }, 1303 .bar[BAR_3] = { .type = BAR_RESERVED, }, 1304 .align = SZ_64K, 1305 }; 1306 1307 static const struct pci_epc_features imx8q_pcie_epc_features = { 1308 .linkup_notifier = false, 1309 .msi_capable = true, 1310 .msix_capable = false, 1311 .bar[BAR_1] = { .type = BAR_RESERVED, }, 1312 .bar[BAR_3] = { .type = BAR_RESERVED, }, 1313 .bar[BAR_5] = { .type = BAR_RESERVED, }, 1314 .align = SZ_64K, 1315 }; 1316 1317 /* 1318 * | Default | Default | Default | BAR Sizing 1319 * BAR# | Enable? | Type | Size | Scheme 1320 * ======================================================= 1321 * BAR0 | Enable | 64-bit | 1 MB | Programmable Size 1322 * BAR1 | Disable | 32-bit | 64 KB | Fixed Size 1323 * (BAR1 should be disabled if BAR0 is 64-bit) 1324 * BAR2 | Enable | 32-bit | 1 MB | Programmable Size 1325 * BAR3 | Enable | 32-bit | 64 KB | Programmable Size 1326 * BAR4 | Enable | 32-bit | 1 MB | Programmable Size 1327 * BAR5 | Enable | 32-bit | 64 KB | Programmable Size 1328 */ 1329 static const struct pci_epc_features imx95_pcie_epc_features = { 1330 .msi_capable = true, 1331 .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, }, 1332 .align = SZ_4K, 1333 }; 1334 1335 static const struct pci_epc_features* 1336 imx_pcie_ep_get_features(struct dw_pcie_ep *ep) 1337 { 1338 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1339 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1340 1341 return imx_pcie->drvdata->epc_features; 1342 } 1343 1344 static const struct dw_pcie_ep_ops pcie_ep_ops = { 1345 .init = imx_pcie_ep_init, 1346 .raise_irq = imx_pcie_ep_raise_irq, 1347 .get_features = imx_pcie_ep_get_features, 1348 }; 1349 1350 static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, 1351 struct platform_device *pdev) 1352 { 1353 int ret; 1354 struct dw_pcie_ep *ep; 1355 struct dw_pcie *pci = imx_pcie->pci; 1356 struct dw_pcie_rp *pp = &pci->pp; 1357 struct device *dev = pci->dev; 1358 1359 imx_pcie_host_init(pp); 1360 ep = &pci->ep; 1361 ep->ops = &pcie_ep_ops; 1362 1363 if 
(imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT)) 1364 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 1365 1366 ep->page_size = imx_pcie->drvdata->epc_features->align; 1367 1368 ret = dw_pcie_ep_init(ep); 1369 if (ret) { 1370 dev_err(dev, "failed to initialize endpoint\n"); 1371 return ret; 1372 } 1373 1374 ret = dw_pcie_ep_init_registers(ep); 1375 if (ret) { 1376 dev_err(dev, "Failed to initialize DWC endpoint registers\n"); 1377 dw_pcie_ep_deinit(ep); 1378 return ret; 1379 } 1380 1381 pci_epc_init_notify(ep->epc); 1382 1383 /* Start LTSSM. */ 1384 imx_pcie_ltssm_enable(dev); 1385 1386 return 0; 1387 } 1388 1389 static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) 1390 { 1391 u8 offset; 1392 u16 val; 1393 struct dw_pcie *pci = imx_pcie->pci; 1394 1395 if (pci_msi_enabled()) { 1396 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1397 if (save) { 1398 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1399 imx_pcie->msi_ctrl = val; 1400 } else { 1401 dw_pcie_dbi_ro_wr_en(pci); 1402 val = imx_pcie->msi_ctrl; 1403 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1404 dw_pcie_dbi_ro_wr_dis(pci); 1405 } 1406 } 1407 } 1408 1409 static int imx_pcie_suspend_noirq(struct device *dev) 1410 { 1411 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 1412 1413 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) 1414 return 0; 1415 1416 imx_pcie_msi_save_restore(imx_pcie, true); 1417 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { 1418 /* 1419 * The minimum for a workaround would be to set PERST# and to 1420 * set the PCIE_TEST_PD flag. However, we can also disable the 1421 * clock which saves some power. 
1422 */ 1423 imx_pcie_assert_core_reset(imx_pcie); 1424 imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); 1425 } else { 1426 return dw_pcie_suspend_noirq(imx_pcie->pci); 1427 } 1428 1429 return 0; 1430 } 1431 1432 static int imx_pcie_resume_noirq(struct device *dev) 1433 { 1434 int ret; 1435 struct imx_pcie *imx_pcie = dev_get_drvdata(dev); 1436 1437 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) 1438 return 0; 1439 1440 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { 1441 ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); 1442 if (ret) 1443 return ret; 1444 ret = imx_pcie_deassert_core_reset(imx_pcie); 1445 if (ret) 1446 return ret; 1447 1448 /* 1449 * Using PCIE_TEST_PD seems to disable MSI and powers down the 1450 * root complex. This is why we have to setup the rc again and 1451 * why we have to restore the MSI register. 1452 */ 1453 ret = dw_pcie_setup_rc(&imx_pcie->pci->pp); 1454 if (ret) 1455 return ret; 1456 } else { 1457 ret = dw_pcie_resume_noirq(imx_pcie->pci); 1458 if (ret) 1459 return ret; 1460 } 1461 imx_pcie_msi_save_restore(imx_pcie, false); 1462 1463 return 0; 1464 } 1465 1466 static const struct dev_pm_ops imx_pcie_pm_ops = { 1467 NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq, 1468 imx_pcie_resume_noirq) 1469 }; 1470 1471 static int imx_pcie_probe(struct platform_device *pdev) 1472 { 1473 struct device *dev = &pdev->dev; 1474 struct dw_pcie *pci; 1475 struct imx_pcie *imx_pcie; 1476 struct device_node *np; 1477 struct resource *dbi_base; 1478 struct device_node *node = dev->of_node; 1479 int i, ret, req_cnt; 1480 u16 val; 1481 1482 imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL); 1483 if (!imx_pcie) 1484 return -ENOMEM; 1485 1486 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1487 if (!pci) 1488 return -ENOMEM; 1489 1490 pci->dev = dev; 1491 pci->ops = &dw_pcie_ops; 1492 1493 imx_pcie->pci = pci; 1494 imx_pcie->drvdata = of_device_get_match_data(dev); 1495 1496 
mutex_init(&imx_pcie->lock); 1497 1498 if (imx_pcie->drvdata->ops) 1499 pci->pp.ops = imx_pcie->drvdata->ops; 1500 else 1501 pci->pp.ops = &imx_pcie_host_dw_pme_ops; 1502 1503 /* Find the PHY if one is defined, only imx7d uses it */ 1504 np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); 1505 if (np) { 1506 struct resource res; 1507 1508 ret = of_address_to_resource(np, 0, &res); 1509 if (ret) { 1510 dev_err(dev, "Unable to map PCIe PHY\n"); 1511 return ret; 1512 } 1513 imx_pcie->phy_base = devm_ioremap_resource(dev, &res); 1514 if (IS_ERR(imx_pcie->phy_base)) 1515 return PTR_ERR(imx_pcie->phy_base); 1516 } 1517 1518 pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); 1519 if (IS_ERR(pci->dbi_base)) 1520 return PTR_ERR(pci->dbi_base); 1521 1522 /* Fetch GPIOs */ 1523 imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 1524 if (IS_ERR(imx_pcie->reset_gpiod)) 1525 return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod), 1526 "unable to get reset gpio\n"); 1527 gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); 1528 1529 if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) 1530 return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); 1531 1532 for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) 1533 imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; 1534 1535 /* Fetch clocks */ 1536 req_cnt = imx_pcie->drvdata->clks_cnt - imx_pcie->drvdata->clks_optional_cnt; 1537 ret = devm_clk_bulk_get(dev, req_cnt, imx_pcie->clks); 1538 if (ret) 1539 return ret; 1540 imx_pcie->clks[req_cnt].clk = devm_clk_get_optional(dev, "ref"); 1541 if (IS_ERR(imx_pcie->clks[req_cnt].clk)) 1542 return PTR_ERR(imx_pcie->clks[req_cnt].clk); 1543 1544 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) { 1545 imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); 1546 if (IS_ERR(imx_pcie->phy)) 1547 return dev_err_probe(dev, PTR_ERR(imx_pcie->phy), 1548 "failed to get pcie phy\n"); 1549 } 1550 1551 if (imx_check_flag(imx_pcie, 
IMX_PCIE_FLAG_HAS_APP_RESET)) { 1552 imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); 1553 if (IS_ERR(imx_pcie->apps_reset)) 1554 return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset), 1555 "failed to get pcie apps reset control\n"); 1556 } 1557 1558 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) { 1559 imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); 1560 if (IS_ERR(imx_pcie->pciephy_reset)) 1561 return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset), 1562 "Failed to get PCIEPHY reset control\n"); 1563 } 1564 1565 switch (imx_pcie->drvdata->variant) { 1566 case IMX8MQ: 1567 case IMX8MQ_EP: 1568 if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) 1569 imx_pcie->controller_id = 1; 1570 break; 1571 default: 1572 break; 1573 } 1574 1575 if (imx_pcie->drvdata->gpr) { 1576 /* Grab GPR config register range */ 1577 imx_pcie->iomuxc_gpr = 1578 syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr); 1579 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1580 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1581 "unable to find iomuxc registers\n"); 1582 } 1583 1584 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) { 1585 void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app"); 1586 1587 if (IS_ERR(off)) 1588 return dev_err_probe(dev, PTR_ERR(off), 1589 "unable to find serdes registers\n"); 1590 1591 static const struct regmap_config regmap_config = { 1592 .reg_bits = 32, 1593 .val_bits = 32, 1594 .reg_stride = 4, 1595 }; 1596 1597 imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, ®map_config); 1598 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1599 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1600 "unable to find iomuxc registers\n"); 1601 } 1602 1603 /* Grab PCIe PHY Tx Settings */ 1604 if (of_property_read_u32(node, "fsl,tx-deemph-gen1", 1605 &imx_pcie->tx_deemph_gen1)) 1606 imx_pcie->tx_deemph_gen1 = 0; 1607 1608 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", 1609 
&imx_pcie->tx_deemph_gen2_3p5db)) 1610 imx_pcie->tx_deemph_gen2_3p5db = 0; 1611 1612 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", 1613 &imx_pcie->tx_deemph_gen2_6db)) 1614 imx_pcie->tx_deemph_gen2_6db = 20; 1615 1616 if (of_property_read_u32(node, "fsl,tx-swing-full", 1617 &imx_pcie->tx_swing_full)) 1618 imx_pcie->tx_swing_full = 127; 1619 1620 if (of_property_read_u32(node, "fsl,tx-swing-low", 1621 &imx_pcie->tx_swing_low)) 1622 imx_pcie->tx_swing_low = 127; 1623 1624 /* Limit link speed */ 1625 pci->max_link_speed = 1; 1626 of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed); 1627 1628 imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); 1629 if (IS_ERR(imx_pcie->vpcie)) { 1630 if (PTR_ERR(imx_pcie->vpcie) != -ENODEV) 1631 return PTR_ERR(imx_pcie->vpcie); 1632 imx_pcie->vpcie = NULL; 1633 } 1634 1635 imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); 1636 if (IS_ERR(imx_pcie->vph)) { 1637 if (PTR_ERR(imx_pcie->vph) != -ENODEV) 1638 return PTR_ERR(imx_pcie->vph); 1639 imx_pcie->vph = NULL; 1640 } 1641 1642 platform_set_drvdata(pdev, imx_pcie); 1643 1644 ret = imx_pcie_attach_pd(dev); 1645 if (ret) 1646 return ret; 1647 1648 if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { 1649 ret = imx_add_pcie_ep(imx_pcie, pdev); 1650 if (ret < 0) 1651 return ret; 1652 } else { 1653 pci->pp.use_atu_msg = true; 1654 ret = dw_pcie_host_init(&pci->pp); 1655 if (ret < 0) 1656 return ret; 1657 1658 if (pci_msi_enabled()) { 1659 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1660 1661 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1662 val |= PCI_MSI_FLAGS_ENABLE; 1663 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1664 } 1665 } 1666 1667 return 0; 1668 } 1669 1670 static void imx_pcie_shutdown(struct platform_device *pdev) 1671 { 1672 struct imx_pcie *imx_pcie = platform_get_drvdata(pdev); 1673 1674 /* bring down link, so bootloader gets clean state in case of reboot */ 1675 
	imx_pcie_assert_core_reset(imx_pcie);
}

/* Per-SoC clock name lists; order must match devm_clk_bulk_get() usage */
static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
static const char * const imx95_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux", "ref"};

/*
 * Per-variant configuration table, indexed by enum imx_pcie_variants.
 * Selected via the of_device_id table below.
 */
static const struct imx_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
			 IMX_PCIE_FLAG_BROKEN_SUSPEND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.clk_names = imx6q_clks,
		.clks_cnt = ARRAY_SIZE(imx6q_clks),
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6q_pcie_core_reset,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.clk_names = imx6sx_clks,
		.clks_cnt = ARRAY_SIZE(imx6sx_clks),
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx6sx_pcie_init_phy,
		.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
		.core_reset = imx6sx_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.clk_names = imx6q_clks,
		.clks_cnt = ARRAY_SIZE(imx6q_clks),
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6qp_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.gpr = "fsl,imx7d-iomuxc-gpr",
		.clk_names = imx6q_clks,
		.clks_cnt = ARRAY_SIZE(imx6q_clks),
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx7d_pcie_enable_ref_clk,
		.core_reset = imx7d_pcie_core_reset,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.clk_names = imx8mq_clks,
		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		/* i.MX8MQ second controller uses a separate GPR12 field */
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM] = {
		.variant = IMX8MM,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP] = {
		.variant = IMX8MP,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q] = {
		.variant = IMX8Q,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.clk_names = imx8q_clks,
		.clks_cnt = ARRAY_SIZE(imx8q_clks),
	},
	[IMX95] = {
		.variant = IMX95,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_HAS_LUT |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.clk_names = imx95_clks,
		.clks_cnt = ARRAY_SIZE(imx95_clks),
		/* trailing "ref" clock is optional on i.MX95 */
		.clks_optional_cnt = 1,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.init_phy = imx95_pcie_init_phy,
	},
	[IMX8MQ_EP] = {
		.variant = IMX8MQ_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.clk_names = imx8mq_clks,
		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM_EP] = {
		.variant = IMX8MM_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP_EP] = {
		.variant = IMX8MP_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q_EP] = {
		.variant = IMX8Q_EP,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.epc_features = &imx8q_pcie_epc_features,
		.clk_names = imx8q_clks,
		.clks_cnt = ARRAY_SIZE(imx8q_clks),
	},
	[IMX95_EP] = {
		.variant = IMX95_EP,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_SUPPORT_64BIT,
		.clk_names = imx8mq_clks,
		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.init_phy = imx95_pcie_init_phy,
		.epc_features = &imx95_pcie_epc_features,
		.mode = DW_PCIE_EP_TYPE,
	},
};

static const struct of_device_id imx_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
	{ .compatible = "fsl,imx8q-pcie",  .data = &drvdata[IMX8Q],  },
	{ .compatible = "fsl,imx95-pcie",  .data = &drvdata[IMX95],  },
	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
	{ .compatible = "fsl,imx8q-pcie-ep",  .data = &drvdata[IMX8Q_EP],  },
	{ .compatible = "fsl,imx95-pcie-ep",  .data = &drvdata[IMX95_EP],  },
	{},
};

static struct platform_driver imx_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe    = imx_pcie_probe,
	.shutdown = imx_pcie_shutdown,
};

/*
 * Early config-space fixup for the Synopsys root port: on i.MX6Q the DBI
 * register set is only drvdata->dbi_length bytes, so cap cfg_size to
 * avoid aborts when the core reads beyond it.
 */
static void imx_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct dw_pcie_rp *pp = bus->sysdata;

	/* Bus parent is the PCI bridge, its parent is this platform driver */
	if (!bus->dev.parent || !bus->dev.parent->parent)
		return;

	/* Make sure we only quirk devices associated with this driver */
	if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
		return;

	if (pci_is_root_bus(bus)) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct imx_pcie *imx_pcie = to_imx_pcie(pci);

		/*
		 * Limit config length to avoid the kernel reading beyond
		 * the register set and causing an abort on i.MX 6Quad
		 */
		if (imx_pcie->drvdata->dbi_length) {
			dev->cfg_size = imx_pcie->drvdata->dbi_length;
			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
					dev->cfg_size);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
			PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);

static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
	struct device_node *np;

	/* Only install the abort handler if an i.MX PCIe node exists */
	np = of_find_matching_node(NULL, imx_pcie_of_match);
	if (!np)
		return -ENODEV;
	of_node_put(np);

	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx_pcie_driver);
}
/* device_initcall (not module_init): the ARM fault hook must be set early */
device_initcall(imx_pcie_init);