1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * PCIe host controller driver for Freescale i.MX6 SoCs 4 * 5 * Copyright (C) 2013 Kosagi 6 * https://www.kosagi.com 7 * 8 * Author: Sean Cross <xobs@kosagi.com> 9 */ 10 11 #include <linux/bitfield.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/gpio/consumer.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 18 #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> 19 #include <linux/module.h> 20 #include <linux/of.h> 21 #include <linux/of_address.h> 22 #include <linux/pci.h> 23 #include <linux/platform_device.h> 24 #include <linux/regmap.h> 25 #include <linux/regulator/consumer.h> 26 #include <linux/resource.h> 27 #include <linux/signal.h> 28 #include <linux/types.h> 29 #include <linux/interrupt.h> 30 #include <linux/reset.h> 31 #include <linux/phy/pcie.h> 32 #include <linux/phy/phy.h> 33 #include <linux/pm_domain.h> 34 #include <linux/pm_runtime.h> 35 36 #include "../../pci.h" 37 #include "pcie-designware.h" 38 39 #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) 40 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) 41 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) 42 #define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12) 43 #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) 44 45 #define IMX95_PCIE_PHY_GEN_CTRL 0x0 46 #define IMX95_PCIE_REF_USE_PAD BIT(17) 47 48 #define IMX95_PCIE_PHY_MPLLA_CTRL 0x10 49 #define IMX95_PCIE_PHY_MPLL_STATE BIT(30) 50 51 #define IMX95_PCIE_SS_RW_REG_0 0xf0 52 #define IMX95_PCIE_REF_CLKEN BIT(23) 53 #define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9) 54 #define IMX95_PCIE_SS_RW_REG_1 0xf4 55 #define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31) 56 57 #define IMX95_PE0_GEN_CTRL_1 0x1050 58 #define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0) 59 60 #define IMX95_PE0_GEN_CTRL_3 0x1058 61 #define IMX95_PCIE_LTSSM_EN BIT(0) 62 63 #define IMX95_PE0_LUT_ACSCTRL 0x1008 64 #define IMX95_PEO_LUT_RWA BIT(16) 65 #define IMX95_PE0_LUT_ENLOC GENMASK(4, 0) 
/* LUT DATA1: stream ID / DAC ID programmed for a matching Requester ID */
#define IMX95_PE0_LUT_DATA1		0x100c
#define IMX95_PE0_LUT_VLD		BIT(31)
#define IMX95_PE0_LUT_DAC_ID		GENMASK(10, 8)
#define IMX95_PE0_LUT_STREAM_ID		GENMASK(5, 0)

/* LUT DATA2: Requester ID match value and mask */
#define IMX95_PE0_LUT_DATA2		0x1010
#define IMX95_PE0_LUT_REQID		GENMASK(31, 16)
#define IMX95_PE0_LUT_MASK		GENMASK(15, 0)

#define IMX95_SID_MASK			GENMASK(5, 0)
#define IMX95_MAX_LUT			32

#define IMX95_PCIE_RST_CTRL		0x3010
#define IMX95_PCIE_COLD_RST		BIT(0)

#define to_imx_pcie(x)	dev_get_drvdata((x)->dev)

/*
 * Supported controller variants; the *_EP entries select endpoint mode on
 * the same SoC as the corresponding root-complex entry.
 */
enum imx_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
	IMX8MM,
	IMX8MP,
	IMX8Q,
	IMX95,
	IMX8MQ_EP,
	IMX8MM_EP,
	IMX8MP_EP,
	IMX8Q_EP,
	IMX95_EP,
};

/* Per-variant capability/quirk flags, stored in imx_pcie_drvdata::flags */
#define IMX_PCIE_FLAG_IMX_PHY			BIT(0)
#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND	BIT(1)
#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
#define IMX_PCIE_FLAG_HAS_PHYDRV		BIT(3)
#define IMX_PCIE_FLAG_HAS_APP_RESET		BIT(4)
#define IMX_PCIE_FLAG_HAS_PHY_RESET		BIT(5)
#define IMX_PCIE_FLAG_HAS_SERDES		BIT(6)
#define IMX_PCIE_FLAG_SUPPORT_64BIT		BIT(7)
#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP		BIT(8)
/*
 * Because of ERR005723 (PCIe does not support L2 power down) we need to
 * workaround suspend resume on some devices which are affected by this errata.
 */
#define IMX_PCIE_FLAG_BROKEN_SUSPEND		BIT(9)
#define IMX_PCIE_FLAG_HAS_LUT			BIT(10)
#define IMX_PCIE_FLAG_8GT_ECN_ERR051586		BIT(11)

#define imx_check_flag(pci, val)	(pci->drvdata->flags & val)

#define IMX_PCIE_MAX_INSTANCES		2

struct imx_pcie;

/*
 * Per-variant constant data: GPR layout, LTSSM/mode control bits and the
 * variant-specific callbacks used by the common code paths below.
 */
struct imx_pcie_drvdata {
	enum imx_pcie_variants variant;
	enum dw_pcie_device_mode mode;
	u32 flags;
	int dbi_length;
	const char *gpr;
	const u32 ltssm_off;
	const u32 ltssm_mask;
	const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
	const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
	const struct pci_epc_features *epc_features;
	int (*init_phy)(struct imx_pcie *pcie);
	int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
	int (*core_reset)(struct imx_pcie *pcie, bool assert);
	int (*wait_pll_lock)(struct imx_pcie *pcie);
	const struct dw_pcie_host_ops *ops;
};

/* Saved contents of one LUT entry (DATA1/DATA2 register pair) */
struct imx_lut_data {
	u32 data1;
	u32 data2;
};

struct imx_pcie {
	struct dw_pcie *pci;
	struct gpio_desc *reset_gpiod;
	struct clk_bulk_data *clks;
	int num_clks;
	struct regmap *iomuxc_gpr;
	u16 msi_ctrl;
	u32 controller_id;
	struct reset_control *pciephy_reset;
	struct reset_control *apps_reset;
	/* PHY tuning values, parsed from DT (see imx_pcie_init_phy()) */
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	struct regulator *vpcie;
	struct regulator *vph;
	void __iomem *phy_base;

	/* LUT data for pcie */
	struct imx_lut_data luts[IMX95_MAX_LUT];
	/* power domain for pcie */
	struct device *pd_pcie;
	/* power domain for pcie phy */
	struct device *pd_pcie_phy;
	struct phy *phy;
	const struct imx_pcie_drvdata *drvdata;

	/* Ensure that only one device's LUT is configured at any given time */
	struct mutex lock;
};

/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)	FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR	BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT	BIT(17)
#define PCIE_PHY_CTRL_WR	BIT(18)
#define PCIE_PHY_CTRL_RD	BIT(19)

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK	BIT(16)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define PCIE_PHY_ATEOVRD_EN			BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15		0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO		0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN	BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN	BIT(3)

/*
 * Return the IOMUXC GPR offset carrying this controller instance's PCIe
 * bits: GPR16 for controller 1, GPR14 otherwise. Only the i.MX8M family
 * uses this layout, hence the WARN_ON() for other variants.
 */
static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
	WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
		imx_pcie->drvdata->variant != IMX8MQ_EP &&
		imx_pcie->drvdata->variant != IMX8MM &&
		imx_pcie->drvdata->variant != IMX8MM_EP &&
		imx_pcie->drvdata->variant != IMX8MP &&
		imx_pcie->drvdata->variant != IMX8MP_EP);
	return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

/* One-time i.MX95 PHY/glue setup: ERR051624 workaround and refclk routing */
static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	/*
	 * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
	 * Through Beacon or PERST# De-assertion
	 *
	 * When the auxiliary power is not available, the controller
	 * cannot exit from L23 Ready with beacon or PERST# de-assertion
	 * when main power is not removed.
	 *
	 * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
	 */
	regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
			IMX95_PCIE_SYS_AUX_PWR_DET);

	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_PHY_CR_PARA_SEL,
			   IMX95_PCIE_PHY_CR_PARA_SEL);

	/* Use the internal reference clock, not the pad, and gate it on */
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_PHY_GEN_CTRL,
			   IMX95_PCIE_REF_USE_PAD, 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_REF_CLKEN,
			   IMX95_PCIE_REF_CLKEN);

	return 0;
}

/*
 * Program the RC/EP device-type field for this controller into the GPR
 * block described by drvdata->mode_off[]/mode_mask[].
 */
static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	unsigned int mask, val, mode, id;

	if (drvdata->mode == DW_PCIE_EP_TYPE)
		mode = PCI_EXP_TYPE_ENDPOINT;
	else
		mode = PCI_EXP_TYPE_ROOT_PORT;

	id = imx_pcie->controller_id;

	/* If mode_mask is 0, generic PHY driver is used to set the mode */
	if (!drvdata->mode_mask[0])
		return;

	/* If mode_mask[id] is 0, each controller has its individual GPR */
	if (!drvdata->mode_mask[id])
		id = 0;

	mask = drvdata->mode_mask[id];
	val = mode << (ffs(mask) - 1);

	regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}

/*
 * Busy-wait (1us per iteration, at most 10 iterations) until the PHY
 * control-bus ACK bit in PCIE_PHY_STAT equals @exp_val.
 * Returns 0 on match, -ETIMEDOUT otherwise.
 */
static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

/*
 * Latch @addr into the PHY control interface (address capture handshake):
 * present the address, pulse CAP_ADR, and wait for ACK assert/de-assert.
 */
static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/*
 * Write @data to the 16-bit PHY control register @addr, following the
 * capture-data / write-strobe handshake with an ACK poll at each step.
 */
static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

/* i.MX8MQ PHY setup: select the pad reference clock and fix VREG bypass */
static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	/* TODO: This code assumes external oscillator is being used */
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   imx_pcie_grp_offset(imx_pcie),
			   IMX8MQ_GPR_PCIE_REF_USE_PAD,
			   IMX8MQ_GPR_PCIE_REF_USE_PAD);
	/*
	 * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
	 * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
	 * to zero.
	 */
	if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
		regmap_update_bits(imx_pcie->iomuxc_gpr,
				   imx_pcie_grp_offset(imx_pcie),
				   IMX8MQ_GPR_PCIE_VREG_BYPASS,
				   0);

	return 0;
}

/*
 * i.MX6 PHY setup: select internal refclk routing and program the DT-
 * provided TX de-emphasis/swing tuning values into IOMUXC GPR8/GPR12.
 */
static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
			   imx_pcie->tx_deemph_gen1 << 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
			   imx_pcie->tx_deemph_gen2_3p5db << 6);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
			   imx_pcie->tx_deemph_gen2_6db << 12);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_FULL,
			   imx_pcie->tx_swing_full << 18);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_LOW,
			   imx_pcie->tx_swing_low << 25);
	return 0;
}

/* i.MX6SX: select RX equalizer level 2, then do common i.MX6 PHY setup */
static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);

	return imx_pcie_init_phy(imx_pcie);
}

/* Poll GPR22 until the i.MX7D PHY PLL reports lock; log on timeout */
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}

/* Poll the i.MX95 MPLLA state bit; unlike the i.MX7D variant, report it */
static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
				     IMX95_PCIE_PHY_MPLLA_CTRL, val,
				     val & IMX95_PCIE_PHY_MPLL_STATE,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT)) {
		dev_err(dev, "PCIe PLL lock timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Reconfigure the PHY MPLL multiplier/divider for the actual "pcie_phy"
 * reference clock rate (100/200 MHz); 125 MHz is the PHY's default and
 * needs no override. Only used on variants with IMX_PCIE_FLAG_IMX_PHY.
 */
static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
	unsigned long phy_rate = 0;
	int mult, div;
	u16 val;
	int i;
	struct clk_bulk_data *clks = imx_pcie->clks;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return 0;

	for (i = 0; i < imx_pcie->num_clks; i++)
		if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
			phy_rate = clk_get_rate(clks[i].clk);

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

/*
 * Pulse the RX override bits (data enable + PLL enable) to reset the
 * classic i.MX PHY receiver path; no-op on variants without that PHY.
 */
static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
	u16 tmp;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return;

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

#ifdef CONFIG_ARM
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	/* Destination register field (bits 15:12) of the faulting insn */
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		/* Byte load (LDRB) gets 0xff, word load gets all-ones */
		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	/* NOTE(review): mask matches halfword load encodings — see ARM ARM */
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
#endif

/*
 * Attach the "pcie" and "pcie_phy" power domains and link them to the
 * device so runtime PM keeps them powered. Does nothing when the device
 * already sits in a single power domain.
 */
static int imx_pcie_attach_pd(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx_pcie->pd_pcie))
		return PTR_ERR(imx_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd\n");
		return -EINVAL;
	}

	imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx_pcie->pd_pcie_phy))
		return PTR_ERR(imx_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
		return -EINVAL;
	}

	return 0;
}

/* i.MX6SX refclk gate: TEST_POWERDOWN cleared == reference clock running */
static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
			   enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
	return 0;
}

static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	if (enable) {
		/* power up core phy and enable ref clock */
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
		/*
		 * The async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * Add a ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
	} else {
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	}

	return 0;
}

/* i.MX8M: force CLKREQ# override while enabled so the refclk keeps running */
static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	int offset = imx_pcie_grp_offset(imx_pcie);

	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
			   enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
			   enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
	return 0;
}

/* i.MX7D: REFCLK_SEL cleared == PHY reference clock enabled */
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
			   enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
	return 0;
}

/*
 * Enable the bulk clocks and, when the variant provides one, the
 * reference-clock hook; unwinds the bulk clocks on failure.
 */
static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
	if (ret)
		return ret;

	if (imx_pcie->drvdata->enable_ref_clk) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret) {
			dev_err(dev, "Failed to enable PCIe REFCLK\n");
			goto err_ref_clk;
		}
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);
	return 0;

err_ref_clk:
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);

	return ret;
}

/* Mirror of imx_pcie_clk_enable(): refclk hook off first, then bulk clocks */
static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
	if (imx_pcie->drvdata->enable_ref_clk)
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}

static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (assert)
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_TEST_POWERDOWN);

	/* Force PCIe PHY reset */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
			   assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
	return 0;
}

static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
			   assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
	/* Give the core time to come out of reset before first access */
	if (!assert)
		usleep_range(200, 500);

	return 0;
}

/*
 * i.MX6Q has no SW reset bit; "assert" powers the PHY down via TEST_PD
 * while keeping the reference clock on. De-assert is a no-op here (the
 * refclk hook brings the PHY back up).
 */
static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (!assert)
		return 0;

	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);

	return 0;
}

static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;

	if (assert)
		return 0;

	/*
	 * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
	 *
	 * PCIe: PLL may fail to lock under corner conditions.
	 *
	 * Initial VCO oscillation may fail under corner conditions such as
	 * cold temperature which will cause the PCIe PLL fail to lock in the
	 * initialization phase.
	 *
	 * The Duty-cycle Corrector calibration must be disabled.
	 *
	 * 1. De-assert the G_RST signal by clearing
	 *    SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
	 * 2. De-assert DCC_FB_EN by writing data "0x29" to the register
	 *    address 0x306d0014 (PCIE_PHY_CMN_REG4).
	 * 3. Assert RX_EQS, RX_EQ_SEL by writing data "0x48" to the register
	 *    address 0x306d0090 (PCIE_PHY_CMN_REG24).
	 * 4. Assert ATT_MODE by writing data "0xbc" to the register
	 *    address 0x306d0098 (PCIE_PHY_CMN_REG26).
	 * 5.
	 *    De-assert the CMN_RST signal by clearing register bit
	 *    SRC_PCIEPHY_RCR[PCIEPHY_BTN]
	 */

	if (likely(imx_pcie->phy_base)) {
		/* De-assert DCC_FB_EN */
		writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
		/* Assert RX_EQS and RX_EQS_SEL */
		writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
		       imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
		/* Assert ATT_MODE */
		writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
	} else {
		dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
	}
	imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
	return 0;
}

/* Toggle the i.MX95 COLD reset bit with the timing the PHY requires */
static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	u32 val;

	if (assert) {
		/*
		 * From i.MX95 PCIe PHY perspective, the COLD reset toggle
		 * should be complete after power-up by the following sequence.
		 *           > 10us(at power-up)
		 *           > 10ns(warm reset)
		 *           |<------------>|
		 *            ______________
		 * phy_reset ____/          \________________
		 *                                 ____________
		 * ref_clk_en_____________________/
		 * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
		 */
		regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				IMX95_PCIE_COLD_RST);
		/*
		 * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
		 * hardware by doing a read. Otherwise, there is no guarantee
		 * that the write has reached the hardware before udelay().
		 */
		regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				     &val);
		udelay(15);
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				  IMX95_PCIE_COLD_RST);
		regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				     &val);
		udelay(10);
	}

	return 0;
}

/* Put PHY and core into reset, then assert PERST# towards the slot */
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_assert(imx_pcie->pciephy_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, true);

	/* Some boards don't have PCIe reset GPIO. */
	gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
}

/* Release PHY/core reset and de-assert PERST# with the spec-mandated delay */
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_deassert(imx_pcie->pciephy_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, false);

	/* Some boards don't have PCIe reset GPIO. */
	if (imx_pcie->reset_gpiod) {
		msleep(100);
		gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
		msleep(100);
	}

	return 0;
}

/*
 * Poll (up to 200 * ~100us-1ms) for the directed speed change started in
 * imx_pcie_start_link() to complete; the core clears the bit when done.
 */
static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -ETIMEDOUT;
}

/*
 * Kick the LTSSM: tell the PHY the advertised max speed, set the GPR
 * LTSSM-enable bit where the variant has one, and release apps_reset.
 */
static void imx_pcie_ltssm_enable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
	u32 tmp;

	tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
	phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
				   drvdata->ltssm_mask);

	reset_control_deassert(imx_pcie->apps_reset);
}

/* Inverse of imx_pcie_ltssm_enable(): speed 0, LTSSM bit off, apps_reset on */
static void imx_pcie_ltssm_disable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;

	phy_set_speed(imx_pcie->phy, 0);
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
				   drvdata->ltssm_mask, 0);

	reset_control_assert(imx_pcie->apps_reset);
}

/*
 * Bring the link up. On variants with the speed-change workaround the
 * link is first trained at Gen1 and then retrained at the configured
 * maximum speed via a directed speed change.
 */
static int imx_pcie_start_link(struct dw_pcie *pci)
{
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	struct device *dev = pci->dev;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 tmp;
	int ret;

	if (!(imx_pcie->drvdata->flags &
	      IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
		imx_pcie_ltssm_enable(dev);
		return 0;
	}

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	dw_pcie_dbi_ro_wr_en(pci);
	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	tmp &= ~PCI_EXP_LNKCAP_SLS;
	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Start LTSSM. */
	imx_pcie_ltssm_enable(dev);

	if (pci->max_link_speed > 1) {
		/* Wait for Gen1 training to finish before retraining faster */
		ret = dw_pcie_wait_for_link(pci);
		if (ret)
			goto err_reset_phy;

		/* Allow faster modes after the link is up */
		dw_pcie_dbi_ro_wr_en(pci);
		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		tmp &= ~PCI_EXP_LNKCAP_SLS;
		tmp |= pci->max_link_speed;
		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
		dw_pcie_dbi_ro_wr_dis(pci);

		ret = imx_pcie_wait_for_speed_change(imx_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Only Gen1 is enabled\n");
	}

	return 0;

err_reset_phy:
	/* Link-up failure is not fatal: reset the PHY and return success */
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx_pcie_reset_phy(imx_pcie);
	return 0;
}

static void imx_pcie_stop_link(struct dw_pcie *pci)
{
	struct device *dev = pci->dev;

	/* Turn off PCIe LTSSM */
	imx_pcie_ltssm_disable(dev);
}

/*
 * Map Requester ID @rid to stream ID @sid in the first free LUT entry.
 * Returns 0 on success (or if an entry for @rid already exists),
 * -EINVAL for a stream ID that does not fit the 6-bit field, -ENOSPC
 * when all IMX95_MAX_LUT entries are in use.
 */
static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 data1, data2;
	int free = -1;
	int i;

	/* IMX95_PE0_LUT_STREAM_ID is only 6 bits wide */
	if (sid >= 64) {
		dev_err(dev, "Invalid SID for index %d\n", sid);
		return -EINVAL;
	}

	guard(mutex)(&imx_pcie->lock);

	/*
	 * Iterate through all LUT entries to check for duplicate RID and
	 * identify the first available entry. Configure this available entry
	 * immediately after verification to avoid rescanning it.
	 */
	for (i = 0; i < IMX95_MAX_LUT; i++) {
		/* NOTE(review): RWA bit selects a read access — confirm in RM */
		regmap_write(imx_pcie->iomuxc_gpr,
			     IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);

		if (!(data1 & IMX95_PE0_LUT_VLD)) {
			if (free < 0)
				free = i;
			continue;
		}

		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);

		/* Do not add duplicate RID */
		if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
			dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
			return 0;
		}
	}

	if (free < 0) {
		dev_err(dev, "LUT entry is not available\n");
		return -ENOSPC;
	}

	data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
	data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
	data1 |= IMX95_PE0_LUT_VLD;
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);

	if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
		data2 = 0x7; /* In the EP mode, only 'Device ID' is required */
	else
		data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
	data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);

	/* Write the index without the RWA bit to commit the entry */
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);

	return 0;
}

/* Find the LUT entry matching @rid, clear its data and commit the wipe */
static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
{
	u32 data2;
	int i;

	guard(mutex)(&imx_pcie->lock);

	for (i = 0; i < IMX95_MAX_LUT; i++) {
		regmap_write(imx_pcie->iomuxc_gpr,
			     IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
		if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_DATA1, 0);
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_DATA2, 0);
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_ACSCTRL, i);

			break;
		}
	}
}

/*
 * Derive a stream ID for @rid from the DT "iommu-map" / "msi-map"
 * properties and install it in the LUT. See the tables below for how the
 * of_map_id() outcomes combine.
 */
static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid)
{
	struct device *dev = imx_pcie->pci->dev;
	struct device_node *target;
	u32 sid_i, sid_m;
	int err_i, err_m;
	u32 sid = 0;

	target = NULL;
	err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
			  &target, &sid_i);
	if (target) {
		of_node_put(target);
	} else {
		/*
		 * "target == NULL && err_i == 0" means RID out of map range.
		 * Use 1:1 map RID to streamID. Hardware can't support this
		 * because the streamID is only 6 bits
		 */
		err_i = -EINVAL;
	}

	target = NULL;
	err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
			  &target, &sid_m);

	/*
	 * err_m    target
	 *    0      NULL    RID out of range. Use 1:1 map RID to
	 *                   streamID, Current hardware can't
	 *                   support it, so return -EINVAL.
1131 * != 0 NULL msi-map does not exist, use built-in MSI 1132 * 0 != NULL Get correct streamID from RID 1133 * != 0 != NULL Invalid combination 1134 */ 1135 if (!err_m && !target) 1136 return -EINVAL; 1137 else if (target) 1138 of_node_put(target); /* Find streamID map entry for RID in msi-map */ 1139 1140 /* 1141 * msi-map iommu-map 1142 * N N DWC MSI Ctrl 1143 * Y Y ITS + SMMU, require the same SID 1144 * Y N ITS 1145 * N Y DWC MSI Ctrl + SMMU 1146 */ 1147 if (err_i && err_m) 1148 return 0; 1149 1150 if (!err_i && !err_m) { 1151 /* 1152 * Glue Layer 1153 * <==========> 1154 * ┌─────┐ ┌──────────┐ 1155 * │ LUT │ 6-bit streamID │ │ 1156 * │ │─────────────────►│ MSI │ 1157 * └─────┘ 2-bit ctrl ID │ │ 1158 * ┌───────────►│ │ 1159 * (i.MX95) │ │ │ 1160 * 00 PCIe0 │ │ │ 1161 * 01 ENETC │ │ │ 1162 * 10 PCIe1 │ │ │ 1163 * │ └──────────┘ 1164 * The MSI glue layer auto adds 2 bits controller ID ahead of 1165 * streamID, so mask these 2 bits to get streamID. The 1166 * IOMMU glue layer doesn't do that. 
1167 */ 1168 if (sid_i != (sid_m & IMX95_SID_MASK)) { 1169 dev_err(dev, "iommu-map and msi-map entries mismatch!\n"); 1170 return -EINVAL; 1171 } 1172 } 1173 1174 if (!err_i) 1175 sid = sid_i; 1176 else if (!err_m) 1177 sid = sid_m & IMX95_SID_MASK; 1178 1179 return imx_pcie_add_lut(imx_pcie, rid, sid); 1180 } 1181 1182 static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) 1183 { 1184 struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); 1185 1186 return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev)); 1187 } 1188 1189 static void imx_pcie_disable_device(struct pci_host_bridge *bridge, 1190 struct pci_dev *pdev) 1191 { 1192 struct imx_pcie *imx_pcie; 1193 1194 imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); 1195 imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev)); 1196 } 1197 1198 static int imx_pcie_host_init(struct dw_pcie_rp *pp) 1199 { 1200 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1201 struct device *dev = pci->dev; 1202 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1203 int ret; 1204 1205 if (imx_pcie->vpcie) { 1206 ret = regulator_enable(imx_pcie->vpcie); 1207 if (ret) { 1208 dev_err(dev, "failed to enable vpcie regulator: %d\n", 1209 ret); 1210 return ret; 1211 } 1212 } 1213 1214 if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) { 1215 pp->bridge->enable_device = imx_pcie_enable_device; 1216 pp->bridge->disable_device = imx_pcie_disable_device; 1217 } 1218 1219 imx_pcie_assert_core_reset(imx_pcie); 1220 1221 if (imx_pcie->drvdata->init_phy) 1222 imx_pcie->drvdata->init_phy(imx_pcie); 1223 1224 imx_pcie_configure_type(imx_pcie); 1225 1226 ret = imx_pcie_clk_enable(imx_pcie); 1227 if (ret) { 1228 dev_err(dev, "unable to enable pcie clocks: %d\n", ret); 1229 goto err_reg_disable; 1230 } 1231 1232 if (imx_pcie->phy) { 1233 ret = phy_init(imx_pcie->phy); 1234 if (ret) { 1235 dev_err(dev, "pcie PHY power up failed\n"); 1236 goto err_clk_disable; 1237 } 1238 1239 ret = 
phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, 1240 imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ? 1241 PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC); 1242 if (ret) { 1243 dev_err(dev, "unable to set PCIe PHY mode\n"); 1244 goto err_phy_exit; 1245 } 1246 1247 ret = phy_power_on(imx_pcie->phy); 1248 if (ret) { 1249 dev_err(dev, "waiting for PHY ready timeout!\n"); 1250 goto err_phy_exit; 1251 } 1252 } 1253 1254 /* Make sure that PCIe LTSSM is cleared */ 1255 imx_pcie_ltssm_disable(dev); 1256 1257 ret = imx_pcie_deassert_core_reset(imx_pcie); 1258 if (ret < 0) { 1259 dev_err(dev, "pcie deassert core reset failed: %d\n", ret); 1260 goto err_phy_off; 1261 } 1262 1263 if (imx_pcie->drvdata->wait_pll_lock) { 1264 ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie); 1265 if (ret < 0) 1266 goto err_phy_off; 1267 } 1268 1269 imx_setup_phy_mpll(imx_pcie); 1270 1271 return 0; 1272 1273 err_phy_off: 1274 phy_power_off(imx_pcie->phy); 1275 err_phy_exit: 1276 phy_exit(imx_pcie->phy); 1277 err_clk_disable: 1278 imx_pcie_clk_disable(imx_pcie); 1279 err_reg_disable: 1280 if (imx_pcie->vpcie) 1281 regulator_disable(imx_pcie->vpcie); 1282 return ret; 1283 } 1284 1285 static void imx_pcie_host_exit(struct dw_pcie_rp *pp) 1286 { 1287 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1288 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1289 1290 if (imx_pcie->phy) { 1291 if (phy_power_off(imx_pcie->phy)) 1292 dev_err(pci->dev, "unable to power off PHY\n"); 1293 phy_exit(imx_pcie->phy); 1294 } 1295 imx_pcie_clk_disable(imx_pcie); 1296 1297 if (imx_pcie->vpcie) 1298 regulator_disable(imx_pcie->vpcie); 1299 } 1300 1301 static void imx_pcie_host_post_init(struct dw_pcie_rp *pp) 1302 { 1303 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1304 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1305 u32 val; 1306 1307 if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) { 1308 /* 1309 * ERR051586: Compliance with 8GT/s Receiver Impedance ECN 1310 * 1311 * The default value of 
GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL] 1312 * is 1 which makes receiver non-compliant with the ZRX-DC 1313 * parameter for 2.5 GT/s when operating at 8 GT/s or higher. 1314 * It causes unnecessary timeout in L1. 1315 * 1316 * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL] 1317 * to 0. 1318 */ 1319 dw_pcie_dbi_ro_wr_en(pci); 1320 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 1321 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; 1322 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 1323 dw_pcie_dbi_ro_wr_dis(pci); 1324 } 1325 } 1326 1327 /* 1328 * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2 1329 * register is reserved, so the generic DWC implementation of sending the 1330 * PME_Turn_Off message using a dummy MMIO write cannot be used. 1331 */ 1332 static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp) 1333 { 1334 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1335 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1336 1337 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); 1338 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); 1339 1340 usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); 1341 } 1342 1343 static const struct dw_pcie_host_ops imx_pcie_host_ops = { 1344 .init = imx_pcie_host_init, 1345 .deinit = imx_pcie_host_exit, 1346 .pme_turn_off = imx_pcie_pme_turn_off, 1347 }; 1348 1349 static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = { 1350 .init = imx_pcie_host_init, 1351 .deinit = imx_pcie_host_exit, 1352 .post_init = imx_pcie_host_post_init, 1353 }; 1354 1355 static const struct dw_pcie_ops dw_pcie_ops = { 1356 .start_link = imx_pcie_start_link, 1357 .stop_link = imx_pcie_stop_link, 1358 }; 1359 1360 static void imx_pcie_ep_init(struct dw_pcie_ep *ep) 1361 { 1362 enum pci_barno bar; 1363 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1364 1365 for (bar = BAR_0; bar <= BAR_5; bar++) 1366 dw_pcie_ep_reset_bar(pci, bar); 1367 } 1368 
/* Endpoint IRQ raise hook: dispatch to the generic DWC INTx/MSI/MSI-X helpers. */
static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_IRQ_INTX:
		return dw_pcie_ep_raise_intx_irq(ep, func_no);
	case PCI_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_IRQ_MSIX:
		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;	/* not reached: every case above returns */
}

static const struct pci_epc_features imx8m_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
	.bar[BAR_5] = { .type = BAR_RESERVED, },
	.align = SZ_64K,
};

static const struct pci_epc_features imx8q_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.bar[BAR_5] = { .type = BAR_RESERVED, },
	.align = SZ_64K,
};

/*
 *	 | Default  | Default | Default | BAR Sizing
 * BAR#	 | Enable?  | Type    | Size	| Scheme
 * =======================================================
 * BAR0	 | Enable   | 64-bit  |  1 MB	| Programmable Size
 * BAR1	 | Disable  | 32-bit  | 64 KB	| Fixed Size
 *			(BAR1 should be disabled if BAR0 is 64-bit)
 * BAR2	 | Enable   | 32-bit  |  1 MB	| Programmable Size
 * BAR3	 | Enable   | 32-bit  | 64 KB	| Programmable Size
 * BAR4	 | Enable   | 32-bit  |  1 MB	| Programmable Size
 * BAR5	 | Enable   | 32-bit  | 64 KB	| Programmable Size
 */
static const struct pci_epc_features imx95_pcie_epc_features = {
	.msi_capable = true,
	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
	.align = SZ_4K,
};

static const struct pci_epc_features*
imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	return imx_pcie->drvdata->epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.init = imx_pcie_ep_init,
	.raise_irq = imx_pcie_ep_raise_irq,
	.get_features = imx_pcie_ep_get_features,
};

/*
 * Endpoint-mode bring-up: reuse the host power-up sequence, then register
 * the DWC endpoint controller and notify the EPC core.
 */
static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
			   struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct dw_pcie *pci = imx_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = pci->dev;

	imx_pcie_host_init(pp);
	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	ep->page_size = imx_pcie->drvdata->epc_features->align;

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}
	/* Errata post-init (e.g. ERR051586) also applies in EP mode. */
	imx_pcie_host_post_init(pp);

	ret = dw_pcie_ep_init_registers(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC endpoint registers\n");
		dw_pcie_ep_deinit(ep);
		return ret;
	}

	pci_epc_init_notify(ep->epc);

	return 0;
}

/*
 * Save (@save == true) or restore the MSI capability flags register across
 * suspend/resume; it is lost when the core powers down.
 */
static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
	u8 offset;
	u16 val;
	struct dw_pcie *pci = imx_pcie->pci;

	if (pci_msi_enabled()) {
		offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
		if (save) {
			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
			imx_pcie->msi_ctrl = val;
		} else {
			dw_pcie_dbi_ro_wr_en(pci);
			val = imx_pcie->msi_ctrl;
			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
			dw_pcie_dbi_ro_wr_dis(pci);
		}
	}
}

/* Snapshot all valid LUT entries into imx_pcie->luts for suspend. */
static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
{
	u32 data1, data2;
	int i;

	for (i = 0; i < IMX95_MAX_LUT; i++) {
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
			     IMX95_PEO_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
		if (data1 & IMX95_PE0_LUT_VLD) {
			imx_pcie->luts[i].data1 = data1;
			imx_pcie->luts[i].data2 = data2;
		} else {
			imx_pcie->luts[i].data1 = 0;
			imx_pcie->luts[i].data2 = 0;
		}
	}
}

/* Re-program the LUT entries saved by imx_pcie_lut_save() on resume. */
static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
{
	int i;

	for (i = 0; i < IMX95_MAX_LUT; i++) {
		if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
			continue;

		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
			     imx_pcie->luts[i].data1);
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
			     imx_pcie->luts[i].data2);
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
	}
}

static int imx_pcie_suspend_noirq(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx_pcie_msi_save_restore(imx_pcie, true);
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
		imx_pcie_lut_save(imx_pcie);
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		/*
		 * The minimum for a workaround would be to set PERST# and to
		 * set the PCIE_TEST_PD flag. However, we can also disable the
		 * clock which saves some power.
		 */
		imx_pcie_assert_core_reset(imx_pcie);
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	} else {
		return dw_pcie_suspend_noirq(imx_pcie->pci);
	}

	return 0;
}

static int imx_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret)
			return ret;
		ret = imx_pcie_deassert_core_reset(imx_pcie);
		if (ret)
			return ret;

		/*
		 * Using PCIE_TEST_PD seems to disable MSI and powers down the
		 * root complex. This is why we have to setup the rc again and
		 * why we have to restore the MSI register.
		 */
		ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
		if (ret)
			return ret;
	} else {
		ret = dw_pcie_resume_noirq(imx_pcie->pci);
		if (ret)
			return ret;
	}
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
		imx_pcie_lut_restore(imx_pcie);
	imx_pcie_msi_save_restore(imx_pcie, false);

	return 0;
}

static const struct dev_pm_ops imx_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
				  imx_pcie_resume_noirq)
};

static int imx_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx_pcie *imx_pcie;
	struct device_node *np;
	struct device_node *node = dev->of_node;
	int ret, domain;
	u16 val;

	imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
	if (!imx_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx_pcie->pci = pci;
	imx_pcie->drvdata = of_device_get_match_data(dev);

	mutex_init(&imx_pcie->lock);

	if (imx_pcie->drvdata->ops)
		pci->pp.ops = imx_pcie->drvdata->ops;
	else
		pci->pp.ops = &imx_pcie_host_dw_pme_ops;

	/* Find the PHY if one is defined, only imx7d uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx_pcie->phy_base))
			return PTR_ERR(imx_pcie->phy_base);
	}

	/* Fetch GPIOs */
	imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(imx_pcie->reset_gpiod))
		return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
				     "unable to get reset gpio\n");
	gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");

	/* Fetch clocks */
	imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
	if (imx_pcie->num_clks < 0)
		return dev_err_probe(dev, imx_pcie->num_clks,
				     "failed to get clocks\n");

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
		imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
		if (IS_ERR(imx_pcie->phy))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
					     "failed to get pcie phy\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
		imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
		if (IS_ERR(imx_pcie->apps_reset))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
					     "failed to get pcie apps reset control\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
		imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
		if (IS_ERR(imx_pcie->pciephy_reset))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
					     "Failed to get PCIEPHY reset control\n");
	}

	switch (imx_pcie->drvdata->variant) {
	case IMX8MQ:
	case IMX8MQ_EP:
		/* i.MX8MQ has two controllers; GPR bits differ per domain. */
		domain = of_get_pci_domain_nr(node);
		if (domain < 0 || domain > 1)
			return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");

		imx_pcie->controller_id = domain;
		break;
	default:
		break;
	}

	if (imx_pcie->drvdata->gpr) {
		/* Grab GPR config register range */
		imx_pcie->iomuxc_gpr =
			 syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
		if (IS_ERR(imx_pcie->iomuxc_gpr))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
					     "unable to find iomuxc registers\n");
	}

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
		void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");

		if (IS_ERR(off))
			return dev_err_probe(dev, PTR_ERR(off),
					     "unable to find serdes registers\n");

		static const struct regmap_config regmap_config = {
			.reg_bits = 32,
			.val_bits = 32,
			.reg_stride = 4,
		};

		imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
		if (IS_ERR(imx_pcie->iomuxc_gpr))
			return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
					     "unable to find iomuxc registers\n");
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx_pcie->tx_deemph_gen1))
		imx_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx_pcie->tx_deemph_gen2_3p5db))
		imx_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx_pcie->tx_deemph_gen2_6db))
		imx_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx_pcie->tx_swing_full))
		imx_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx_pcie->tx_swing_low))
		imx_pcie->tx_swing_low = 127;

	/* Limit link speed */
	pci->max_link_speed = 1;
	of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);

	imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx_pcie->vpcie)) {
		if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx_pcie->vpcie);
		imx_pcie->vpcie = NULL;
	}

	imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
	if (IS_ERR(imx_pcie->vph)) {
		if (PTR_ERR(imx_pcie->vph) != -ENODEV)
			return PTR_ERR(imx_pcie->vph);
		imx_pcie->vph = NULL;
	}

	platform_set_drvdata(pdev, imx_pcie);

	ret = imx_pcie_attach_pd(dev);
	if (ret)
		return ret;

	pci->use_parent_dt_ranges = true;
	if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
		ret = imx_add_pcie_ep(imx_pcie, pdev);
		if (ret < 0)
			return ret;

		/*
		 * FIXME: Only single Device (EPF) is supported due to the
		 * Endpoint framework limitation.
		 */
		imx_pcie_add_lut_by_rid(imx_pcie, 0);
	} else {
		pci->pp.use_atu_msg = true;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			return ret;

		if (pci_msi_enabled()) {
			u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);

			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
			val |= PCI_MSI_FLAGS_ENABLE;
			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
		}
	}

	return 0;
}

static void imx_pcie_shutdown(struct platform_device *pdev)
{
	struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx_pcie_assert_core_reset(imx_pcie);
}

/* Per-SoC configuration: flags, GPR compatible, register offsets, callbacks. */
static const struct imx_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_BROKEN_SUSPEND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6q_pcie_core_reset,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx6sx_pcie_init_phy,
		.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
		.core_reset = imx6sx_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6qp_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.gpr = "fsl,imx7d-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx7d_pcie_enable_ref_clk,
		.core_reset = imx7d_pcie_core_reset,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM] = {
		.variant = IMX8MM,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP] = {
		.variant = IMX8MP,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q] = {
		.variant = IMX8Q,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX95] = {
		.variant = IMX95,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_HAS_LUT |
			 IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.core_reset = imx95_pcie_core_reset,
		.init_phy = imx95_pcie_init_phy,
		.wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
	},
	[IMX8MQ_EP] = {
		.variant = IMX8MQ_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.epc_features = &imx8q_pcie_epc_features,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM_EP] = {
		.variant = IMX8MM_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP_EP] = {
		.variant = IMX8MP_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q_EP] = {
		.variant = IMX8Q_EP,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.epc_features = &imx8q_pcie_epc_features,
	},
	[IMX95_EP] = {
		.variant = IMX95_EP,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
			 IMX_PCIE_FLAG_SUPPORT_64BIT,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.init_phy = imx95_pcie_init_phy,
		.core_reset = imx95_pcie_core_reset,
		.wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
		.epc_features = &imx95_pcie_epc_features,
		.mode = DW_PCIE_EP_TYPE,
	},
};

static const struct of_device_id imx_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
	{ .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
	{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
	{ .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
	{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
	{},
};

static struct platform_driver imx_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe    = imx_pcie_probe,
	.shutdown = imx_pcie_shutdown,
};

static void imx_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct dw_pcie_rp *pp = bus->sysdata;

	/* Bus parent is the PCI bridge, its parent is this platform driver */
	if (!bus->dev.parent || !bus->dev.parent->parent)
		return;

	/* Make sure we only quirk devices associated with this driver */
	if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
		return;

	if (pci_is_root_bus(bus)) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct imx_pcie *imx_pcie = to_imx_pcie(pci);

		/*
		 * Limit config length to avoid the kernel reading beyond
		 * the register set and causing an abort on i.MX 6Quad
		 */
		if (imx_pcie->drvdata->dbi_length) {
			dev->cfg_size = imx_pcie->drvdata->dbi_length;
			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
					dev->cfg_size);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
			PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);

static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
	struct device_node *np;

	np = of_find_matching_node(NULL, imx_pcie_of_match);
	if (!np)
		return -ENODEV;
	of_node_put(np);

	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx_pcie_driver);
}
device_initcall(imx_pcie_init);