// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * https://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "../../pci.h"
#include "pcie-designware.h"

/*
 * i.MX8M-family per-controller GPR bits. These live in IOMUXC GPR14 or
 * GPR16 depending on the controller instance; see imx_pcie_grp_offset().
 */
#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS		BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)

/* i.MX95 HSIO block-control registers, accessed via the syscon regmap. */
#define IMX95_PCIE_PHY_GEN_CTRL			0x0
#define IMX95_PCIE_REF_USE_PAD			BIT(17)

#define IMX95_PCIE_PHY_MPLLA_CTRL		0x10
#define IMX95_PCIE_PHY_MPLL_STATE		BIT(30)

#define IMX95_PCIE_SS_RW_REG_0			0xf0
#define IMX95_PCIE_REF_CLKEN			BIT(23)
#define IMX95_PCIE_PHY_CR_PARA_SEL		BIT(9)
#define IMX95_PCIE_SS_RW_REG_1			0xf4
#define IMX95_PCIE_CLKREQ_OVERRIDE_EN		BIT(8)
#define IMX95_PCIE_CLKREQ_OVERRIDE_VAL		BIT(9)
#define IMX95_PCIE_SYS_AUX_PWR_DET		BIT(31)

#define IMX95_PE0_GEN_CTRL_1			0x1050
#define IMX95_PCIE_DEVICE_TYPE			GENMASK(3, 0)

#define IMX95_PE0_GEN_CTRL_3			0x1058
#define IMX95_PCIE_LTSSM_EN			BIT(0)

/*
 * i.MX95 RID -> StreamID look-up table (LUT) registers; used by
 * imx_pcie_add_lut()/imx_pcie_remove_lut() further down in this file.
 */
#define IMX95_PE0_LUT_ACSCTRL			0x1008
/* NOTE(review): "PEO" (letter O) looks like a typo for "PE0" — kept as-is. */
#define IMX95_PEO_LUT_RWA			BIT(16)
#define IMX95_PE0_LUT_ENLOC			GENMASK(4, 0)

#define IMX95_PE0_LUT_DATA1			0x100c
#define IMX95_PE0_LUT_VLD			BIT(31)
#define IMX95_PE0_LUT_DAC_ID			GENMASK(10, 8)
#define IMX95_PE0_LUT_STREAM_ID			GENMASK(5, 0)

#define IMX95_PE0_LUT_DATA2			0x1010
#define IMX95_PE0_LUT_REQID			GENMASK(31, 16)
#define IMX95_PE0_LUT_MASK			GENMASK(15, 0)

/* StreamIDs are 6 bits wide on i.MX95 (values 0..63). */
#define IMX95_SID_MASK				GENMASK(5, 0)
#define IMX95_MAX_LUT				32

#define IMX95_PCIE_RST_CTRL			0x3010
#define IMX95_PCIE_COLD_RST			BIT(0)

#define to_imx_pcie(x)	dev_get_drvdata((x)->dev)

/* Host (RC) variants first, matching *_EP endpoint variants afterwards. */
enum imx_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
	IMX8MM,
	IMX8MP,
	IMX8Q,
	IMX95,
	IMX8MQ_EP,
	IMX8MM_EP,
	IMX8MP_EP,
	IMX8Q_EP,
	IMX95_EP,
};

/* Per-variant capability/quirk flags stored in imx_pcie_drvdata::flags. */
#define IMX_PCIE_FLAG_IMX_PHY			BIT(0)
#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND	BIT(1)
#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
#define IMX_PCIE_FLAG_HAS_PHYDRV		BIT(3)
#define IMX_PCIE_FLAG_HAS_APP_RESET		BIT(4)
#define IMX_PCIE_FLAG_HAS_PHY_RESET		BIT(5)
#define IMX_PCIE_FLAG_HAS_SERDES		BIT(6)
#define IMX_PCIE_FLAG_SUPPORT_64BIT		BIT(7)
#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP		BIT(8)
/*
 * Because of ERR005723 (PCIe does not support L2 power down) we need to
 * workaround suspend resume on some devices which are affected by this errata.
 */
#define IMX_PCIE_FLAG_BROKEN_SUSPEND		BIT(9)
#define IMX_PCIE_FLAG_HAS_LUT			BIT(10)
#define IMX_PCIE_FLAG_8GT_ECN_ERR051586		BIT(11)
#define IMX_PCIE_FLAG_SKIP_L23_READY		BIT(12)

/* NOTE(review): macro arguments are not parenthesized — callers must pass
 * simple expressions for pci and val. */
#define imx_check_flag(pci, val)	(pci->drvdata->flags & val)

#define IMX_PCIE_MAX_INSTANCES		2

struct imx_pcie;

/*
 * Per-SoC-variant description: which quirks apply, where the LTSSM and
 * device-type controls live in the GPR space, and variant-specific hooks
 * for PHY init, reference clock gating and core reset sequencing.
 */
struct imx_pcie_drvdata {
	enum imx_pcie_variants variant;
	enum dw_pcie_device_mode mode;
	u32 flags;
	int dbi_length;
	const char *gpr;		/* compatible of the syscon holding the GPRs */
	const u32 ltssm_off;		/* GPR offset of the LTSSM enable bit(s) */
	const u32 ltssm_mask;
	const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
	const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
	const struct pci_epc_features *epc_features;
	int (*init_phy)(struct imx_pcie *pcie);
	int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
	int (*core_reset)(struct imx_pcie *pcie, bool assert);
	int (*wait_pll_lock)(struct imx_pcie *pcie);
	void (*clr_clkreq_override)(struct imx_pcie *pcie);
	const struct dw_pcie_host_ops *ops;
};

/* Shadow copy of one LUT entry, used to save/restore entries. */
struct imx_lut_data {
	u32 data1;
	u32 data2;
};

/* Driver-private state, one instance per PCIe controller. */
struct imx_pcie {
	struct dw_pcie *pci;
	struct gpio_desc *reset_gpiod;	/* PERST#; may be NULL on some boards */
	struct clk_bulk_data *clks;
	int num_clks;
	bool supports_clkreq;
	bool enable_ext_refclk;		/* use pad (external) reference clock */
	struct regmap *iomuxc_gpr;
	u16 msi_ctrl;
	u32 controller_id;		/* instance index on multi-controller SoCs */
	struct reset_control *pciephy_reset;
	struct reset_control *apps_reset;
	/* i.MX6 PHY TX tuning values, written to IOMUXC_GPR8 */
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	struct regulator *vpcie;
	struct regulator *vph;
	void __iomem *phy_base;

	/* LUT data for pcie */
	struct imx_lut_data luts[IMX95_MAX_LUT];
	/* power domain for pcie */
	struct device *pd_pcie;
	/* power domain for pcie phy */
	struct device *pd_pcie_phy;
	struct phy *phy;
	const struct imx_pcie_drvdata *drvdata;

	/* Ensure that only one device's LUT is configured at any given time */
	struct mutex lock;
};

/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

/* PHY control/status pair implementing the serial read/write handshake
 * used by pcie_phy_read()/pcie_phy_write() below. */
#define PCIE_PHY_CTRL			(PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
#define PCIE_PHY_CTRL_WR		BIT(18)
#define PCIE_PHY_CTRL_RD		BIT(19)

#define PCIE_PHY_STAT			(PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK		BIT(16)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD		0x10
#define PCIE_PHY_ATEOVRD_EN		BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO	0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT	2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK	0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD	BIT(9)

#define PCIE_PHY_RX_ASIC_OUT		0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15		0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO		0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN	BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN	BIT(3)

/*
 * Return the IOMUXC GPR offset holding this controller's PCIe bits.
 * Only valid on the i.MX8M family: controller 0 uses GPR14, controller 1
 * uses GPR16. Other variants trigger the WARN_ON.
 */
static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
	WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
		imx_pcie->drvdata->variant != IMX8MQ_EP &&
		imx_pcie->drvdata->variant != IMX8MM &&
		imx_pcie->drvdata->variant != IMX8MM_EP &&
		imx_pcie->drvdata->variant != IMX8MP &&
		imx_pcie->drvdata->variant != IMX8MP_EP);
	return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

/*
 * One-time i.MX95 PHY setup: apply the ERR051624 workaround, route PHY
 * CR parallel access, and select internal vs pad reference clock based
 * on enable_ext_refclk.
 */
static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	bool ext = imx_pcie->enable_ext_refclk;

	/*
	 * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
	 * Through Beacon or PERST# De-assertion
	 *
	 * When the auxiliary power is not available, the controller
	 * cannot exit from L23 Ready with beacon or PERST# de-assertion
	 * when main power is not removed.
	 *
	 * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
	 */
	regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
			IMX95_PCIE_SYS_AUX_PWR_DET);

	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_PHY_CR_PARA_SEL,
			   IMX95_PCIE_PHY_CR_PARA_SEL);

	/* Pad (external) refclk: set REF_USE_PAD, gate internal REF_CLKEN. */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_PHY_GEN_CTRL,
			   IMX95_PCIE_REF_USE_PAD,
			   ext ? IMX95_PCIE_REF_USE_PAD : 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_0,
			   IMX95_PCIE_REF_CLKEN,
			   ext ? 0 : IMX95_PCIE_REF_CLKEN);

	return 0;
}

/*
 * Program the controller's RC/EP device type into the GPR field described
 * by drvdata->mode_off[]/mode_mask[]. No-op when the generic PHY driver
 * owns mode selection (mode_mask[0] == 0).
 */
static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	unsigned int mask, val, mode, id;

	if (drvdata->mode == DW_PCIE_EP_TYPE)
		mode = PCI_EXP_TYPE_ENDPOINT;
	else
		mode = PCI_EXP_TYPE_ROOT_PORT;

	id = imx_pcie->controller_id;

	/* If mode_mask is 0, generic PHY driver is used to set the mode */
	if (!drvdata->mode_mask[0])
		return;

	/* If mode_mask[id] is 0, each controller has its individual GPR */
	if (!drvdata->mode_mask[id])
		id = 0;

	mask = drvdata->mode_mask[id];
	val = mode << (ffs(mask) - 1);	/* shift mode into the mask's LSB */

	regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}

/*
 * Busy-wait (up to ~10us) for the PHY control interface ACK bit to reach
 * exp_val. Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

/*
 * Latch a PHY register address into the control interface: present the
 * address, pulse CAP_ADR, and wait for the ACK handshake to complete.
 */
static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32
phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx_pcie, false);
}

/*
 * Write a 16-bit value to a (non memory-mapped) PHY register using the
 * CAP_DAT/WR handshake; every phase waits for ACK assert then de-assert.
 */
static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx_pcie, false);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

/*
 * i.MX8MQ PHY init: select the pad reference clock and, when PCIE_VPH is
 * supplied above 3.0V, clear VREG_BYPASS so the internal regulator is used.
 */
static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	/* TODO: This code assumes external oscillator is being used */
	regmap_update_bits(imx_pcie->iomuxc_gpr,
			   imx_pcie_grp_offset(imx_pcie),
			   IMX8MQ_GPR_PCIE_REF_USE_PAD,
			   IMX8MQ_GPR_PCIE_REF_USE_PAD);
	/*
	 * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
	 * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
	 * to zero.
	 */
	if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
		regmap_update_bits(imx_pcie->iomuxc_gpr,
				   imx_pcie_grp_offset(imx_pcie),
				   IMX8MQ_GPR_PCIE_VREG_BYPASS,
				   0);

	return 0;
}

/*
 * i.MX6 PHY init: program LOS level and the DT-provided TX de-emphasis
 * and swing values into IOMUXC_GPR8/GPR12. The shift amounts match the
 * IMX6Q_GPR8_* field positions.
 */
static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
			   imx_pcie->tx_deemph_gen1 << 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
			   imx_pcie->tx_deemph_gen2_3p5db << 6);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
			   imx_pcie->tx_deemph_gen2_6db << 12);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_FULL,
			   imx_pcie->tx_swing_full << 18);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_LOW,
			   imx_pcie->tx_swing_low << 25);
	return 0;
}

/* i.MX6SX: additionally select RX equalizer level 2, then common i.MX6 init. */
static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);

	return imx_pcie_init_phy(imx_pcie);
}

/* Poll GPR22 for PHY PLL lock on i.MX7D; logs an error but does not fail. */
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}

/* Poll the i.MX95 MPLLA state bit; returns -ETIMEDOUT if lock is not seen. */
static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
	u32 val;
	struct device *dev = imx_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
				     IMX95_PCIE_PHY_MPLLA_CTRL, val,
				     val & IMX95_PCIE_PHY_MPLL_STATE,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT)) {
		dev_err(dev, "PCIe PLL lock timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Reconfigure the internal PHY MPLL for the actual "pcie_phy" clock rate.
 * Only applies to variants with the built-in PHY (IMX_PCIE_FLAG_IMX_PHY).
 * NOTE(review): the pcie_phy_read/write return values are ignored here —
 * presumably deliberate best-effort; confirm before changing.
 */
static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
	unsigned long phy_rate = 0;
	int mult, div;
	u16 val;
	int i;
	struct clk_bulk_data *clks = imx_pcie->clks;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return 0;

	for (i = 0; i < imx_pcie->num_clks; i++)
		if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
			phy_rate = clk_get_rate(clks[i].clk);

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	/* Override the MPLL multiplier for the selected rate. */
	pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	/* Override the reference clock divider to match. */
	pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

/*
 * Pulse the RX data-enable/PLL-enable overrides to reset the built-in PHY
 * receiver. No-op on variants without the built-in PHY.
 */
static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
	u16 tmp;

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
		return;

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

#ifdef CONFIG_ARM
/* Added for PCI abort handling */
/*
 * Fault handler for aborts raised by config-space reads to absent devices:
 * decode the faulting ARM load instruction, fake an all-ones (or 0xff for
 * byte loads) result and skip the instruction. Returns 0 when handled.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;	/* destination register field */

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)	/* byte-sized load (LDRB) */
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	/* Halfword loads (LDRH-class encodings). */
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;	/* not handled; let the kernel deal with it */
}
#endif

/*
 * Attach the "pcie" and "pcie_phy" power domains and link them to the
 * device so runtime PM keeps them powered. No-op in a single-domain setup.
 */
static int imx_pcie_attach_pd(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx_pcie->pd_pcie))
		return PTR_ERR(imx_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd\n");
		return -EINVAL;
	}

	imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx_pcie->pd_pcie_phy))
		return PTR_ERR(imx_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
		return -EINVAL;
	}

	return 0;
}

/* i.MX6SX refclk gate: TEST_POWERDOWN is active-high, so clear to enable. */
static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
			   enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
	return 0;
}

/* i.MX6Q/QP refclk gate: power up the PHY, then enable the reference clock. */
static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	if (enable) {
		/* power up core phy and enable ref clock */
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
		/*
		 * The async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * Add a ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
	} else {
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	}

	return 0;
}

/*
 * i.MX8MM/8MP CLKREQ# override: when enabled, force the override value low
 * (clock requested) so the refclk keeps running regardless of the pin.
 */
static void imx8mm_pcie_clkreq_override(struct imx_pcie *imx_pcie, bool enable)
{
	int offset = imx_pcie_grp_offset(imx_pcie);

	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
			   enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
			   enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
}

static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	imx8mm_pcie_clkreq_override(imx_pcie, enable);
	return 0;
}

/* i.MX7D refclk gate via the PHY refclk select bit (0 = enabled). */
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
			   enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
	return 0;
}

/* i.MX95 CLKREQ# override: force the override value high while enabled. */
static void imx95_pcie_clkreq_override(struct imx_pcie *imx_pcie, bool enable)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
			   IMX95_PCIE_CLKREQ_OVERRIDE_EN,
			   enable ?
IMX95_PCIE_CLKREQ_OVERRIDE_EN : 0);
	regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
			   IMX95_PCIE_CLKREQ_OVERRIDE_VAL,
			   enable ? IMX95_PCIE_CLKREQ_OVERRIDE_VAL : 0);
}

static int imx95_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
	imx95_pcie_clkreq_override(imx_pcie, enable);
	return 0;
}

/* drvdata->clr_clkreq_override hooks: drop the CLKREQ# override again. */
static void imx8mm_pcie_clr_clkreq_override(struct imx_pcie *imx_pcie)
{
	imx8mm_pcie_clkreq_override(imx_pcie, false);
}

static void imx95_pcie_clr_clkreq_override(struct imx_pcie *imx_pcie)
{
	imx95_pcie_clkreq_override(imx_pcie, false);
}

/*
 * Enable the bulk clocks, then the variant-specific reference clock.
 * On failure the bulk clocks are rolled back.
 */
static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
	if (ret)
		return ret;

	if (imx_pcie->drvdata->enable_ref_clk) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret) {
			dev_err(dev, "Failed to enable PCIe REFCLK\n");
			goto err_ref_clk;
		}
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);
	return 0;

err_ref_clk:
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);

	return ret;
}

/* Reverse of imx_pcie_clk_enable(): refclk off first, then bulk clocks. */
static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
	if (imx_pcie->drvdata->enable_ref_clk)
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}

/* i.MX6SX core reset: power the PHY down on assert; toggle BTNRST both ways. */
static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (assert)
		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_TEST_POWERDOWN);

	/* Force PCIe PHY reset */
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
			   assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
	return 0;
}

/* i.MX6QP core reset via the dedicated SW_RST bit; settle after de-assert. */
static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
			   assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
	if (!assert)
		usleep_range(200, 500);

	return 0;
}

/*
 * i.MX6Q has no reset line; "assert" powers the PHY down with the refclk
 * still enabled, and de-assert is handled by imx6q_pcie_enable_ref_clk().
 */
static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	if (!assert)
		return 0;

	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
	regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);

	return 0;
}

/*
 * i.MX7D core reset de-assert path: applies the ERR010728 PHY workaround
 * (via the separately mapped fsl,imx7d-pcie-phy registers) and then waits
 * for PLL lock. Assert is a no-op; the reset lines are driven elsewhere.
 */
static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;

	if (assert)
		return 0;

	/*
	 * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
	 *
	 * PCIe: PLL may fail to lock under corner conditions.
	 *
	 * Initial VCO oscillation may fail under corner conditions such as
	 * cold temperature which will cause the PCIe PLL fail to lock in the
	 * initialization phase.
	 *
	 * The Duty-cycle Corrector calibration must be disabled.
	 *
	 * 1. De-assert the G_RST signal by clearing
	 *    SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
	 * 2. De-assert DCC_FB_EN by writing data "0x29" to the register
	 *    address 0x306d0014 (PCIE_PHY_CMN_REG4).
	 * 3. Assert RX_EQS, RX_EQ_SEL by writing data "0x48" to the register
	 *    address 0x306d0090 (PCIE_PHY_CMN_REG24).
	 * 4. Assert ATT_MODE by writing data "0xbc" to the register
	 *    address 0x306d0098 (PCIE_PHY_CMN_REG26).
	 * 5. De-assert the CMN_RST signal by clearing register bit
	 *    SRC_PCIEPHY_RCR[PCIEPHY_BTN]
	 */

	if (likely(imx_pcie->phy_base)) {
		/* De-assert DCC_FB_EN */
		writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
		/* Assert RX_EQS and RX_EQS_SEL */
		writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
		       imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
		/* Assert ATT_MODE */
		writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
	} else {
		dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
	}
	imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
	return 0;
}

/*
 * i.MX95 cold reset: toggle COLD_RST with flush-reads and delays matching
 * the power-up timing diagram below. De-assert is a no-op.
 */
static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
	u32 val;

	if (assert) {
		/*
		 * From i.MX95 PCIe PHY perspective, the COLD reset toggle
		 * should be complete after power-up by the following sequence.
		 *                     > 10us(at power-up)
		 *                     > 10ns(warm reset)
		 *                   |<------------>|
		 *                    ______________
		 * phy_reset ____/  \________________
		 *                                   ____________
		 * ref_clk_en_______________________/
		 * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
		 */
		regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				IMX95_PCIE_COLD_RST);
		/*
		 * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
		 * hardware by doing a read. Otherwise, there is no guarantee
		 * that the write has reached the hardware before udelay().
		 */
		regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				     &val);
		udelay(15);
		regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				  IMX95_PCIE_COLD_RST);
		regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
				     &val);
		udelay(10);
	}

	return 0;
}

/* Assert PHY reset, variant core reset, then drive PERST# low. */
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_assert(imx_pcie->pciephy_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, true);

	/* Some boards don't have PCIe reset GPIO. */
	gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
}

/*
 * Release all resets in reverse order and, when a PERST# GPIO exists,
 * observe the spec-mandated settle times around its de-assertion.
 */
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
	reset_control_deassert(imx_pcie->pciephy_reset);

	if (imx_pcie->drvdata->core_reset)
		imx_pcie->drvdata->core_reset(imx_pcie, false);

	/* Some boards don't have PCIe reset GPIO. */
	if (imx_pcie->reset_gpiod) {
		msleep(100);
		gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
		msleep(100);
	}

	return 0;
}

/*
 * Poll (up to ~200 iterations) for the directed-speed-change bit to clear,
 * which signals that the link speed negotiation finished.
 */
static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -ETIMEDOUT;
}

/*
 * Start the LTSSM: tell the PHY the maximum link speed, set the GPR LTSSM
 * enable bit (when the variant has one) and release the apps reset.
 */
static void imx_pcie_ltssm_enable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
	u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
	u32 tmp;

	tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
	phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
				   drvdata->ltssm_mask);

	reset_control_deassert(imx_pcie->apps_reset);
}

/* Stop the LTSSM: mirror image of imx_pcie_ltssm_enable(). */
static void imx_pcie_ltssm_disable(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
	const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;

	phy_set_speed(imx_pcie->phy, 0);
	if (drvdata->ltssm_mask)
		regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
				   drvdata->ltssm_mask, 0);

	reset_control_assert(imx_pcie->apps_reset);
}

/*
 * dw_pcie_ops::start_link. On variants with the speed-change erratum the
 * link is first brought up at Gen1, then retrained to the configured
 * maximum speed. Link-up failures are logged but deliberately return 0.
 */
static int imx_pcie_start_link(struct dw_pcie *pci)
{
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	struct device *dev = pci->dev;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 tmp;
	int ret;

	if (!(imx_pcie->drvdata->flags &
	      IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
		imx_pcie_ltssm_enable(dev);
		return 0;
	}

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	dw_pcie_dbi_ro_wr_en(pci);
	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	tmp &= ~PCI_EXP_LNKCAP_SLS;
	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Start LTSSM. */
	imx_pcie_ltssm_enable(dev);

	if (pci->max_link_speed > 1) {
		ret = dw_pcie_wait_for_link(pci);
		if (ret)
			goto err_reset_phy;

		/* Allow faster modes after the link is up */
		dw_pcie_dbi_ro_wr_en(pci);
		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		tmp &= ~PCI_EXP_LNKCAP_SLS;
		tmp |= pci->max_link_speed;
		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
		dw_pcie_dbi_ro_wr_dis(pci);

		ret = imx_pcie_wait_for_speed_change(imx_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Only Gen1 is enabled\n");
	}

	return 0;

err_reset_phy:
	/* Intentionally return 0: a missing link is not fatal at this point. */
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx_pcie_reset_phy(imx_pcie);
	return 0;
}

/* dw_pcie_ops::stop_link — simply turn the LTSSM off. */
static void imx_pcie_stop_link(struct dw_pcie *pci)
{
	struct device *dev = pci->dev;

	/* Turn off PCIe LTSSM */
	imx_pcie_ltssm_disable(dev);
}

/*
 * Install a Requester-ID -> StreamID mapping in the i.MX95 LUT.
 * Scans all entries under imx_pcie->lock for a duplicate RID and the first
 * free slot, then programs DATA1/DATA2 and commits via ACSCTRL.
 * Returns 0 on success (or if the RID is already mapped), -EINVAL for an
 * out-of-range SID, -ENOSPC when the LUT is full.
 */
static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
{
	struct dw_pcie *pci = imx_pcie->pci;
	struct device *dev = pci->dev;
	u32 data1, data2;
	int free = -1;
	int i;

	/* StreamIDs are 6 bits wide (IMX95_PE0_LUT_STREAM_ID). */
	if (sid >= 64) {
		dev_err(dev, "Invalid SID for index %d\n", sid);
		return -EINVAL;
	}

	guard(mutex)(&imx_pcie->lock);

	/*
	 * Iterate through all LUT entries to check for duplicate RID and
	 * identify the first available entry. Configure this available entry
	 * immediately after verification to avoid rescanning it.
	 */
	for (i = 0; i < IMX95_MAX_LUT; i++) {
		/* RWA selects a read access of entry i. */
		regmap_write(imx_pcie->iomuxc_gpr,
			     IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);

		if (!(data1 & IMX95_PE0_LUT_VLD)) {
			if (free < 0)
				free = i;
			continue;
		}

		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);

		/* Do not add duplicate RID */
		if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
			/* NOTE(review): message lacks a trailing "\n". */
			dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
			return 0;
		}
	}

	if (free < 0) {
		dev_err(dev, "LUT entry is not available\n");
		return -ENOSPC;
	}

	data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
	data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
	data1 |= IMX95_PE0_LUT_VLD;
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);

	if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
		data2 = 0x7; /* In the EP mode, only 'Device ID' is required */
	else
		data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
	data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);

	/* Writing ACSCTRL without RWA commits entry 'free'. */
	regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);

	return 0;
}

/*
 * Remove the (first) LUT entry whose Requester ID matches rid:
 * clear DATA1/DATA2 and commit the cleared entry.
 */
static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
{
	u32 data2;
	int i;

	guard(mutex)(&imx_pcie->lock);

	for (i = 0; i < IMX95_MAX_LUT; i++) {
		regmap_write(imx_pcie->iomuxc_gpr,
			     IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
		regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
		if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_DATA1, 0);
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_DATA2, 0);
			regmap_write(imx_pcie->iomuxc_gpr,
				     IMX95_PE0_LUT_ACSCTRL, i);

			break;
		}
	}
}

/*
 * Resolve rid to a StreamID via the DT iommu-map/msi-map properties and
 * install a LUT entry for it. (Definition continues beyond this chunk.)
 */
static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid)
{
	struct device *dev = imx_pcie->pci->dev;
	struct device_node *target;
	u32 sid_i, sid_m;
	int err_i, err_m;
	u32 sid = 0;

	target = NULL;
	err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
			  &target, &sid_i);
	if (target) {
		of_node_put(target);
	} else {
		/*
		 * "target == NULL && err_i == 0" means RID out of map range.
		 * Use 1:1 map RID to streamID. Hardware can't support this
		 * because the streamID is only 6 bits
		 */
		err_i = -EINVAL;
	}

	target = NULL;
	err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
			  &target, &sid_m);

	/*
	 *   err_m    target
	 *	0	NULL	RID out of range. Use 1:1 map RID to
	 *			streamID, Current hardware can't
	 *			support it, so return -EINVAL.
	 *	!= 0	NULL	msi-map does not exist, use built-in MSI
	 *	0	!= NULL	Get correct streamID from RID
	 *	!= 0	!= NULL	Invalid combination
	 */
	if (!err_m && !target)
		return -EINVAL;
	else if (target)
		of_node_put(target); /* Find streamID map entry for RID in msi-map */

	/*
	 * msi-map	iommu-map
	 *    N		    N		DWC MSI Ctrl
	 *    Y		    Y		ITS + SMMU, require the same SID
	 *    Y		    N		ITS
	 *    N		    Y		DWC MSI Ctrl + SMMU
	 */
	if (err_i && err_m)
		return 0;

	if (!err_i && !err_m) {
		/*
		 *	  Glue Layer
		 *	<==========>
		 * ┌─────┐			   ┌──────────┐
		 * │ LUT │ 6-bit streamID	   │	      │
		 * │	 │─────────────────►	   │   MSI    │
		 * └─────┘   2-bit ctrl ID	   │	      │
		 *	     ┌───────────►	   │	      │
		 *  (i.MX95)		│	   │	      │
		 *  00 PCIe0		│	   │	      │
		 *  01 ENETC		│	   │	      │
		 *  10 PCIe1		│	   │	      │
		 *			└──────────┘
		 * The MSI glue layer auto adds 2 bits controller ID ahead of
		 * streamID, so mask these 2 bits to get streamID. The
		 * IOMMU glue layer doesn't do that.
1205 */ 1206 if (sid_i != (sid_m & IMX95_SID_MASK)) { 1207 dev_err(dev, "iommu-map and msi-map entries mismatch!\n"); 1208 return -EINVAL; 1209 } 1210 } 1211 1212 if (!err_i) 1213 sid = sid_i; 1214 else if (!err_m) 1215 sid = sid_m & IMX95_SID_MASK; 1216 1217 return imx_pcie_add_lut(imx_pcie, rid, sid); 1218 } 1219 1220 static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) 1221 { 1222 struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); 1223 1224 return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev)); 1225 } 1226 1227 static void imx_pcie_disable_device(struct pci_host_bridge *bridge, 1228 struct pci_dev *pdev) 1229 { 1230 struct imx_pcie *imx_pcie; 1231 1232 imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); 1233 imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev)); 1234 } 1235 1236 static int imx_pcie_host_init(struct dw_pcie_rp *pp) 1237 { 1238 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1239 struct device *dev = pci->dev; 1240 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1241 int ret; 1242 1243 if (imx_pcie->vpcie) { 1244 ret = regulator_enable(imx_pcie->vpcie); 1245 if (ret) { 1246 dev_err(dev, "failed to enable vpcie regulator: %d\n", 1247 ret); 1248 return ret; 1249 } 1250 } 1251 1252 if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) { 1253 pp->bridge->enable_device = imx_pcie_enable_device; 1254 pp->bridge->disable_device = imx_pcie_disable_device; 1255 } 1256 1257 imx_pcie_assert_core_reset(imx_pcie); 1258 1259 if (imx_pcie->drvdata->init_phy) 1260 imx_pcie->drvdata->init_phy(imx_pcie); 1261 1262 imx_pcie_configure_type(imx_pcie); 1263 1264 ret = imx_pcie_clk_enable(imx_pcie); 1265 if (ret) { 1266 dev_err(dev, "unable to enable pcie clocks: %d\n", ret); 1267 goto err_reg_disable; 1268 } 1269 1270 if (imx_pcie->phy) { 1271 ret = phy_init(imx_pcie->phy); 1272 if (ret) { 1273 dev_err(dev, "pcie PHY power up failed\n"); 1274 goto err_clk_disable; 1275 } 1276 1277 ret = 
phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
				       imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
				       PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
		if (ret) {
			dev_err(dev, "unable to set PCIe PHY mode\n");
			goto err_phy_exit;
		}

		ret = phy_power_on(imx_pcie->phy);
		if (ret) {
			dev_err(dev, "waiting for PHY ready timeout!\n");
			goto err_phy_exit;
		}
	}

	/* Make sure that PCIe LTSSM is cleared */
	imx_pcie_ltssm_disable(dev);

	ret = imx_pcie_deassert_core_reset(imx_pcie);
	if (ret < 0) {
		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
		goto err_phy_off;
	}

	if (imx_pcie->drvdata->wait_pll_lock) {
		ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
		if (ret < 0)
			goto err_phy_off;
	}

	imx_setup_phy_mpll(imx_pcie);

	return 0;

err_phy_off:
	phy_power_off(imx_pcie->phy);
err_phy_exit:
	phy_exit(imx_pcie->phy);
err_clk_disable:
	imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
	return ret;
}

/* dw_pcie host ->deinit callback: undo imx_pcie_host_init() in reverse order */
static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);

	if (imx_pcie->phy) {
		if (phy_power_off(imx_pcie->phy))
			dev_err(pci->dev, "unable to power off PHY\n");
		phy_exit(imx_pcie->phy);
	}
	imx_pcie_clk_disable(imx_pcie);

	if (imx_pcie->vpcie)
		regulator_disable(imx_pcie->vpcie);
}

/*
 * dw_pcie host ->post_init callback: apply the ERR051586 receiver
 * impedance workaround where flagged and drop the CLKREQ# override
 * once the link is up.
 */
static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx_pcie *imx_pcie = to_imx_pcie(pci);
	u32 val;

	if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
		/*
		 * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
		 *
		 * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
		 * is 1 which makes receiver non-compliant with the ZRX-DC
		 * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
		 * It causes unnecessary timeout in L1.
		 *
		 * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
		 * to 0.
		 */
		dw_pcie_dbi_ro_wr_en(pci);
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
		dw_pcie_dbi_ro_wr_dis(pci);
	}

	/* Clear CLKREQ# override if supports_clkreq is true and link is up */
	if (dw_pcie_link_up(pci) && imx_pcie->supports_clkreq) {
		if (imx_pcie->drvdata->clr_clkreq_override)
			imx_pcie->drvdata->clr_clkreq_override(imx_pcie);
	}
}

/*
 * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
 * register is reserved, so the generic DWC implementation of sending the
 * PME_Turn_Off message using a dummy MMIO write cannot be used.
1375 */ 1376 static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp) 1377 { 1378 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1379 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1380 1381 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); 1382 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); 1383 1384 usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); 1385 } 1386 1387 static const struct dw_pcie_host_ops imx_pcie_host_ops = { 1388 .init = imx_pcie_host_init, 1389 .deinit = imx_pcie_host_exit, 1390 .pme_turn_off = imx_pcie_pme_turn_off, 1391 }; 1392 1393 static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = { 1394 .init = imx_pcie_host_init, 1395 .deinit = imx_pcie_host_exit, 1396 .post_init = imx_pcie_host_post_init, 1397 }; 1398 1399 static const struct dw_pcie_ops dw_pcie_ops = { 1400 .start_link = imx_pcie_start_link, 1401 .stop_link = imx_pcie_stop_link, 1402 }; 1403 1404 static void imx_pcie_ep_init(struct dw_pcie_ep *ep) 1405 { 1406 enum pci_barno bar; 1407 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1408 1409 for (bar = BAR_0; bar <= BAR_5; bar++) 1410 dw_pcie_ep_reset_bar(pci, bar); 1411 } 1412 1413 static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 1414 unsigned int type, u16 interrupt_num) 1415 { 1416 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1417 1418 switch (type) { 1419 case PCI_IRQ_INTX: 1420 return dw_pcie_ep_raise_intx_irq(ep, func_no); 1421 case PCI_IRQ_MSI: 1422 return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); 1423 case PCI_IRQ_MSIX: 1424 return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); 1425 default: 1426 dev_err(pci->dev, "UNKNOWN IRQ type\n"); 1427 return -EINVAL; 1428 } 1429 1430 return 0; 1431 } 1432 1433 static const struct pci_epc_features imx8m_pcie_epc_features = { 1434 DWC_EPC_COMMON_FEATURES, 1435 .msi_capable = true, 1436 .bar[BAR_1] = { .type = BAR_RESERVED, }, 1437 .bar[BAR_3] = { .type = 
BAR_RESERVED, }, 1438 .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, }, 1439 .bar[BAR_5] = { .type = BAR_RESERVED, }, 1440 .align = SZ_64K, 1441 }; 1442 1443 static const struct pci_epc_features imx8q_pcie_epc_features = { 1444 DWC_EPC_COMMON_FEATURES, 1445 .msi_capable = true, 1446 .bar[BAR_1] = { .type = BAR_RESERVED, }, 1447 .bar[BAR_3] = { .type = BAR_RESERVED, }, 1448 .bar[BAR_5] = { .type = BAR_RESERVED, }, 1449 .align = SZ_64K, 1450 }; 1451 1452 /* 1453 * | Default | Default | Default | BAR Sizing 1454 * BAR# | Enable? | Type | Size | Scheme 1455 * ======================================================= 1456 * BAR0 | Enable | 64-bit | 1 MB | Programmable Size 1457 * BAR1 | Disable | 32-bit | 64 KB | Fixed Size 1458 * (BAR1 should be disabled if BAR0 is 64-bit) 1459 * BAR2 | Enable | 32-bit | 1 MB | Programmable Size 1460 * BAR3 | Enable | 32-bit | 64 KB | Programmable Size 1461 * BAR4 | Enable | 32-bit | 1 MB | Programmable Size 1462 * BAR5 | Enable | 32-bit | 64 KB | Programmable Size 1463 */ 1464 static const struct pci_epc_features imx95_pcie_epc_features = { 1465 DWC_EPC_COMMON_FEATURES, 1466 .msi_capable = true, 1467 .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, }, 1468 .align = SZ_4K, 1469 }; 1470 1471 static const struct pci_epc_features* 1472 imx_pcie_ep_get_features(struct dw_pcie_ep *ep) 1473 { 1474 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1475 struct imx_pcie *imx_pcie = to_imx_pcie(pci); 1476 1477 return imx_pcie->drvdata->epc_features; 1478 } 1479 1480 static const struct dw_pcie_ep_ops pcie_ep_ops = { 1481 .init = imx_pcie_ep_init, 1482 .raise_irq = imx_pcie_ep_raise_irq, 1483 .get_features = imx_pcie_ep_get_features, 1484 }; 1485 1486 static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, 1487 struct platform_device *pdev) 1488 { 1489 int ret; 1490 struct dw_pcie_ep *ep; 1491 struct dw_pcie *pci = imx_pcie->pci; 1492 struct dw_pcie_rp *pp = &pci->pp; 1493 struct device *dev = pci->dev; 1494 1495 imx_pcie_host_init(pp); 
1496 ep = &pci->ep; 1497 ep->ops = &pcie_ep_ops; 1498 1499 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT)) 1500 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 1501 1502 ep->page_size = imx_pcie->drvdata->epc_features->align; 1503 1504 ret = dw_pcie_ep_init(ep); 1505 if (ret) { 1506 dev_err(dev, "failed to initialize endpoint\n"); 1507 return ret; 1508 } 1509 imx_pcie_host_post_init(pp); 1510 1511 ret = dw_pcie_ep_init_registers(ep); 1512 if (ret) { 1513 dev_err(dev, "Failed to initialize DWC endpoint registers\n"); 1514 dw_pcie_ep_deinit(ep); 1515 return ret; 1516 } 1517 1518 pci_epc_init_notify(ep->epc); 1519 1520 return 0; 1521 } 1522 1523 static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) 1524 { 1525 u8 offset; 1526 u16 val; 1527 struct dw_pcie *pci = imx_pcie->pci; 1528 1529 if (pci_msi_enabled()) { 1530 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1531 if (save) { 1532 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1533 imx_pcie->msi_ctrl = val; 1534 } else { 1535 dw_pcie_dbi_ro_wr_en(pci); 1536 val = imx_pcie->msi_ctrl; 1537 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1538 dw_pcie_dbi_ro_wr_dis(pci); 1539 } 1540 } 1541 } 1542 1543 static void imx_pcie_lut_save(struct imx_pcie *imx_pcie) 1544 { 1545 u32 data1, data2; 1546 int i; 1547 1548 for (i = 0; i < IMX95_MAX_LUT; i++) { 1549 regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, 1550 IMX95_PEO_LUT_RWA | i); 1551 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1); 1552 regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); 1553 if (data1 & IMX95_PE0_LUT_VLD) { 1554 imx_pcie->luts[i].data1 = data1; 1555 imx_pcie->luts[i].data2 = data2; 1556 } else { 1557 imx_pcie->luts[i].data1 = 0; 1558 imx_pcie->luts[i].data2 = 0; 1559 } 1560 } 1561 } 1562 1563 static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie) 1564 { 1565 int i; 1566 1567 for (i = 0; i < IMX95_MAX_LUT; i++) { 1568 if ((imx_pcie->luts[i].data1 & 
IMX95_PE0_LUT_VLD) == 0)
			continue;

		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
			     imx_pcie->luts[i].data1);
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
			     imx_pcie->luts[i].data2);
		regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
	}
}

/*
 * System suspend, noirq phase.  Saves MSI/LUT state, then either applies
 * the BROKEN_SUSPEND workaround (assert reset, gate the ref clock) or
 * defers to the generic DWC suspend path.
 */
static int imx_pcie_suspend_noirq(struct device *dev)
{
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx_pcie_msi_save_restore(imx_pcie, true);
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
		imx_pcie_lut_save(imx_pcie);
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		/*
		 * The minimum for a workaround would be to set PERST# and to
		 * set the PCIE_TEST_PD flag. However, we can also disable the
		 * clock which saves some power.
		 */
		imx_pcie_assert_core_reset(imx_pcie);
		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
	} else {
		return dw_pcie_suspend_noirq(imx_pcie->pci);
	}

	return 0;
}

/* System resume, noirq phase: mirror of imx_pcie_suspend_noirq() */
static int imx_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx_pcie *imx_pcie = dev_get_drvdata(dev);

	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
		if (ret)
			return ret;
		ret = imx_pcie_deassert_core_reset(imx_pcie);
		if (ret)
			return ret;

		/*
		 * Using PCIE_TEST_PD seems to disable MSI and powers down the
		 * root complex. This is why we have to setup the rc again and
		 * why we have to restore the MSI register.
		 */
		ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
		if (ret)
			return ret;
	} else {
		ret = dw_pcie_resume_noirq(imx_pcie->pci);
		if (ret)
			return ret;
	}
	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
		imx_pcie_lut_restore(imx_pcie);
	imx_pcie_msi_save_restore(imx_pcie, false);

	return 0;
}

static const struct dev_pm_ops imx_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
				  imx_pcie_resume_noirq)
};

/*
 * Probe: gather all DT-described resources (PHY, GPIOs, clocks, resets,
 * GPR/serdes regmaps, regulators, tuning values), then register as either
 * a DWC endpoint or a DWC host depending on drvdata->mode.
 */
static int imx_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx_pcie *imx_pcie;
	struct device_node *np;
	struct device_node *node = dev->of_node;
	int i, ret, domain;
	u16 val;

	imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
	if (!imx_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx_pcie->pci = pci;
	imx_pcie->drvdata = of_device_get_match_data(dev);

	mutex_init(&imx_pcie->lock);

	if (imx_pcie->drvdata->ops)
		pci->pp.ops = imx_pcie->drvdata->ops;
	else
		pci->pp.ops = &imx_pcie_host_dw_pme_ops;

	/* Find the PHY if one is defined, only imx7d uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx_pcie->phy_base))
			return PTR_ERR(imx_pcie->phy_base);
	}

	/* Fetch GPIOs */
	imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(imx_pcie->reset_gpiod))
		return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
"unable to get reset gpio\n"); 1696 gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); 1697 1698 /* Fetch clocks */ 1699 imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks); 1700 if (imx_pcie->num_clks < 0) 1701 return dev_err_probe(dev, imx_pcie->num_clks, 1702 "failed to get clocks\n"); 1703 for (i = 0; i < imx_pcie->num_clks; i++) 1704 if (strncmp(imx_pcie->clks[i].id, "extref", 6) == 0) 1705 imx_pcie->enable_ext_refclk = true; 1706 1707 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) { 1708 imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); 1709 if (IS_ERR(imx_pcie->phy)) 1710 return dev_err_probe(dev, PTR_ERR(imx_pcie->phy), 1711 "failed to get pcie phy\n"); 1712 } 1713 1714 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) { 1715 imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); 1716 if (IS_ERR(imx_pcie->apps_reset)) 1717 return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset), 1718 "failed to get pcie apps reset control\n"); 1719 } 1720 1721 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) { 1722 imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); 1723 if (IS_ERR(imx_pcie->pciephy_reset)) 1724 return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset), 1725 "Failed to get PCIEPHY reset control\n"); 1726 } 1727 1728 switch (imx_pcie->drvdata->variant) { 1729 case IMX8MQ: 1730 case IMX8MQ_EP: 1731 domain = of_get_pci_domain_nr(node); 1732 if (domain < 0 || domain > 1) 1733 return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n"); 1734 1735 imx_pcie->controller_id = domain; 1736 break; 1737 default: 1738 break; 1739 } 1740 1741 if (imx_pcie->drvdata->gpr) { 1742 /* Grab GPR config register range */ 1743 imx_pcie->iomuxc_gpr = 1744 syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr); 1745 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1746 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1747 "unable to find iomuxc registers\n"); 1748 } 1749 
1750 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) { 1751 void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app"); 1752 1753 if (IS_ERR(off)) 1754 return dev_err_probe(dev, PTR_ERR(off), 1755 "unable to find serdes registers\n"); 1756 1757 static const struct regmap_config regmap_config = { 1758 .reg_bits = 32, 1759 .val_bits = 32, 1760 .reg_stride = 4, 1761 }; 1762 1763 imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, ®map_config); 1764 if (IS_ERR(imx_pcie->iomuxc_gpr)) 1765 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), 1766 "unable to find iomuxc registers\n"); 1767 } 1768 1769 /* Grab PCIe PHY Tx Settings */ 1770 if (of_property_read_u32(node, "fsl,tx-deemph-gen1", 1771 &imx_pcie->tx_deemph_gen1)) 1772 imx_pcie->tx_deemph_gen1 = 0; 1773 1774 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", 1775 &imx_pcie->tx_deemph_gen2_3p5db)) 1776 imx_pcie->tx_deemph_gen2_3p5db = 0; 1777 1778 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", 1779 &imx_pcie->tx_deemph_gen2_6db)) 1780 imx_pcie->tx_deemph_gen2_6db = 20; 1781 1782 if (of_property_read_u32(node, "fsl,tx-swing-full", 1783 &imx_pcie->tx_swing_full)) 1784 imx_pcie->tx_swing_full = 127; 1785 1786 if (of_property_read_u32(node, "fsl,tx-swing-low", 1787 &imx_pcie->tx_swing_low)) 1788 imx_pcie->tx_swing_low = 127; 1789 1790 /* Limit link speed */ 1791 pci->max_link_speed = 1; 1792 of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed); 1793 imx_pcie->supports_clkreq = of_property_read_bool(node, "supports-clkreq"); 1794 1795 ret = devm_regulator_get_enable_optional(&pdev->dev, "vpcie3v3aux"); 1796 if (ret < 0 && ret != -ENODEV) 1797 return dev_err_probe(dev, ret, "failed to enable Vaux supply\n"); 1798 1799 imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); 1800 if (IS_ERR(imx_pcie->vpcie)) { 1801 if (PTR_ERR(imx_pcie->vpcie) != -ENODEV) 1802 return PTR_ERR(imx_pcie->vpcie); 1803 imx_pcie->vpcie = NULL; 1804 } 1805 1806 
imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); 1807 if (IS_ERR(imx_pcie->vph)) { 1808 if (PTR_ERR(imx_pcie->vph) != -ENODEV) 1809 return PTR_ERR(imx_pcie->vph); 1810 imx_pcie->vph = NULL; 1811 } 1812 1813 platform_set_drvdata(pdev, imx_pcie); 1814 1815 ret = imx_pcie_attach_pd(dev); 1816 if (ret) 1817 return ret; 1818 1819 pci->use_parent_dt_ranges = true; 1820 if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { 1821 ret = imx_add_pcie_ep(imx_pcie, pdev); 1822 if (ret < 0) 1823 return ret; 1824 1825 /* 1826 * FIXME: Only single Device (EPF) is supported due to the 1827 * Endpoint framework limitation. 1828 */ 1829 imx_pcie_add_lut_by_rid(imx_pcie, 0); 1830 } else { 1831 if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SKIP_L23_READY)) 1832 pci->pp.skip_l23_ready = true; 1833 pci->pp.use_atu_msg = true; 1834 ret = dw_pcie_host_init(&pci->pp); 1835 if (ret < 0) 1836 return ret; 1837 1838 if (pci_msi_enabled()) { 1839 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); 1840 1841 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); 1842 val |= PCI_MSI_FLAGS_ENABLE; 1843 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); 1844 } 1845 } 1846 1847 return 0; 1848 } 1849 1850 static void imx_pcie_shutdown(struct platform_device *pdev) 1851 { 1852 struct imx_pcie *imx_pcie = platform_get_drvdata(pdev); 1853 1854 /* bring down link, so bootloader gets clean state in case of reboot */ 1855 imx_pcie_assert_core_reset(imx_pcie); 1856 } 1857 1858 static const struct imx_pcie_drvdata drvdata[] = { 1859 [IMX6Q] = { 1860 .variant = IMX6Q, 1861 .flags = IMX_PCIE_FLAG_IMX_PHY | 1862 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND | 1863 IMX_PCIE_FLAG_BROKEN_SUSPEND | 1864 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, 1865 .dbi_length = 0x200, 1866 .gpr = "fsl,imx6q-iomuxc-gpr", 1867 .ltssm_off = IOMUXC_GPR12, 1868 .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, 1869 .mode_off[0] = IOMUXC_GPR12, 1870 .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, 1871 .init_phy = imx_pcie_init_phy, 1872 .enable_ref_clk = 
imx6q_pcie_enable_ref_clk,
		.core_reset = imx6q_pcie_core_reset,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx6sx_pcie_init_phy,
		.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
		.core_reset = imx6sx_pcie_core_reset,
		/* Needs the GPR-based PME_Turn_Off host ops */
		.ops = &imx_pcie_host_ops,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX_PCIE_FLAG_IMX_PHY |
			 IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
			 IMX_PCIE_FLAG_SKIP_L23_READY |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.ltssm_off = IOMUXC_GPR12,
		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.init_phy = imx_pcie_init_phy,
		.enable_ref_clk = imx6q_pcie_enable_ref_clk,
		.core_reset = imx6qp_pcie_core_reset,
		.ops = &imx_pcie_host_ops,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_SKIP_L23_READY |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.gpr = "fsl,imx7d-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx7d_pcie_enable_ref_clk,
		.core_reset = imx7d_pcie_core_reset,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		/* Second controller's device-type field, selected by controller_id */
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
		.clr_clkreq_override = imx8mm_pcie_clr_clkreq_override,
	},
	[IMX8MM] = {
		.variant = IMX8MM,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
		.clr_clkreq_override = imx8mm_pcie_clr_clkreq_override,
	},
	[IMX8MP] = {
		.variant = IMX8MP,
		.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
			 IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_HAS_APP_RESET,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
		.clr_clkreq_override = imx8mm_pcie_clr_clkreq_override,
	},
	[IMX8Q] = {
		.variant = IMX8Q,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
			 IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX95] = {
		.variant = IMX95,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_HAS_LUT |
			 IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.core_reset = imx95_pcie_core_reset,
		.init_phy = imx95_pcie_init_phy,
		.wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
		.enable_ref_clk = imx95_pcie_enable_ref_clk,
		.clr_clkreq_override = imx95_pcie_clr_clkreq_override,
	},
	[IMX8MQ_EP] = {
		.variant = IMX8MQ_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHY_RESET,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.mode_off[1] = IOMUXC_GPR12,
		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
		.epc_features = &imx8q_pcie_epc_features,
		.init_phy = imx8mq_pcie_init_phy,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MM_EP] = {
		.variant = IMX8MM_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8MP_EP] = {
		.variant = IMX8MP_EP,
		.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
			 IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.mode_off[0] = IOMUXC_GPR12,
		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
		.epc_features = &imx8m_pcie_epc_features,
		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
	},
	[IMX8Q_EP] = {
		.variant = IMX8Q_EP,
		.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
		.mode = DW_PCIE_EP_TYPE,
		.epc_features = &imx8q_pcie_epc_features,
	},
	[IMX95_EP] = {
		.variant = IMX95_EP,
		.flags = IMX_PCIE_FLAG_HAS_SERDES |
			 IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
			 IMX_PCIE_FLAG_SUPPORT_64BIT,
		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
		.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
		.init_phy = imx95_pcie_init_phy,
		.core_reset = imx95_pcie_core_reset,
		.wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
		.epc_features = &imx95_pcie_epc_features,
		.enable_ref_clk = imx95_pcie_enable_ref_clk,
		.mode = DW_PCIE_EP_TYPE,
	},
};

static const struct of_device_id imx_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
	{ .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
	{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
	{ .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
	{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
	{},
};

static struct platform_driver imx_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = imx_pcie_probe,
	.shutdown = imx_pcie_shutdown,
};

/*
 * PCI fixup for the Synopsys root port: on parts with a truncated DBI
 * register set (dbi_length set in drvdata) cap the config space size.
 */
static void imx_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct dw_pcie_rp *pp = bus->sysdata;

	/* Bus parent is the PCI bridge, its parent is this platform driver */
	if (!bus->dev.parent || !bus->dev.parent->parent)
		return;

	/* Make sure we only quirk devices associated with this driver */
	if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
		return;

	if (pci_is_root_bus(bus)) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct imx_pcie *imx_pcie = to_imx_pcie(pci);

		/*
		 * Limit config length to avoid the kernel reading beyond
		 * the register set and causing an abort on i.MX 6Quad
		 */
		if (imx_pcie->drvdata->dbi_length) {
			dev->cfg_size = imx_pcie->drvdata->dbi_length;
			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
				 dev->cfg_size);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
			PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);

/*
 * Module entry: on ARM, install the external-abort handler before any
 * probe can run, then register the platform driver.
 */
static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
	struct device_node *np;

	/* Only hook the fault code if a matching controller is present */
	np = of_find_matching_node(NULL, imx_pcie_of_match);
	if (!np)
		return -ENODEV;
	of_node_put(np);

	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx_pcie_driver);
}
device_initcall(imx_pcie_init);