Lines Matching +full:de +full:- +full:assertion

1 // SPDX-License-Identifier: GPL-2.0
17 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
18 #include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
36 #include "pcie-designware.h"
58 #define to_imx_pcie(x) dev_get_drvdata((x)->dev)
86 #define imx_check_flag(pci, val) (pci->drvdata->flags & val)
143 /* PCIe Port Logic registers (memory-mapped) */
156 /* PHY registers (not memory-mapped) */
193 WARN_ON(imx_pcie->drvdata->variant != IMX8MQ && in imx_pcie_grp_offset()
194 imx_pcie->drvdata->variant != IMX8MQ_EP && in imx_pcie_grp_offset()
195 imx_pcie->drvdata->variant != IMX8MM && in imx_pcie_grp_offset()
196 imx_pcie->drvdata->variant != IMX8MM_EP && in imx_pcie_grp_offset()
197 imx_pcie->drvdata->variant != IMX8MP && in imx_pcie_grp_offset()
198 imx_pcie->drvdata->variant != IMX8MP_EP); in imx_pcie_grp_offset()
199 return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14; in imx_pcie_grp_offset()
204 regmap_update_bits(imx_pcie->iomuxc_gpr, in imx95_pcie_init_phy()
209 regmap_update_bits(imx_pcie->iomuxc_gpr, in imx95_pcie_init_phy()
212 regmap_update_bits(imx_pcie->iomuxc_gpr, in imx95_pcie_init_phy()
222 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; in imx_pcie_configure_type()
225 if (drvdata->mode == DW_PCIE_EP_TYPE) in imx_pcie_configure_type()
230 id = imx_pcie->controller_id; in imx_pcie_configure_type()
233 if (!drvdata->mode_mask[0]) in imx_pcie_configure_type()
237 if (!drvdata->mode_mask[id]) in imx_pcie_configure_type()
240 mask = drvdata->mode_mask[id]; in imx_pcie_configure_type()
241 val = mode << (ffs(mask) - 1); in imx_pcie_configure_type()
243 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val); in imx_pcie_configure_type()
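The mode value above is placed into a per-controller GPR field whose position is derived from the field mask itself: ffs(mask) returns the 1-based index of the mask's lowest set bit, so mode << (ffs(mask) - 1) left-aligns the mode bits with the field before regmap_update_bits() applies them under that mask. A minimal standalone sketch with hypothetical mask/mode values (not taken from the driver):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int mask = 0x00000030;	/* hypothetical two-bit mode field at bits 5:4 */
		unsigned int mode = 0x2;	/* hypothetical mode encoding */
		unsigned int val  = mode << (ffs(mask) - 1);	/* 0x2 << 4 = 0x20 */

		/* regmap_update_bits(gpr, mode_off, mask, val) would then touch only bits 5:4 */
		printf("mask=%#x mode=%#x -> val=%#x\n", mask, mode, val);
		return 0;
	}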
248 struct dw_pcie *pci = imx_pcie->pci; in pcie_phy_poll_ack()
264 return -ETIMEDOUT; in pcie_phy_poll_ack()
269 struct dw_pcie *pci = imx_pcie->pci; in pcie_phy_wait_ack()
289 /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
292 struct dw_pcie *pci = imx_pcie->pci; in pcie_phy_read()
318 struct dw_pcie *pci = imx_pcie->pci; in pcie_phy_write()
343 /* wait for ack de-assertion */ in pcie_phy_write()
361 /* wait for ack de-assertion */ in pcie_phy_write()
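The pcie_phy_read()/pcie_phy_write() fragments above use the controller's indirect PHY access protocol: the register address is latched with a "capture address" strobe, data with a "capture data" strobe, and after every strobe the code waits first for the PHY's ack to assert and then for it to de-assert before moving on. A minimal sketch of the ack poll helper this relies on, assuming this driver's PCIE_PHY_STAT register and PCIE_PHY_STAT_ACK bit definitions (the real pcie_phy_poll_ack() may differ in loop bounds and details):

	/*
	 * Illustrative sketch: poll the PHY status register until the ACK bit
	 * reaches the expected level (true for assertion, false for de-assertion).
	 */
	static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
	{
		struct dw_pcie *pci = imx_pcie->pci;
		unsigned int iterations = 100;	/* arbitrary bound for the sketch */
		bool ack;

		do {
			ack = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) & PCIE_PHY_STAT_ACK;
			if (ack == exp_val)
				return 0;
			udelay(1);
		} while (--iterations);

		return -ETIMEDOUT;
	}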
374 regmap_update_bits(imx_pcie->iomuxc_gpr, in imx8mq_pcie_init_phy()
382 if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000) in imx8mq_pcie_init_phy()
383 regmap_update_bits(imx_pcie->iomuxc_gpr, in imx8mq_pcie_init_phy()
393 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); in imx7d_pcie_init_phy()
400 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx_pcie_init_phy()
404 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx_pcie_init_phy()
407 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, in imx_pcie_init_phy()
409 imx_pcie->tx_deemph_gen1 << 0); in imx_pcie_init_phy()
410 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, in imx_pcie_init_phy()
412 imx_pcie->tx_deemph_gen2_3p5db << 6); in imx_pcie_init_phy()
413 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, in imx_pcie_init_phy()
415 imx_pcie->tx_deemph_gen2_6db << 12); in imx_pcie_init_phy()
416 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, in imx_pcie_init_phy()
418 imx_pcie->tx_swing_full << 18); in imx_pcie_init_phy()
419 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, in imx_pcie_init_phy()
421 imx_pcie->tx_swing_low << 25); in imx_pcie_init_phy()
427 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx6sx_pcie_init_phy()
436 struct device *dev = imx_pcie->pci->dev; in imx7d_pcie_wait_for_phy_pll_lock()
438 if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr, in imx7d_pcie_wait_for_phy_pll_lock()
453 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) in imx_setup_phy_mpll()
456 for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) in imx_setup_phy_mpll()
457 if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0) in imx_setup_phy_mpll()
458 phy_rate = clk_get_rate(imx_pcie->clks[i].clk); in imx_setup_phy_mpll()
476 dev_err(imx_pcie->pci->dev, in imx_setup_phy_mpll()
478 return -EINVAL; in imx_setup_phy_mpll()
502 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) in imx_pcie_reset_phy()
529 * make it look like it read all-ones. in imx6q_pcie_abort_handler()
537 val = -1; in imx6q_pcie_abort_handler()
539 regs->uregs[reg] = val; in imx6q_pcie_abort_handler()
540 regs->ARM_pc += 4; in imx6q_pcie_abort_handler()
545 regs->uregs[reg] = -1; in imx6q_pcie_abort_handler()
546 regs->ARM_pc += 4; in imx6q_pcie_abort_handler()
560 if (dev->pm_domain) in imx_pcie_attach_pd()
563 imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); in imx_pcie_attach_pd()
564 if (IS_ERR(imx_pcie->pd_pcie)) in imx_pcie_attach_pd()
565 return PTR_ERR(imx_pcie->pd_pcie); in imx_pcie_attach_pd()
567 if (!imx_pcie->pd_pcie) in imx_pcie_attach_pd()
569 link = device_link_add(dev, imx_pcie->pd_pcie, in imx_pcie_attach_pd()
575 return -EINVAL; in imx_pcie_attach_pd()
578 imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); in imx_pcie_attach_pd()
579 if (IS_ERR(imx_pcie->pd_pcie_phy)) in imx_pcie_attach_pd()
580 return PTR_ERR(imx_pcie->pd_pcie_phy); in imx_pcie_attach_pd()
582 link = device_link_add(dev, imx_pcie->pd_pcie_phy, in imx_pcie_attach_pd()
588 return -EINVAL; in imx_pcie_attach_pd()
597 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx6sx_pcie_enable_ref_clk()
607 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); in imx6q_pcie_enable_ref_clk()
615 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); in imx6q_pcie_enable_ref_clk()
617 regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); in imx6q_pcie_enable_ref_clk()
618 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); in imx6q_pcie_enable_ref_clk()
629 regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); in imx8mm_pcie_enable_ref_clk()
630 regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); in imx8mm_pcie_enable_ref_clk()
639 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx7d_pcie_enable_ref_clk()
646 struct dw_pcie *pci = imx_pcie->pci; in imx_pcie_clk_enable()
647 struct device *dev = pci->dev; in imx_pcie_clk_enable()
650 ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); in imx_pcie_clk_enable()
654 if (imx_pcie->drvdata->enable_ref_clk) { in imx_pcie_clk_enable()
655 ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); in imx_pcie_clk_enable()
667 clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); in imx_pcie_clk_enable()
674 if (imx_pcie->drvdata->enable_ref_clk) in imx_pcie_clk_disable()
675 imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); in imx_pcie_clk_disable()
676 clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); in imx_pcie_clk_disable()
682 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx6sx_pcie_core_reset()
686 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET, in imx6sx_pcie_core_reset()
693 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST, in imx6qp_pcie_core_reset()
706 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); in imx6q_pcie_core_reset()
707 regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); in imx6q_pcie_core_reset()
714 struct dw_pcie *pci = imx_pcie->pci; in imx7d_pcie_core_reset()
715 struct device *dev = pci->dev; in imx7d_pcie_core_reset()
729 * The Duty-cycle Corrector calibration must be disabled. in imx7d_pcie_core_reset()
731 * 1. De-assert the G_RST signal by clearing in imx7d_pcie_core_reset()
733 * 2. De-assert DCC_FB_EN by writing data "0x29" to the register in imx7d_pcie_core_reset()
739 * 5. De-assert the CMN_RST signal by clearing register bit in imx7d_pcie_core_reset()
743 if (likely(imx_pcie->phy_base)) { in imx7d_pcie_core_reset()
744 /* De-assert DCC_FB_EN */ in imx7d_pcie_core_reset()
745 writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4); in imx7d_pcie_core_reset()
748 imx_pcie->phy_base + PCIE_PHY_CMN_REG24); in imx7d_pcie_core_reset()
750 writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26); in imx7d_pcie_core_reset()
752 dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n"); in imx7d_pcie_core_reset()
760 reset_control_assert(imx_pcie->pciephy_reset); in imx_pcie_assert_core_reset()
761 reset_control_assert(imx_pcie->apps_reset); in imx_pcie_assert_core_reset()
763 if (imx_pcie->drvdata->core_reset) in imx_pcie_assert_core_reset()
764 imx_pcie->drvdata->core_reset(imx_pcie, true); in imx_pcie_assert_core_reset()
767 gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1); in imx_pcie_assert_core_reset()
772 reset_control_deassert(imx_pcie->pciephy_reset); in imx_pcie_deassert_core_reset()
774 if (imx_pcie->drvdata->core_reset) in imx_pcie_deassert_core_reset()
775 imx_pcie->drvdata->core_reset(imx_pcie, false); in imx_pcie_deassert_core_reset()
778 if (imx_pcie->reset_gpiod) { in imx_pcie_deassert_core_reset()
780 gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0); in imx_pcie_deassert_core_reset()
790 struct dw_pcie *pci = imx_pcie->pci; in imx_pcie_wait_for_speed_change()
791 struct device *dev = pci->dev; in imx_pcie_wait_for_speed_change()
804 return -ETIMEDOUT; in imx_pcie_wait_for_speed_change()
810 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; in imx_pcie_ltssm_enable()
811 u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP); in imx_pcie_ltssm_enable()
814 tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP); in imx_pcie_ltssm_enable()
815 phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp)); in imx_pcie_ltssm_enable()
816 if (drvdata->ltssm_mask) in imx_pcie_ltssm_enable()
817 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, in imx_pcie_ltssm_enable()
818 drvdata->ltssm_mask); in imx_pcie_ltssm_enable()
820 reset_control_deassert(imx_pcie->apps_reset); in imx_pcie_ltssm_enable()
826 const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; in imx_pcie_ltssm_disable()
828 phy_set_speed(imx_pcie->phy, 0); in imx_pcie_ltssm_disable()
829 if (drvdata->ltssm_mask) in imx_pcie_ltssm_disable()
830 regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, in imx_pcie_ltssm_disable()
831 drvdata->ltssm_mask, 0); in imx_pcie_ltssm_disable()
833 reset_control_assert(imx_pcie->apps_reset); in imx_pcie_ltssm_disable()
839 struct device *dev = pci->dev; in imx_pcie_start_link()
863 if (pci->max_link_speed > 1) { in imx_pcie_start_link()
868 tmp |= pci->max_link_speed; in imx_pcie_start_link()
880 if (imx_pcie->drvdata->flags & in imx_pcie_start_link()
885 * occurs and we go Gen1 -> yep, Gen1. The difference in imx_pcie_start_link()
906 imx_pcie->link_is_up = true; in imx_pcie_start_link()
912 imx_pcie->link_is_up = false; in imx_pcie_start_link()
922 struct device *dev = pci->dev; in imx_pcie_stop_link()
931 struct device *dev = pci->dev; in imx_pcie_host_init()
935 if (imx_pcie->vpcie) { in imx_pcie_host_init()
936 ret = regulator_enable(imx_pcie->vpcie); in imx_pcie_host_init()
946 if (imx_pcie->drvdata->init_phy) in imx_pcie_host_init()
947 imx_pcie->drvdata->init_phy(imx_pcie); in imx_pcie_host_init()
957 if (imx_pcie->phy) { in imx_pcie_host_init()
958 ret = phy_init(imx_pcie->phy); in imx_pcie_host_init()
964 ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); in imx_pcie_host_init()
970 ret = phy_power_on(imx_pcie->phy); in imx_pcie_host_init()
988 phy_power_off(imx_pcie->phy); in imx_pcie_host_init()
990 phy_exit(imx_pcie->phy); in imx_pcie_host_init()
994 if (imx_pcie->vpcie) in imx_pcie_host_init()
995 regulator_disable(imx_pcie->vpcie); in imx_pcie_host_init()
1004 if (imx_pcie->phy) { in imx_pcie_host_exit()
1005 if (phy_power_off(imx_pcie->phy)) in imx_pcie_host_exit()
1006 dev_err(pci->dev, "unable to power off PHY\n"); in imx_pcie_host_exit()
1007 phy_exit(imx_pcie->phy); in imx_pcie_host_exit()
1011 if (imx_pcie->vpcie) in imx_pcie_host_exit()
1012 regulator_disable(imx_pcie->vpcie); in imx_pcie_host_exit()
1018 struct dw_pcie_rp *pp = &pcie->pp; in imx_pcie_cpu_addr_fixup()
1021 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP)) in imx_pcie_cpu_addr_fixup()
1024 entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); in imx_pcie_cpu_addr_fixup()
1028 return cpu_addr - entry->offset; in imx_pcie_cpu_addr_fixup()
1064 dev_err(pci->dev, "UNKNOWN IRQ type\n"); in imx_pcie_ep_raise_irq()
1065 return -EINVAL; in imx_pcie_ep_raise_irq()
1083 * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
1084 * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
1086 * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
1087 * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
1088 * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
1089 * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
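The table above documents the endpoint BARs' default type, size, and whether the size is programmable. In the DWC endpoint framework these constraints are advertised to the EPF core through a struct pci_epc_features, which imx_pcie_ep_get_features() below simply returns from drvdata. A hedged sketch of such a descriptor, assuming the pci_epc_features/pci_epc_bar_desc fields of recent kernels; the values are illustrative (a fixed-size 64 KB BAR1, as in the table), not copied from the driver:

	/*
	 * Illustrative only: field names follow struct pci_epc_features and
	 * struct pci_epc_bar_desc from recent kernels; the hypothetical values
	 * mirror the table above (BAR1 fixed at 64 KB, others programmable).
	 */
	static const struct pci_epc_features imx_pcie_example_epc_features = {
		.linkup_notifier = false,
		.msi_capable = true,
		.msix_capable = false,
		.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
		.align = SZ_4K,
	};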
1103 return imx_pcie->drvdata->epc_features; in imx_pcie_ep_get_features()
1118 struct dw_pcie *pci = imx_pcie->pci; in imx_add_pcie_ep()
1119 struct dw_pcie_rp *pp = &pci->pp; in imx_add_pcie_ep()
1120 struct device *dev = pci->dev; in imx_add_pcie_ep()
1123 ep = &pci->ep; in imx_add_pcie_ep()
1124 ep->ops = &pcie_ep_ops; in imx_add_pcie_ep()
1126 switch (imx_pcie->drvdata->variant) { in imx_add_pcie_ep()
1137 pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset; in imx_add_pcie_ep()
1145 if (device_property_match_string(dev, "reg-names", "dbi2") >= 0) in imx_add_pcie_ep()
1146 pci->dbi_base2 = NULL; in imx_add_pcie_ep()
1151 ep->page_size = imx_pcie->drvdata->epc_features->align; in imx_add_pcie_ep()
1166 pci_epc_init_notify(ep->epc); in imx_add_pcie_ep()
1176 struct device *dev = imx_pcie->pci->dev; in imx_pcie_pm_turnoff()
1179 if (imx_pcie->turnoff_reset) { in imx_pcie_pm_turnoff()
1180 reset_control_assert(imx_pcie->turnoff_reset); in imx_pcie_pm_turnoff()
1181 reset_control_deassert(imx_pcie->turnoff_reset); in imx_pcie_pm_turnoff()
1186 switch (imx_pcie->drvdata->variant) { in imx_pcie_pm_turnoff()
1189 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx_pcie_pm_turnoff()
1192 regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, in imx_pcie_pm_turnoff()
1204 * The standard recommends a 1-10ms timeout after which to in imx_pcie_pm_turnoff()
1215 struct dw_pcie *pci = imx_pcie->pci; in imx_pcie_msi_save_restore()
1221 imx_pcie->msi_ctrl = val; in imx_pcie_msi_save_restore()
1224 val = imx_pcie->msi_ctrl; in imx_pcie_msi_save_restore()
1234 struct dw_pcie_rp *pp = &imx_pcie->pci->pp; in imx_pcie_suspend_noirq()
1236 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) in imx_pcie_suspend_noirq()
1241 imx_pcie_stop_link(imx_pcie->pci); in imx_pcie_suspend_noirq()
1251 struct dw_pcie_rp *pp = &imx_pcie->pci->pp; in imx_pcie_resume_noirq()
1253 if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) in imx_pcie_resume_noirq()
1262 if (imx_pcie->link_is_up) in imx_pcie_resume_noirq()
1263 imx_pcie_start_link(imx_pcie->pci); in imx_pcie_resume_noirq()
1275 struct device *dev = &pdev->dev; in imx_pcie_probe()
1280 struct device_node *node = dev->of_node; in imx_pcie_probe()
1287 return -ENOMEM; in imx_pcie_probe()
1291 return -ENOMEM; in imx_pcie_probe()
1293 pci->dev = dev; in imx_pcie_probe()
1294 pci->ops = &dw_pcie_ops; in imx_pcie_probe()
1295 pci->pp.ops = &imx_pcie_host_ops; in imx_pcie_probe()
1297 imx_pcie->pci = pci; in imx_pcie_probe()
1298 imx_pcie->drvdata = of_device_get_match_data(dev); in imx_pcie_probe()
1301 np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); in imx_pcie_probe()
1310 imx_pcie->phy_base = devm_ioremap_resource(dev, &res); in imx_pcie_probe()
1311 if (IS_ERR(imx_pcie->phy_base)) in imx_pcie_probe()
1312 return PTR_ERR(imx_pcie->phy_base); in imx_pcie_probe()
1315 pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); in imx_pcie_probe()
1316 if (IS_ERR(pci->dbi_base)) in imx_pcie_probe()
1317 return PTR_ERR(pci->dbi_base); in imx_pcie_probe()
1320 imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); in imx_pcie_probe()
1321 if (IS_ERR(imx_pcie->reset_gpiod)) in imx_pcie_probe()
1322 return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod), in imx_pcie_probe()
1324 gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); in imx_pcie_probe()
1326 if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) in imx_pcie_probe()
1327 return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); in imx_pcie_probe()
1329 for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) in imx_pcie_probe()
1330 imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; in imx_pcie_probe()
1333 ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks); in imx_pcie_probe()
1338 imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); in imx_pcie_probe()
1339 if (IS_ERR(imx_pcie->phy)) in imx_pcie_probe()
1340 return dev_err_probe(dev, PTR_ERR(imx_pcie->phy), in imx_pcie_probe()
1345 imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); in imx_pcie_probe()
1346 if (IS_ERR(imx_pcie->apps_reset)) in imx_pcie_probe()
1347 return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset), in imx_pcie_probe()
1352 imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); in imx_pcie_probe()
1353 if (IS_ERR(imx_pcie->pciephy_reset)) in imx_pcie_probe()
1354 return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset), in imx_pcie_probe()
1358 switch (imx_pcie->drvdata->variant) { in imx_pcie_probe()
1362 if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) in imx_pcie_probe()
1363 imx_pcie->controller_id = 1; in imx_pcie_probe()
1370 imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); in imx_pcie_probe()
1371 if (IS_ERR(imx_pcie->turnoff_reset)) { in imx_pcie_probe()
1373 return PTR_ERR(imx_pcie->turnoff_reset); in imx_pcie_probe()
1376 if (imx_pcie->drvdata->gpr) { in imx_pcie_probe()
1378 imx_pcie->iomuxc_gpr = in imx_pcie_probe()
1379 syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr); in imx_pcie_probe()
1380 if (IS_ERR(imx_pcie->iomuxc_gpr)) in imx_pcie_probe()
1381 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), in imx_pcie_probe()
1398 imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config); in imx_pcie_probe()
1399 if (IS_ERR(imx_pcie->iomuxc_gpr)) in imx_pcie_probe()
1400 return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), in imx_pcie_probe()
1405 if (of_property_read_u32(node, "fsl,tx-deemph-gen1", in imx_pcie_probe()
1406 &imx_pcie->tx_deemph_gen1)) in imx_pcie_probe()
1407 imx_pcie->tx_deemph_gen1 = 0; in imx_pcie_probe()
1409 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", in imx_pcie_probe()
1410 &imx_pcie->tx_deemph_gen2_3p5db)) in imx_pcie_probe()
1411 imx_pcie->tx_deemph_gen2_3p5db = 0; in imx_pcie_probe()
1413 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", in imx_pcie_probe()
1414 &imx_pcie->tx_deemph_gen2_6db)) in imx_pcie_probe()
1415 imx_pcie->tx_deemph_gen2_6db = 20; in imx_pcie_probe()
1417 if (of_property_read_u32(node, "fsl,tx-swing-full", in imx_pcie_probe()
1418 &imx_pcie->tx_swing_full)) in imx_pcie_probe()
1419 imx_pcie->tx_swing_full = 127; in imx_pcie_probe()
1421 if (of_property_read_u32(node, "fsl,tx-swing-low", in imx_pcie_probe()
1422 &imx_pcie->tx_swing_low)) in imx_pcie_probe()
1423 imx_pcie->tx_swing_low = 127; in imx_pcie_probe()
1426 pci->max_link_speed = 1; in imx_pcie_probe()
1427 of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed); in imx_pcie_probe()
1429 imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); in imx_pcie_probe()
1430 if (IS_ERR(imx_pcie->vpcie)) { in imx_pcie_probe()
1431 if (PTR_ERR(imx_pcie->vpcie) != -ENODEV) in imx_pcie_probe()
1432 return PTR_ERR(imx_pcie->vpcie); in imx_pcie_probe()
1433 imx_pcie->vpcie = NULL; in imx_pcie_probe()
1436 imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); in imx_pcie_probe()
1437 if (IS_ERR(imx_pcie->vph)) { in imx_pcie_probe()
1438 if (PTR_ERR(imx_pcie->vph) != -ENODEV) in imx_pcie_probe()
1439 return PTR_ERR(imx_pcie->vph); in imx_pcie_probe()
1440 imx_pcie->vph = NULL; in imx_pcie_probe()
1449 if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { in imx_pcie_probe()
1454 ret = dw_pcie_host_init(&pci->pp); in imx_pcie_probe()
1490 .gpr = "fsl,imx6q-iomuxc-gpr",
1506 .gpr = "fsl,imx6q-iomuxc-gpr",
1523 .gpr = "fsl,imx6q-iomuxc-gpr",
1539 .gpr = "fsl,imx7d-iomuxc-gpr",
1552 .gpr = "fsl,imx8mq-iomuxc-gpr",
1567 .gpr = "fsl,imx8mm-iomuxc-gpr",
1579 .gpr = "fsl,imx8mp-iomuxc-gpr",
1609 .gpr = "fsl,imx8mq-iomuxc-gpr",
1625 .gpr = "fsl,imx8mm-iomuxc-gpr",
1638 .gpr = "fsl,imx8mp-iomuxc-gpr",
1663 { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
1664 { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
1665 { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
1666 { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
1667 { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
1668 { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
1669 { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
1670 { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
1671 { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
1672 { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
1673 { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
1674 { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
1675 { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
1681 .name = "imx6q-pcie",
1693 struct pci_bus *bus = dev->bus; in imx_pcie_quirk()
1694 struct dw_pcie_rp *pp = bus->sysdata; in imx_pcie_quirk()
1697 if (!bus->dev.parent || !bus->dev.parent->parent) in imx_pcie_quirk()
1701 if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver) in imx_pcie_quirk()
1712 if (imx_pcie->drvdata->dbi_length) { in imx_pcie_quirk()
1713 dev->cfg_size = imx_pcie->drvdata->dbi_length; in imx_pcie_quirk()
1714 dev_info(&dev->dev, "Limiting cfg_size to %d\n", in imx_pcie_quirk()
1715 dev->cfg_size); in imx_pcie_quirk()
1729 return -ENODEV; in imx_pcie_init()
1735 * by kernel and since imx6q_pcie_abort_handler() is a no-op, in imx_pcie_init()
1740 "external abort on non-linefetch"); in imx_pcie_init()