Lines Matching +full:tegra210-pmc
1 // SPDX-License-Identifier: GPL-2.0+
9 * Copyright (c) 2008-2009, NVIDIA Corporation.
11 * Bits taken from arch/arm/mach-dove/pcie.c
44 #include <soc/tegra/pmc.h>
256 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
378 writel(value, pcie->afi + offset); in afi_writel()
383 return readl(pcie->afi + offset); in afi_readl()
389 writel(value, pcie->pads + offset); in pads_writel()
394 return readl(pcie->pads + offset); in pads_readl()
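The afi_*/pads_* fragments above (lines 378-394) are the driver's register accessors: thin wrappers that add a byte offset to the ioremapped AFI or PADS aperture. Reconstructed from what is shown, they look roughly like this; the exact signatures are assumed from the call sites, and the pads_writel()/pads_readl() pair is analogous with pcie->pads:

/* Write/read a 32-bit AFI register at the given byte offset. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}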
429 struct tegra_pcie *pcie = bus->sysdata; in tegra_pcie_map_bus()
432 if (bus->number == 0) { in tegra_pcie_map_bus()
436 list_for_each_entry(port, &pcie->ports, list) { in tegra_pcie_map_bus()
437 if (port->index + 1 == slot) { in tegra_pcie_map_bus()
438 addr = port->base + (where & ~3); in tegra_pcie_map_bus()
446 offset = tegra_pcie_conf_offset(bus->number, devfn, where); in tegra_pcie_map_bus()
449 base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); in tegra_pcie_map_bus()
453 addr = pcie->cfg + (offset & (SZ_4K - 1)); in tegra_pcie_map_bus()
462 if (bus->number == 0) in tegra_pcie_config_read()
472 if (bus->number == 0) in tegra_pcie_config_write()
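In tegra_pcie_map_bus() above, accesses to bus 0 are routed to the per-port root-port apertures, while everything downstream goes through a sliding 4 KiB window into the extended configuration aperture: the caller computes an FPCI offset, derives the window base from its upper bits, and indexes into the window with the low 12 bits. A sketch of the offset packing this implies; the exact bit layout of tegra_pcie_conf_offset() is an assumption based on the masking visible at lines 449 and 453:

/*
 * Sketch: pack bus/devfn/register into an FPCI configuration offset.
 * Field positions are assumed.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) |	/* extended register number */
	       (bus << 16) |			/* bus number */
	       (PCI_SLOT(devfn) << 11) |	/* device number */
	       (PCI_FUNC(devfn) << 8) |		/* function number */
	       (where & 0xff);			/* register number */
}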
487 const struct tegra_pcie_soc *soc = port->pcie->soc; in tegra_pcie_port_get_pex_ctrl()
490 switch (port->index) { in tegra_pcie_port_get_pex_ctrl()
500 ret = soc->afi_pex2_ctrl; in tegra_pcie_port_get_pex_ctrl()
513 if (port->reset_gpio) { in tegra_pcie_port_reset()
514 gpiod_set_value(port->reset_gpio, 1); in tegra_pcie_port_reset()
516 value = afi_readl(port->pcie, ctrl); in tegra_pcie_port_reset()
518 afi_writel(port->pcie, value, ctrl); in tegra_pcie_port_reset()
523 if (port->reset_gpio) { in tegra_pcie_port_reset()
524 gpiod_set_value(port->reset_gpio, 0); in tegra_pcie_port_reset()
526 value = afi_readl(port->pcie, ctrl); in tegra_pcie_port_reset()
528 afi_writel(port->pcie, value, ctrl); in tegra_pcie_port_reset()
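tegra_pcie_port_reset() above pulses PERST# either through an optional per-port GPIO or through the port's AFI PEX control register. Filled out, the sequence is an assert, a short delay, and a deassert; in this sketch the AFI_PEX_CTRL_RST bit name and the delay length are assumptions:

/* Pulse the fundamental reset for this root port. */
if (port->reset_gpio) {
	gpiod_set_value(port->reset_gpio, 1);
} else {
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;		/* bit name assumed */
	afi_writel(port->pcie, value, ctrl);
}

usleep_range(1000, 2000);			/* delay assumed */

if (port->reset_gpio) {
	gpiod_set_value(port->reset_gpio, 0);
} else {
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}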
534 const struct tegra_pcie_soc *soc = port->pcie->soc; in tegra_pcie_enable_rp_features()
538 value = readl(port->base + RP_VEND_CTL1); in tegra_pcie_enable_rp_features()
540 writel(value, port->base + RP_VEND_CTL1); in tegra_pcie_enable_rp_features()
543 value = readl(port->base + RP_VEND_XP); in tegra_pcie_enable_rp_features()
546 writel(value, port->base + RP_VEND_XP); in tegra_pcie_enable_rp_features()
552 value = readl(port->base + RP_VEND_XP_BIST); in tegra_pcie_enable_rp_features()
554 writel(value, port->base + RP_VEND_XP_BIST); in tegra_pcie_enable_rp_features()
556 value = readl(port->base + RP_PRIV_MISC); in tegra_pcie_enable_rp_features()
560 if (soc->update_clamp_threshold) { in tegra_pcie_enable_rp_features()
567 writel(value, port->base + RP_PRIV_MISC); in tegra_pcie_enable_rp_features()
572 const struct tegra_pcie_soc *soc = port->pcie->soc; in tegra_pcie_program_ectl_settings()
575 value = readl(port->base + RP_ECTL_2_R1); in tegra_pcie_program_ectl_settings()
577 value |= soc->ectl.regs.rp_ectl_2_r1; in tegra_pcie_program_ectl_settings()
578 writel(value, port->base + RP_ECTL_2_R1); in tegra_pcie_program_ectl_settings()
580 value = readl(port->base + RP_ECTL_4_R1); in tegra_pcie_program_ectl_settings()
582 value |= soc->ectl.regs.rp_ectl_4_r1 << in tegra_pcie_program_ectl_settings()
584 writel(value, port->base + RP_ECTL_4_R1); in tegra_pcie_program_ectl_settings()
586 value = readl(port->base + RP_ECTL_5_R1); in tegra_pcie_program_ectl_settings()
588 value |= soc->ectl.regs.rp_ectl_5_r1; in tegra_pcie_program_ectl_settings()
589 writel(value, port->base + RP_ECTL_5_R1); in tegra_pcie_program_ectl_settings()
591 value = readl(port->base + RP_ECTL_6_R1); in tegra_pcie_program_ectl_settings()
593 value |= soc->ectl.regs.rp_ectl_6_r1; in tegra_pcie_program_ectl_settings()
594 writel(value, port->base + RP_ECTL_6_R1); in tegra_pcie_program_ectl_settings()
596 value = readl(port->base + RP_ECTL_2_R2); in tegra_pcie_program_ectl_settings()
598 value |= soc->ectl.regs.rp_ectl_2_r2; in tegra_pcie_program_ectl_settings()
599 writel(value, port->base + RP_ECTL_2_R2); in tegra_pcie_program_ectl_settings()
601 value = readl(port->base + RP_ECTL_4_R2); in tegra_pcie_program_ectl_settings()
603 value |= soc->ectl.regs.rp_ectl_4_r2 << in tegra_pcie_program_ectl_settings()
605 writel(value, port->base + RP_ECTL_4_R2); in tegra_pcie_program_ectl_settings()
607 value = readl(port->base + RP_ECTL_5_R2); in tegra_pcie_program_ectl_settings()
609 value |= soc->ectl.regs.rp_ectl_5_r2; in tegra_pcie_program_ectl_settings()
610 writel(value, port->base + RP_ECTL_5_R2); in tegra_pcie_program_ectl_settings()
612 value = readl(port->base + RP_ECTL_6_R2); in tegra_pcie_program_ectl_settings()
614 value |= soc->ectl.regs.rp_ectl_6_r2; in tegra_pcie_program_ectl_settings()
615 writel(value, port->base + RP_ECTL_6_R2); in tegra_pcie_program_ectl_settings()
620 const struct tegra_pcie_soc *soc = port->pcie->soc; in tegra_pcie_apply_sw_fixup()
625 * instability in deskew logic on lane-0. Increase the deskew in tegra_pcie_apply_sw_fixup()
628 if (soc->program_deskew_time) { in tegra_pcie_apply_sw_fixup()
629 value = readl(port->base + RP_VEND_CTL0); in tegra_pcie_apply_sw_fixup()
632 writel(value, port->base + RP_VEND_CTL0); in tegra_pcie_apply_sw_fixup()
635 if (soc->update_fc_timer) { in tegra_pcie_apply_sw_fixup()
636 value = readl(port->base + RP_VEND_XP); in tegra_pcie_apply_sw_fixup()
638 value |= soc->update_fc_threshold; in tegra_pcie_apply_sw_fixup()
639 writel(value, port->base + RP_VEND_XP); in tegra_pcie_apply_sw_fixup()
644 * root port advertises both Gen-1 and Gen-2 speeds in Tegra. in tegra_pcie_apply_sw_fixup()
646 * only Gen-1 and after link is up, retrain link to Gen-2 speed in tegra_pcie_apply_sw_fixup()
648 value = readl(port->base + RP_LINK_CONTROL_STATUS_2); in tegra_pcie_apply_sw_fixup()
651 writel(value, port->base + RP_LINK_CONTROL_STATUS_2); in tegra_pcie_apply_sw_fixup()
657 const struct tegra_pcie_soc *soc = port->pcie->soc; in tegra_pcie_port_enable()
661 value = afi_readl(port->pcie, ctrl); in tegra_pcie_port_enable()
664 if (soc->has_pex_clkreq_en) in tegra_pcie_port_enable()
669 afi_writel(port->pcie, value, ctrl); in tegra_pcie_port_enable()
673 if (soc->force_pca_enable) { in tegra_pcie_port_enable()
674 value = readl(port->base + RP_VEND_CTL2); in tegra_pcie_port_enable()
676 writel(value, port->base + RP_VEND_CTL2); in tegra_pcie_port_enable()
681 if (soc->ectl.enable) in tegra_pcie_port_enable()
690 const struct tegra_pcie_soc *soc = port->pcie->soc; in tegra_pcie_port_disable()
694 value = afi_readl(port->pcie, ctrl); in tegra_pcie_port_disable()
696 afi_writel(port->pcie, value, ctrl); in tegra_pcie_port_disable()
699 value = afi_readl(port->pcie, ctrl); in tegra_pcie_port_disable()
701 if (soc->has_pex_clkreq_en) in tegra_pcie_port_disable()
705 afi_writel(port->pcie, value, ctrl); in tegra_pcie_port_disable()
708 value = afi_readl(port->pcie, AFI_PCIE_CONFIG); in tegra_pcie_port_disable()
709 value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); in tegra_pcie_port_disable()
710 value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); in tegra_pcie_port_disable()
711 afi_writel(port->pcie, value, AFI_PCIE_CONFIG); in tegra_pcie_port_disable()
716 struct tegra_pcie *pcie = port->pcie; in tegra_pcie_port_free()
717 struct device *dev = pcie->dev; in tegra_pcie_port_free()
719 devm_iounmap(dev, port->base); in tegra_pcie_port_free()
720 devm_release_mem_region(dev, port->regs.start, in tegra_pcie_port_free()
721 resource_size(&port->regs)); in tegra_pcie_port_free()
722 list_del(&port->list); in tegra_pcie_port_free()
729 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; in tegra_pcie_fixup_class()
748 struct tegra_pcie *pcie = pdev->bus->sysdata; in tegra_pcie_map_irq()
755 irq = pcie->irq; in tegra_pcie_map_irq()
780 struct device *dev = pcie->dev; in tegra_pcie_isr()
818 * - 0xfdfc000000: I/O space
819 * - 0xfdfe000000: type 0 configuration space
820 * - 0xfdff000000: type 1 configuration space
821 * - 0xfe00000000: type 0 extended configuration space
822 * - 0xfe10000000: type 1 extended configuration space
831 size = resource_size(&pcie->cs); in tegra_pcie_setup_translations()
832 afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START); in tegra_pcie_setup_translations()
835 resource_list_for_each_entry(entry, &bridge->windows) { in tegra_pcie_setup_translations()
837 struct resource *res = entry->res; in tegra_pcie_setup_translations()
845 axi_address = pci_pio_to_address(res->start); in tegra_pcie_setup_translations()
851 fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1; in tegra_pcie_setup_translations()
852 axi_address = res->start; in tegra_pcie_setup_translations()
854 if (res->flags & IORESOURCE_PREFETCH) { in tegra_pcie_setup_translations()
879 if (pcie->soc->has_cache_bars) { in tegra_pcie_setup_translations()
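The comment block at lines 818-822 lists the fixed FPCI addresses the AFI forwards to, and tegra_pcie_setup_translations() programs one AXI-to-FPCI BAR per window. For the configuration-space window only the AXI side is set up front; the FPCI side is the sliding 4 KiB window that tegra_pcie_map_bus() repositions on each access. A sketch of both halves, where the AFI_AXI_BAR0_SZ/AFI_FPCI_BAR0 register names and the 4 KiB size granularity are assumptions:

/* Setup time: point AXI BAR 0 at the "cs" aperture. */
size = resource_size(&pcie->cs);
afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);	/* size in 4 KiB units (assumed) */

/* Access time: slide the FPCI window onto the wanted 4 KiB page. */
base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
afi_writel(pcie, base, AFI_FPCI_BAR0);		/* register name assumed */
addr = pcie->cfg + (offset & (SZ_4K - 1));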
896 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_pll_wait()
902 value = pads_readl(pcie, soc->pads_pll_ctl); in tegra_pcie_pll_wait()
907 return -ETIMEDOUT; in tegra_pcie_pll_wait()
912 struct device *dev = pcie->dev; in tegra_pcie_phy_enable()
913 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_phy_enable()
929 value = pads_readl(pcie, soc->pads_pll_ctl); in tegra_pcie_phy_enable()
931 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel; in tegra_pcie_phy_enable()
932 pads_writel(pcie, value, soc->pads_pll_ctl); in tegra_pcie_phy_enable()
935 value = pads_readl(pcie, soc->pads_pll_ctl); in tegra_pcie_phy_enable()
937 pads_writel(pcie, value, soc->pads_pll_ctl); in tegra_pcie_phy_enable()
942 value = pads_readl(pcie, soc->pads_pll_ctl); in tegra_pcie_phy_enable()
944 pads_writel(pcie, value, soc->pads_pll_ctl); in tegra_pcie_phy_enable()
968 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_phy_disable()
982 value = pads_readl(pcie, soc->pads_pll_ctl); in tegra_pcie_phy_disable()
984 pads_writel(pcie, value, soc->pads_pll_ctl); in tegra_pcie_phy_disable()
993 struct device *dev = port->pcie->dev; in tegra_pcie_port_phy_power_on()
997 for (i = 0; i < port->lanes; i++) { in tegra_pcie_port_phy_power_on()
998 err = phy_power_on(port->phys[i]); in tegra_pcie_port_phy_power_on()
1010 struct device *dev = port->pcie->dev; in tegra_pcie_port_phy_power_off()
1014 for (i = 0; i < port->lanes; i++) { in tegra_pcie_port_phy_power_off()
1015 err = phy_power_off(port->phys[i]); in tegra_pcie_port_phy_power_off()
1028 struct device *dev = pcie->dev; in tegra_pcie_phy_power_on()
1032 if (pcie->legacy_phy) { in tegra_pcie_phy_power_on()
1033 if (pcie->phy) in tegra_pcie_phy_power_on()
1034 err = phy_power_on(pcie->phy); in tegra_pcie_phy_power_on()
1044 list_for_each_entry(port, &pcie->ports, list) { in tegra_pcie_phy_power_on()
1049 port->index, err); in tegra_pcie_phy_power_on()
1059 struct device *dev = pcie->dev; in tegra_pcie_phy_power_off()
1063 if (pcie->legacy_phy) { in tegra_pcie_phy_power_off()
1064 if (pcie->phy) in tegra_pcie_phy_power_off()
1065 err = phy_power_off(pcie->phy); in tegra_pcie_phy_power_off()
1075 list_for_each_entry(port, &pcie->ports, list) { in tegra_pcie_phy_power_off()
1080 port->index, err); in tegra_pcie_phy_power_off()
1090 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_enable_controller()
1095 if (pcie->phy) { in tegra_pcie_enable_controller()
1103 if (soc->has_pex_bias_ctrl) in tegra_pcie_enable_controller()
1109 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config; in tegra_pcie_enable_controller()
1112 list_for_each_entry(port, &pcie->ports, list) { in tegra_pcie_enable_controller()
1113 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); in tegra_pcie_enable_controller()
1114 value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); in tegra_pcie_enable_controller()
1119 if (soc->has_gen2) { in tegra_pcie_enable_controller()
1139 if (soc->has_intr_prsnt_sense) in tegra_pcie_enable_controller()
1154 struct device *dev = pcie->dev; in tegra_pcie_power_off()
1155 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_power_off()
1158 reset_control_assert(pcie->afi_rst); in tegra_pcie_power_off()
1160 clk_disable_unprepare(pcie->pll_e); in tegra_pcie_power_off()
1161 if (soc->has_cml_clk) in tegra_pcie_power_off()
1162 clk_disable_unprepare(pcie->cml_clk); in tegra_pcie_power_off()
1163 clk_disable_unprepare(pcie->afi_clk); in tegra_pcie_power_off()
1165 if (!dev->pm_domain) in tegra_pcie_power_off()
1168 err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies); in tegra_pcie_power_off()
1175 struct device *dev = pcie->dev; in tegra_pcie_power_on()
1176 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_power_on()
1179 reset_control_assert(pcie->pcie_xrst); in tegra_pcie_power_on()
1180 reset_control_assert(pcie->afi_rst); in tegra_pcie_power_on()
1181 reset_control_assert(pcie->pex_rst); in tegra_pcie_power_on()
1183 if (!dev->pm_domain) in tegra_pcie_power_on()
1187 err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies); in tegra_pcie_power_on()
1191 if (!dev->pm_domain) { in tegra_pcie_power_on()
1204 err = clk_prepare_enable(pcie->afi_clk); in tegra_pcie_power_on()
1210 if (soc->has_cml_clk) { in tegra_pcie_power_on()
1211 err = clk_prepare_enable(pcie->cml_clk); in tegra_pcie_power_on()
1218 err = clk_prepare_enable(pcie->pll_e); in tegra_pcie_power_on()
1224 reset_control_deassert(pcie->afi_rst); in tegra_pcie_power_on()
1229 if (soc->has_cml_clk) in tegra_pcie_power_on()
1230 clk_disable_unprepare(pcie->cml_clk); in tegra_pcie_power_on()
1232 clk_disable_unprepare(pcie->afi_clk); in tegra_pcie_power_on()
1234 if (!dev->pm_domain) in tegra_pcie_power_on()
1237 regulator_bulk_disable(pcie->num_supplies, pcie->supplies); in tegra_pcie_power_on()
1244 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_apply_pad_settings()
1247 pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); in tegra_pcie_apply_pad_settings()
1249 if (soc->num_ports > 2) in tegra_pcie_apply_pad_settings()
1250 pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); in tegra_pcie_apply_pad_settings()
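The comment at line 256 says the PADS_REFCLK_CFG* registers form an array of 16-bit entries, and tegra_pcie_apply_pad_settings() above only writes CFG1 when the SoC has more than two ports, which suggests two per-port entries packed into each 32-bit register. A sketch of that packing; the helper below is hypothetical and only illustrates the layout:

/* Hypothetical helper: place a 16-bit refclk entry for 'port' into a CFG word. */
static u32 pads_refclk_cfg_insert(u32 cfg, unsigned int port, u16 entry)
{
	unsigned int shift = (port & 1) * 16;	/* two ports per 32-bit register */

	cfg &= ~(0xffffu << shift);
	cfg |= (u32)entry << shift;

	return cfg;
}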
1255 struct device *dev = pcie->dev; in tegra_pcie_clocks_get()
1256 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_clocks_get()
1258 pcie->pex_clk = devm_clk_get(dev, "pex"); in tegra_pcie_clocks_get()
1259 if (IS_ERR(pcie->pex_clk)) in tegra_pcie_clocks_get()
1260 return PTR_ERR(pcie->pex_clk); in tegra_pcie_clocks_get()
1262 pcie->afi_clk = devm_clk_get(dev, "afi"); in tegra_pcie_clocks_get()
1263 if (IS_ERR(pcie->afi_clk)) in tegra_pcie_clocks_get()
1264 return PTR_ERR(pcie->afi_clk); in tegra_pcie_clocks_get()
1266 pcie->pll_e = devm_clk_get(dev, "pll_e"); in tegra_pcie_clocks_get()
1267 if (IS_ERR(pcie->pll_e)) in tegra_pcie_clocks_get()
1268 return PTR_ERR(pcie->pll_e); in tegra_pcie_clocks_get()
1270 if (soc->has_cml_clk) { in tegra_pcie_clocks_get()
1271 pcie->cml_clk = devm_clk_get(dev, "cml"); in tegra_pcie_clocks_get()
1272 if (IS_ERR(pcie->cml_clk)) in tegra_pcie_clocks_get()
1273 return PTR_ERR(pcie->cml_clk); in tegra_pcie_clocks_get()
1281 struct device *dev = pcie->dev; in tegra_pcie_resets_get()
1283 pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex"); in tegra_pcie_resets_get()
1284 if (IS_ERR(pcie->pex_rst)) in tegra_pcie_resets_get()
1285 return PTR_ERR(pcie->pex_rst); in tegra_pcie_resets_get()
1287 pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi"); in tegra_pcie_resets_get()
1288 if (IS_ERR(pcie->afi_rst)) in tegra_pcie_resets_get()
1289 return PTR_ERR(pcie->afi_rst); in tegra_pcie_resets_get()
1291 pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x"); in tegra_pcie_resets_get()
1292 if (IS_ERR(pcie->pcie_xrst)) in tegra_pcie_resets_get()
1293 return PTR_ERR(pcie->pcie_xrst); in tegra_pcie_resets_get()
1300 struct device *dev = pcie->dev; in tegra_pcie_phys_get_legacy()
1303 pcie->phy = devm_phy_optional_get(dev, "pcie"); in tegra_pcie_phys_get_legacy()
1304 if (IS_ERR(pcie->phy)) { in tegra_pcie_phys_get_legacy()
1305 err = PTR_ERR(pcie->phy); in tegra_pcie_phys_get_legacy()
1310 err = phy_init(pcie->phy); in tegra_pcie_phys_get_legacy()
1316 pcie->legacy_phy = true; in tegra_pcie_phys_get_legacy()
1329 name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index); in devm_of_phy_optional_get_index()
1331 return ERR_PTR(-ENOMEM); in devm_of_phy_optional_get_index()
1341 struct device *dev = port->pcie->dev; in tegra_pcie_port_get_phys()
1346 port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL); in tegra_pcie_port_get_phys()
1347 if (!port->phys) in tegra_pcie_port_get_phys()
1348 return -ENOMEM; in tegra_pcie_port_get_phys()
1350 for (i = 0; i < port->lanes; i++) { in tegra_pcie_port_get_phys()
1351 phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i); in tegra_pcie_port_get_phys()
1365 port->phys[i] = phy; in tegra_pcie_port_get_phys()
1373 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_phys_get()
1374 struct device_node *np = pcie->dev->of_node; in tegra_pcie_phys_get()
1378 if (!soc->has_gen2 || of_property_present(np, "phys")) in tegra_pcie_phys_get()
1381 list_for_each_entry(port, &pcie->ports, list) { in tegra_pcie_phys_get()
1393 struct device *dev = pcie->dev; in tegra_pcie_phys_put()
1396 if (pcie->legacy_phy) { in tegra_pcie_phys_put()
1397 err = phy_exit(pcie->phy); in tegra_pcie_phys_put()
1403 list_for_each_entry(port, &pcie->ports, list) { in tegra_pcie_phys_put()
1404 for (i = 0; i < port->lanes; i++) { in tegra_pcie_phys_put()
1405 err = phy_exit(port->phys[i]); in tegra_pcie_phys_put()
1415 struct device *dev = pcie->dev; in tegra_pcie_get_resources()
1418 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_get_resources()
1433 if (soc->program_uphy) { in tegra_pcie_get_resources()
1441 pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads"); in tegra_pcie_get_resources()
1442 if (IS_ERR(pcie->pads)) { in tegra_pcie_get_resources()
1443 err = PTR_ERR(pcie->pads); in tegra_pcie_get_resources()
1447 pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi"); in tegra_pcie_get_resources()
1448 if (IS_ERR(pcie->afi)) { in tegra_pcie_get_resources()
1449 err = PTR_ERR(pcie->afi); in tegra_pcie_get_resources()
1456 err = -EADDRNOTAVAIL; in tegra_pcie_get_resources()
1460 pcie->cs = *res; in tegra_pcie_get_resources()
1463 pcie->cs.end = pcie->cs.start + SZ_4K - 1; in tegra_pcie_get_resources()
1465 pcie->cfg = devm_ioremap_resource(dev, &pcie->cs); in tegra_pcie_get_resources()
1466 if (IS_ERR(pcie->cfg)) { in tegra_pcie_get_resources()
1467 err = PTR_ERR(pcie->cfg); in tegra_pcie_get_resources()
1476 pcie->irq = err; in tegra_pcie_get_resources()
1478 err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie); in tegra_pcie_get_resources()
1487 if (soc->program_uphy) in tegra_pcie_get_resources()
1495 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_put_resources()
1497 if (pcie->irq > 0) in tegra_pcie_put_resources()
1498 free_irq(pcie->irq, pcie); in tegra_pcie_put_resources()
1500 if (soc->program_uphy) in tegra_pcie_put_resources()
1508 struct tegra_pcie *pcie = port->pcie; in tegra_pcie_pme_turnoff()
1509 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_pme_turnoff()
1515 val |= (0x1 << soc->ports[port->index].pme.turnoff_bit); in tegra_pcie_pme_turnoff()
1518 ack_bit = soc->ports[port->index].pme.ack_bit; in tegra_pcie_pme_turnoff()
1519 err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val, in tegra_pcie_pme_turnoff()
1522 dev_err(pcie->dev, "PME Ack is not received on port: %d\n", in tegra_pcie_pme_turnoff()
1523 port->index); in tegra_pcie_pme_turnoff()
1528 val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit); in tegra_pcie_pme_turnoff()
1536 struct tegra_msi *msi = &pcie->msi; in tegra_pcie_msi_irq()
1537 struct device *dev = pcie->dev; in tegra_pcie_msi_irq()
1550 ret = generic_handle_domain_irq(msi->domain->parent, index); in tegra_pcie_msi_irq()
1596 unsigned int index = d->hwirq / 32; in tegra_msi_irq_ack()
1599 afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index)); in tegra_msi_irq_ack()
1606 unsigned int index = d->hwirq / 32; in tegra_msi_irq_mask()
1610 spin_lock_irqsave(&msi->mask_lock, flags); in tegra_msi_irq_mask()
1612 value &= ~BIT(d->hwirq % 32); in tegra_msi_irq_mask()
1614 spin_unlock_irqrestore(&msi->mask_lock, flags); in tegra_msi_irq_mask()
1621 unsigned int index = d->hwirq / 32; in tegra_msi_irq_unmask()
1625 spin_lock_irqsave(&msi->mask_lock, flags); in tegra_msi_irq_unmask()
1627 value |= BIT(d->hwirq % 32); in tegra_msi_irq_unmask()
1629 spin_unlock_irqrestore(&msi->mask_lock, flags); in tegra_msi_irq_unmask()
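The tegra_msi_irq_mask()/tegra_msi_irq_unmask() fragments above show the per-vector enable bits grouped into banks of 32 and protected by mask_lock. Reconstructed, the mask path is a locked read-modify-write of the bank selected by hwirq / 32; the AFI_MSI_EN_VEC() register macro and the msi_to_pcie() helper are assumptions:

static void tegra_msi_irq_mask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);	/* helper assumed */
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));	/* register assumed */
	value &= ~BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}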
1636 msg->address_lo = lower_32_bits(msi->phys); in tegra_compose_msi_msg()
1637 msg->address_hi = upper_32_bits(msi->phys); in tegra_compose_msi_msg()
1638 msg->data = data->hwirq; in tegra_compose_msi_msg()
1652 struct tegra_msi *msi = domain->host_data; in tegra_msi_domain_alloc()
1656 mutex_lock(&msi->map_lock); in tegra_msi_domain_alloc()
1658 hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs)); in tegra_msi_domain_alloc()
1660 mutex_unlock(&msi->map_lock); in tegra_msi_domain_alloc()
1663 return -ENOSPC; in tegra_msi_domain_alloc()
1667 &tegra_msi_bottom_chip, domain->host_data, in tegra_msi_domain_alloc()
1679 struct tegra_msi *msi = domain->host_data; in tegra_msi_domain_free()
1681 mutex_lock(&msi->map_lock); in tegra_msi_domain_free()
1683 bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs)); in tegra_msi_domain_free()
1685 mutex_unlock(&msi->map_lock); in tegra_msi_domain_free()
1702 struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); in tegra_allocate_domains()
1708 dev_err(pcie->dev, "failed to create IRQ domain\n"); in tegra_allocate_domains()
1709 return -ENOMEM; in tegra_allocate_domains()
1713 msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent); in tegra_allocate_domains()
1714 if (!msi->domain) { in tegra_allocate_domains()
1715 dev_err(pcie->dev, "failed to create MSI domain\n"); in tegra_allocate_domains()
1717 return -ENOMEM; in tegra_allocate_domains()
1725 struct irq_domain *parent = msi->domain->parent; in tegra_free_domains()
1727 irq_domain_remove(msi->domain); in tegra_free_domains()
1733 struct platform_device *pdev = to_platform_device(pcie->dev); in tegra_pcie_msi_setup()
1734 struct tegra_msi *msi = &pcie->msi; in tegra_pcie_msi_setup()
1735 struct device *dev = pcie->dev; in tegra_pcie_msi_setup()
1738 mutex_init(&msi->map_lock); in tegra_pcie_msi_setup()
1739 spin_lock_init(&msi->mask_lock); in tegra_pcie_msi_setup()
1751 msi->irq = err; in tegra_pcie_msi_setup()
1753 irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie); in tegra_pcie_msi_setup()
1755 /* Though the PCIe controller can address >32-bit address space, to in tegra_pcie_msi_setup()
1756 * facilitate endpoints that support only 32-bit MSI target address, in tegra_pcie_msi_setup()
1757 * the mask is set to 32-bit to make sure that MSI target address is in tegra_pcie_msi_setup()
1758 * always a 32-bit address in tegra_pcie_msi_setup()
1766 msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL, in tegra_pcie_msi_setup()
1768 if (!msi->virt) { in tegra_pcie_msi_setup()
1770 err = -ENOMEM; in tegra_pcie_msi_setup()
1777 irq_set_chained_handler_and_data(msi->irq, NULL, NULL); in tegra_pcie_msi_setup()
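The comment at lines 1755-1758 explains why the MSI target address must stay below 4 GiB even though the controller can address more. Presumably this is enforced by setting a 32-bit DMA mask before allocating the target page, along these lines (the attrs flag and the error label are assumptions; the allocation itself matches the fragment at line 1766):

/* Keep the MSI target in the low 32 bits, then allocate an unused page. */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err < 0) {
	dev_err(dev, "failed to set DMA mask: %d\n", err);
	goto free_irq;					/* label assumed */
}

msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
			    DMA_ATTR_NO_KERNEL_MAPPING);	/* attr assumed */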
1787 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_enable_msi()
1788 struct tegra_msi *msi = &pcie->msi; in tegra_pcie_enable_msi()
1792 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); in tegra_pcie_enable_msi()
1793 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); in tegra_pcie_enable_msi()
1798 bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR); in tegra_pcie_enable_msi()
1810 struct tegra_msi *msi = &pcie->msi; in tegra_pcie_msi_teardown()
1813 dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys, in tegra_pcie_msi_teardown()
1817 irq = irq_find_mapping(msi->domain, i); in tegra_pcie_msi_teardown()
1822 irq_set_chained_handler_and_data(msi->irq, NULL, NULL); in tegra_pcie_msi_teardown()
1852 struct device *dev = pcie->dev; in tegra_pcie_get_xbar_config()
1853 struct device_node *np = dev->of_node; in tegra_pcie_get_xbar_config()
1855 if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { in tegra_pcie_get_xbar_config()
1879 } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") || in tegra_pcie_get_xbar_config()
1880 of_device_is_compatible(np, "nvidia,tegra210-pcie")) { in tegra_pcie_get_xbar_config()
1892 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { in tegra_pcie_get_xbar_config()
1909 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { in tegra_pcie_get_xbar_config()
1912 dev_info(dev, "single-mode configuration\n"); in tegra_pcie_get_xbar_config()
1917 dev_info(dev, "dual-mode configuration\n"); in tegra_pcie_get_xbar_config()
1923 return -EINVAL; in tegra_pcie_get_xbar_config()
1939 snprintf(property, 32, "%s-supply", supplies[i].supply); in of_regulator_bulk_available()
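The single fragment at line 1939 is the heart of of_regulator_bulk_available(): it builds each "<name>-supply" property name and checks that all of them exist before the driver commits to the non-legacy supply set. Reconstructed it looks roughly like the following; the of_property_present() call is an assumption, any presence check would do:

static bool of_regulator_bulk_available(struct device_node *np,
					struct regulator_bulk_data *supplies,
					unsigned int num_supplies)
{
	char property[32];
	unsigned int i;

	for (i = 0; i < num_supplies; i++) {
		snprintf(property, 32, "%s-supply", supplies[i].supply);

		if (!of_property_present(np, property))
			return false;
	}

	return true;
}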
1951 * number of cases but is not future proof. However to preserve backwards-
1957 struct device *dev = pcie->dev; in tegra_pcie_get_legacy_regulators()
1958 struct device_node *np = dev->of_node; in tegra_pcie_get_legacy_regulators()
1960 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) in tegra_pcie_get_legacy_regulators()
1961 pcie->num_supplies = 3; in tegra_pcie_get_legacy_regulators()
1962 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) in tegra_pcie_get_legacy_regulators()
1963 pcie->num_supplies = 2; in tegra_pcie_get_legacy_regulators()
1965 if (pcie->num_supplies == 0) { in tegra_pcie_get_legacy_regulators()
1967 return -ENODEV; in tegra_pcie_get_legacy_regulators()
1970 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, in tegra_pcie_get_legacy_regulators()
1971 sizeof(*pcie->supplies), in tegra_pcie_get_legacy_regulators()
1973 if (!pcie->supplies) in tegra_pcie_get_legacy_regulators()
1974 return -ENOMEM; in tegra_pcie_get_legacy_regulators()
1976 pcie->supplies[0].supply = "pex-clk"; in tegra_pcie_get_legacy_regulators()
1977 pcie->supplies[1].supply = "vdd"; in tegra_pcie_get_legacy_regulators()
1979 if (pcie->num_supplies > 2) in tegra_pcie_get_legacy_regulators()
1980 pcie->supplies[2].supply = "avdd"; in tegra_pcie_get_legacy_regulators()
1982 return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies); in tegra_pcie_get_legacy_regulators()
1996 struct device *dev = pcie->dev; in tegra_pcie_get_regulators()
1997 struct device_node *np = dev->of_node; in tegra_pcie_get_regulators()
2000 if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { in tegra_pcie_get_regulators()
2001 pcie->num_supplies = 4; in tegra_pcie_get_regulators()
2003 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, in tegra_pcie_get_regulators()
2004 sizeof(*pcie->supplies), in tegra_pcie_get_regulators()
2006 if (!pcie->supplies) in tegra_pcie_get_regulators()
2007 return -ENOMEM; in tegra_pcie_get_regulators()
2009 pcie->supplies[i++].supply = "dvdd-pex"; in tegra_pcie_get_regulators()
2010 pcie->supplies[i++].supply = "hvdd-pex-pll"; in tegra_pcie_get_regulators()
2011 pcie->supplies[i++].supply = "hvdd-pex"; in tegra_pcie_get_regulators()
2012 pcie->supplies[i++].supply = "vddio-pexctl-aud"; in tegra_pcie_get_regulators()
2013 } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { in tegra_pcie_get_regulators()
2014 pcie->num_supplies = 3; in tegra_pcie_get_regulators()
2016 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, in tegra_pcie_get_regulators()
2017 sizeof(*pcie->supplies), in tegra_pcie_get_regulators()
2019 if (!pcie->supplies) in tegra_pcie_get_regulators()
2020 return -ENOMEM; in tegra_pcie_get_regulators()
2022 pcie->supplies[i++].supply = "hvddio-pex"; in tegra_pcie_get_regulators()
2023 pcie->supplies[i++].supply = "dvddio-pex"; in tegra_pcie_get_regulators()
2024 pcie->supplies[i++].supply = "vddio-pex-ctl"; in tegra_pcie_get_regulators()
2025 } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { in tegra_pcie_get_regulators()
2026 pcie->num_supplies = 4; in tegra_pcie_get_regulators()
2028 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, in tegra_pcie_get_regulators()
2029 sizeof(*pcie->supplies), in tegra_pcie_get_regulators()
2031 if (!pcie->supplies) in tegra_pcie_get_regulators()
2032 return -ENOMEM; in tegra_pcie_get_regulators()
2034 pcie->supplies[i++].supply = "avddio-pex"; in tegra_pcie_get_regulators()
2035 pcie->supplies[i++].supply = "dvddio-pex"; in tegra_pcie_get_regulators()
2036 pcie->supplies[i++].supply = "hvdd-pex"; in tegra_pcie_get_regulators()
2037 pcie->supplies[i++].supply = "vddio-pex-ctl"; in tegra_pcie_get_regulators()
2038 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { in tegra_pcie_get_regulators()
2049 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + in tegra_pcie_get_regulators()
2052 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, in tegra_pcie_get_regulators()
2053 sizeof(*pcie->supplies), in tegra_pcie_get_regulators()
2055 if (!pcie->supplies) in tegra_pcie_get_regulators()
2056 return -ENOMEM; in tegra_pcie_get_regulators()
2058 pcie->supplies[i++].supply = "avdd-pex-pll"; in tegra_pcie_get_regulators()
2059 pcie->supplies[i++].supply = "hvdd-pex"; in tegra_pcie_get_regulators()
2060 pcie->supplies[i++].supply = "vddio-pex-ctl"; in tegra_pcie_get_regulators()
2061 pcie->supplies[i++].supply = "avdd-plle"; in tegra_pcie_get_regulators()
2064 pcie->supplies[i++].supply = "avdd-pexa"; in tegra_pcie_get_regulators()
2065 pcie->supplies[i++].supply = "vdd-pexa"; in tegra_pcie_get_regulators()
2069 pcie->supplies[i++].supply = "avdd-pexb"; in tegra_pcie_get_regulators()
2070 pcie->supplies[i++].supply = "vdd-pexb"; in tegra_pcie_get_regulators()
2072 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { in tegra_pcie_get_regulators()
2073 pcie->num_supplies = 5; in tegra_pcie_get_regulators()
2075 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, in tegra_pcie_get_regulators()
2076 sizeof(*pcie->supplies), in tegra_pcie_get_regulators()
2078 if (!pcie->supplies) in tegra_pcie_get_regulators()
2079 return -ENOMEM; in tegra_pcie_get_regulators()
2081 pcie->supplies[0].supply = "avdd-pex"; in tegra_pcie_get_regulators()
2082 pcie->supplies[1].supply = "vdd-pex"; in tegra_pcie_get_regulators()
2083 pcie->supplies[2].supply = "avdd-pex-pll"; in tegra_pcie_get_regulators()
2084 pcie->supplies[3].supply = "avdd-plle"; in tegra_pcie_get_regulators()
2085 pcie->supplies[4].supply = "vddio-pex-clk"; in tegra_pcie_get_regulators()
2088 if (of_regulator_bulk_available(dev->of_node, pcie->supplies, in tegra_pcie_get_regulators()
2089 pcie->num_supplies)) in tegra_pcie_get_regulators()
2090 return devm_regulator_bulk_get(dev, pcie->num_supplies, in tegra_pcie_get_regulators()
2091 pcie->supplies); in tegra_pcie_get_regulators()
2100 devm_kfree(dev, pcie->supplies); in tegra_pcie_get_regulators()
2101 pcie->num_supplies = 0; in tegra_pcie_get_regulators()
2108 struct device *dev = pcie->dev; in tegra_pcie_parse_dt()
2109 struct device_node *np = dev->of_node, *port; in tegra_pcie_parse_dt()
2110 const struct tegra_pcie_soc *soc = pcie->soc; in tegra_pcie_parse_dt()
2130 if (index < 1 || index > soc->num_ports) { in tegra_pcie_parse_dt()
2132 err = -EINVAL; in tegra_pcie_parse_dt()
2136 index--; in tegra_pcie_parse_dt()
2138 err = of_property_read_u32(port, "nvidia,num-lanes", &value); in tegra_pcie_parse_dt()
2147 err = -EINVAL; in tegra_pcie_parse_dt()
2158 mask |= ((1 << value) - 1) << lane; in tegra_pcie_parse_dt()
2163 err = -ENOMEM; in tegra_pcie_parse_dt()
2167 err = of_address_to_resource(port, 0, &rp->regs); in tegra_pcie_parse_dt()
2173 INIT_LIST_HEAD(&rp->list); in tegra_pcie_parse_dt()
2174 rp->index = index; in tegra_pcie_parse_dt()
2175 rp->lanes = value; in tegra_pcie_parse_dt()
2176 rp->pcie = pcie; in tegra_pcie_parse_dt()
2177 rp->np = port; in tegra_pcie_parse_dt()
2179 rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); in tegra_pcie_parse_dt()
2180 if (IS_ERR(rp->base)) { in tegra_pcie_parse_dt()
2181 err = PTR_ERR(rp->base); in tegra_pcie_parse_dt()
2185 label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index); in tegra_pcie_parse_dt()
2187 err = -ENOMEM; in tegra_pcie_parse_dt()
2192 * Returns -ENOENT if reset-gpios property is not populated in tegra_pcie_parse_dt()
2196 rp->reset_gpio = devm_fwnode_gpiod_get(dev, in tegra_pcie_parse_dt()
2201 if (IS_ERR(rp->reset_gpio)) { in tegra_pcie_parse_dt()
2202 if (PTR_ERR(rp->reset_gpio) == -ENOENT) { in tegra_pcie_parse_dt()
2203 rp->reset_gpio = NULL; in tegra_pcie_parse_dt()
2206 PTR_ERR(rp->reset_gpio)); in tegra_pcie_parse_dt()
2207 err = PTR_ERR(rp->reset_gpio); in tegra_pcie_parse_dt()
2212 list_add_tail(&rp->list, &pcie->ports); in tegra_pcie_parse_dt()
2215 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); in tegra_pcie_parse_dt()
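The expression at line 2158 accumulates one bit per physical lane claimed by the root ports found so far, with 'lane' advancing by each port's nvidia,num-lanes value. A short worked example of that arithmetic:

/*
 * Example: a port with nvidia,num-lanes = 4 at lane offset 0 followed by
 * a port with nvidia,num-lanes = 1 at lane offset 4:
 *
 *   ((1 << 4) - 1) << 0  = 0x0000000f
 *   ((1 << 1) - 1) << 4  = 0x00000010
 *   accumulated mask     = 0x0000001f
 */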
2240 struct device *dev = port->pcie->dev; in tegra_pcie_port_check_link()
2245 value = readl(port->base + RP_PRIV_MISC); in tegra_pcie_port_check_link()
2248 writel(value, port->base + RP_PRIV_MISC); in tegra_pcie_port_check_link()
2254 value = readl(port->base + RP_VEND_XP); in tegra_pcie_port_check_link()
2260 } while (--timeout); in tegra_pcie_port_check_link()
2263 dev_dbg(dev, "link %u down, retrying\n", port->index); in tegra_pcie_port_check_link()
2270 value = readl(port->base + RP_LINK_CONTROL_STATUS); in tegra_pcie_port_check_link()
2276 } while (--timeout); in tegra_pcie_port_check_link()
2280 } while (--retries); in tegra_pcie_port_check_link()
2287 struct device *dev = pcie->dev; in tegra_pcie_change_link_speed()
2292 list_for_each_entry(port, &pcie->ports, list) { in tegra_pcie_change_link_speed()
2299 value = readl(port->base + RP_LINK_CONTROL_STATUS_2); in tegra_pcie_change_link_speed()
2302 writel(value, port->base + RP_LINK_CONTROL_STATUS_2); in tegra_pcie_change_link_speed()
2311 value = readl(port->base + RP_LINK_CONTROL_STATUS); in tegra_pcie_change_link_speed()
2320 port->index); in tegra_pcie_change_link_speed()
2323 value = readl(port->base + RP_LINK_CONTROL_STATUS); in tegra_pcie_change_link_speed()
2325 writel(value, port->base + RP_LINK_CONTROL_STATUS); in tegra_pcie_change_link_speed()
2330 value = readl(port->base + RP_LINK_CONTROL_STATUS); in tegra_pcie_change_link_speed()
2339 port->index); in tegra_pcie_change_link_speed()
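The comment at lines 644 and 646 describes the workaround: the root port advertises only Gen-1 until the link is up, and tegra_pcie_change_link_speed() then retrains it to Gen-2. The fragments above give the shape of that sequence; filled out it is roughly the following, where RP_LINK_CONTROL_STATUS(_2) are taken to mirror the standard Link Control/Status registers and the field macros applied to them are assumptions:

/* Raise the target link speed to 5.0 GT/s and request retraining. */
value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
value &= ~PCI_EXP_LNKCTL2_TLS;			/* field use assumed */
value |= PCI_EXP_LNKCTL2_TLS_5_0GT;
writel(value, port->base + RP_LINK_CONTROL_STATUS_2);

value = readl(port->base + RP_LINK_CONTROL_STATUS);
value |= PCI_EXP_LNKCTL_RL;			/* retrain link */
writel(value, port->base + RP_LINK_CONTROL_STATUS);

/* Afterwards the link status is polled to confirm the new speed. */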
2345 struct device *dev = pcie->dev; in tegra_pcie_enable_ports()
2348 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { in tegra_pcie_enable_ports()
2350 port->index, port->lanes); in tegra_pcie_enable_ports()
2356 reset_control_deassert(pcie->pcie_xrst); in tegra_pcie_enable_ports()
2358 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { in tegra_pcie_enable_ports()
2362 dev_info(dev, "link %u down, ignoring\n", port->index); in tegra_pcie_enable_ports()
2368 if (pcie->soc->has_gen2) in tegra_pcie_enable_ports()
2376 reset_control_assert(pcie->pcie_xrst); in tegra_pcie_disable_ports()
2378 list_for_each_entry_safe(port, tmp, &pcie->ports, list) in tegra_pcie_disable_ports()
2523 { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2524 { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2525 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2526 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2527 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2534 struct tegra_pcie *pcie = s->private; in tegra_pcie_ports_seq_start()
2536 if (list_empty(&pcie->ports)) in tegra_pcie_ports_seq_start()
2541 return seq_list_start(&pcie->ports, *pos); in tegra_pcie_ports_seq_start()
2546 struct tegra_pcie *pcie = s->private; in tegra_pcie_ports_seq_next()
2548 return seq_list_next(v, &pcie->ports, pos); in tegra_pcie_ports_seq_next()
2563 value = readl(port->base + RP_VEND_XP); in tegra_pcie_ports_seq_show()
2568 value = readl(port->base + RP_LINK_CONTROL_STATUS); in tegra_pcie_ports_seq_show()
2573 seq_printf(s, "%2u ", port->index); in tegra_pcie_ports_seq_show()
2600 debugfs_remove_recursive(pcie->debugfs); in tegra_pcie_debugfs_exit()
2601 pcie->debugfs = NULL; in tegra_pcie_debugfs_exit()
2606 pcie->debugfs = debugfs_create_dir("pcie", NULL); in tegra_pcie_debugfs_init()
2608 debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie, in tegra_pcie_debugfs_init()
2614 struct device *dev = &pdev->dev; in tegra_pcie_probe()
2621 return -ENOMEM; in tegra_pcie_probe()
2624 host->sysdata = pcie; in tegra_pcie_probe()
2627 pcie->soc = of_device_get_match_data(dev); in tegra_pcie_probe()
2628 INIT_LIST_HEAD(&pcie->ports); in tegra_pcie_probe()
2629 pcie->dev = dev; in tegra_pcie_probe()
2647 pm_runtime_enable(pcie->dev); in tegra_pcie_probe()
2648 err = pm_runtime_get_sync(pcie->dev); in tegra_pcie_probe()
2654 host->ops = &tegra_pcie_ops; in tegra_pcie_probe()
2655 host->map_irq = tegra_pcie_map_irq; in tegra_pcie_probe()
2669 pm_runtime_put_sync(pcie->dev); in tegra_pcie_probe()
2670 pm_runtime_disable(pcie->dev); in tegra_pcie_probe()
2686 pci_stop_root_bus(host->bus); in tegra_pcie_remove()
2687 pci_remove_root_bus(host->bus); in tegra_pcie_remove()
2688 pm_runtime_put_sync(pcie->dev); in tegra_pcie_remove()
2689 pm_runtime_disable(pcie->dev); in tegra_pcie_remove()
2696 list_for_each_entry_safe(port, tmp, &pcie->ports, list) in tegra_pcie_remove()
2706 list_for_each_entry(port, &pcie->ports, list) in tegra_pcie_pm_suspend()
2717 if (pcie->soc->program_uphy) { in tegra_pcie_pm_suspend()
2723 reset_control_assert(pcie->pex_rst); in tegra_pcie_pm_suspend()
2724 clk_disable_unprepare(pcie->pex_clk); in tegra_pcie_pm_suspend()
2758 err = clk_prepare_enable(pcie->pex_clk); in tegra_pcie_pm_resume()
2764 reset_control_deassert(pcie->pex_rst); in tegra_pcie_pm_resume()
2766 if (pcie->soc->program_uphy) { in tegra_pcie_pm_resume()
2780 reset_control_assert(pcie->pex_rst); in tegra_pcie_pm_resume()
2781 clk_disable_unprepare(pcie->pex_clk); in tegra_pcie_pm_resume()
2797 .name = "tegra-pcie",