Lines Matching +full:opp +full:-peak +full:-kbps

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
26 #include <linux/pci-ecam.h>
39 #include "../pci-host-common.h"
40 #include "pcie-designware.h"
41 #include "pcie-qcom-common.h"
271 * struct qcom_pcie_cfg - Per SoC config struct
304 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
311 list_for_each_entry(port, &pcie->ports, list) in qcom_perst_assert()
312 gpiod_set_value_cansleep(port->reset, val); in qcom_perst_assert()
336 writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE); in qcom_pci_config_ecam()
337 writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI); in qcom_pci_config_ecam()
350 addr = pci->dbi_phys_addr + SZ_4K; in qcom_pci_config_ecam()
351 writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE); in qcom_pci_config_ecam()
352 writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI); in qcom_pci_config_ecam()
354 writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE); in qcom_pci_config_ecam()
355 writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI); in qcom_pci_config_ecam()
357 addr_end = pci->dbi_phys_addr + SZ_1M - 1; in qcom_pci_config_ecam()
359 writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT); in qcom_pci_config_ecam()
360 writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI); in qcom_pci_config_ecam()
362 writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT); in qcom_pci_config_ecam()
363 writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI); in qcom_pci_config_ecam()
365 val = readl_relaxed(pcie->parf + PARF_SYS_CTRL); in qcom_pci_config_ecam()
367 writel_relaxed(val, pcie->parf + PARF_SYS_CTRL); in qcom_pci_config_ecam()
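
The SZ_4K and SZ_1M constants above follow directly from the standard ECAM layout: each function owns a 4 KiB slice of configuration space and each bus spans 1 MiB. A minimal, illustrative helper (plain ECAM spec math, not code from this driver) makes that granularity explicit:

	/* Illustrative only: standard PCIe ECAM offset layout
	 * (bus: bits 27:20, device: 19:15, function: 14:12, register: 11:0). */
	static unsigned long ecam_offset(unsigned int bus, unsigned int dev,
					 unsigned int fn, unsigned int reg)
	{
		return ((unsigned long)bus << 20) | (dev << 15) | (fn << 12) | reg;
	}

For example, ecam_offset(0, 0, 0, 0) is the root port's own 4 KiB block at the start of the window, and ecam_offset(1, 0, 0, 0) is 0x100000 (SZ_1M), typically the first device behind the root port.
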
376 if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) in qcom_pcie_start_link()
380 if (pcie->cfg->ops->ltssm_enable) in qcom_pcie_start_link()
381 pcie->cfg->ops->ltssm_enable(pcie); in qcom_pcie_start_link()
392 if (!pcie->cfg->no_l0s) in qcom_pcie_clear_aspm_l0s()
399 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_clear_aspm_l0s()
401 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_clear_aspm_l0s()
413 val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_clear_hpc()
415 writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_clear_hpc()
422 struct dw_pcie *pci = pcie->pci; in qcom_pcie_configure_dbi_base()
424 if (pci->dbi_phys_addr) { in qcom_pcie_configure_dbi_base()
429 writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_base()
431 writel(SLV_ADDR_SPACE_SZ, pcie->parf + in qcom_pcie_configure_dbi_base()
438 struct dw_pcie *pci = pcie->pci; in qcom_pcie_configure_dbi_atu_base()
440 if (pci->dbi_phys_addr) { in qcom_pcie_configure_dbi_atu_base()
446 writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
448 writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
451 if (pci->atu_phys_addr) { in qcom_pcie_configure_dbi_atu_base()
452 writel(lower_32_bits(pci->atu_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
454 writel(upper_32_bits(pci->atu_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
458 writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2); in qcom_pcie_configure_dbi_atu_base()
459 writel(SLV_ADDR_SPACE_SZ, pcie->parf + in qcom_pcie_configure_dbi_atu_base()
466 struct dw_pcie *pci = pcie->pci; in qcom_pcie_2_1_0_ltssm_enable()
469 if (!pci->elbi_base) { in qcom_pcie_2_1_0_ltssm_enable()
470 dev_err(pci->dev, "ELBI is not present\n"); in qcom_pcie_2_1_0_ltssm_enable()
474 val = readl(pci->elbi_base + ELBI_SYS_CTRL); in qcom_pcie_2_1_0_ltssm_enable()
476 writel(val, pci->elbi_base + ELBI_SYS_CTRL); in qcom_pcie_2_1_0_ltssm_enable()
481 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_get_resources_2_1_0()
482 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_1_0()
483 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_1_0()
484 bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064"); in qcom_pcie_get_resources_2_1_0()
487 res->supplies[0].supply = "vdda"; in qcom_pcie_get_resources_2_1_0()
488 res->supplies[1].supply = "vdda_phy"; in qcom_pcie_get_resources_2_1_0()
489 res->supplies[2].supply = "vdda_refclk"; in qcom_pcie_get_resources_2_1_0()
490 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), in qcom_pcie_get_resources_2_1_0()
491 res->supplies); in qcom_pcie_get_resources_2_1_0()
495 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_1_0()
496 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_1_0()
498 return res->num_clks; in qcom_pcie_get_resources_2_1_0()
501 res->resets[0].id = "pci"; in qcom_pcie_get_resources_2_1_0()
502 res->resets[1].id = "axi"; in qcom_pcie_get_resources_2_1_0()
503 res->resets[2].id = "ahb"; in qcom_pcie_get_resources_2_1_0()
504 res->resets[3].id = "por"; in qcom_pcie_get_resources_2_1_0()
505 res->resets[4].id = "phy"; in qcom_pcie_get_resources_2_1_0()
506 res->resets[5].id = "ext"; in qcom_pcie_get_resources_2_1_0()
509 res->num_resets = is_apq ? 5 : 6; in qcom_pcie_get_resources_2_1_0()
510 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); in qcom_pcie_get_resources_2_1_0()
519 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_deinit_2_1_0()
521 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_1_0()
522 reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_deinit_2_1_0()
524 writel(1, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_deinit_2_1_0()
526 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_deinit_2_1_0()
531 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_init_2_1_0()
532 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_1_0()
533 struct device *dev = pci->dev; in qcom_pcie_init_2_1_0()
537 ret = reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_init_2_1_0()
543 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_1_0()
549 ret = reset_control_bulk_deassert(res->num_resets, res->resets); in qcom_pcie_init_2_1_0()
552 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_1_0()
561 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_post_init_2_1_0()
562 struct dw_pcie *pci = pcie->pci; in qcom_pcie_post_init_2_1_0()
563 struct device *dev = pci->dev; in qcom_pcie_post_init_2_1_0()
564 struct device_node *node = dev->of_node; in qcom_pcie_post_init_2_1_0()
569 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
571 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
573 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_post_init_2_1_0()
577 if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || in qcom_pcie_post_init_2_1_0()
578 of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { in qcom_pcie_post_init_2_1_0()
582 pcie->parf + PARF_PCS_DEEMPH); in qcom_pcie_post_init_2_1_0()
585 pcie->parf + PARF_PCS_SWING); in qcom_pcie_post_init_2_1_0()
586 writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS); in qcom_pcie_post_init_2_1_0()
589 if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { in qcom_pcie_post_init_2_1_0()
591 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
594 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
598 val = readl(pcie->parf + PARF_PHY_REFCLK); in qcom_pcie_post_init_2_1_0()
600 if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) in qcom_pcie_post_init_2_1_0()
603 writel(val, pcie->parf + PARF_PHY_REFCLK); in qcom_pcie_post_init_2_1_0()
610 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0); in qcom_pcie_post_init_2_1_0()
612 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1); in qcom_pcie_post_init_2_1_0()
614 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_2_1_0()
621 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; in qcom_pcie_get_resources_1_0_0()
622 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_1_0_0()
623 struct device *dev = pci->dev; in qcom_pcie_get_resources_1_0_0()
625 res->vdda = devm_regulator_get(dev, "vdda"); in qcom_pcie_get_resources_1_0_0()
626 if (IS_ERR(res->vdda)) in qcom_pcie_get_resources_1_0_0()
627 return PTR_ERR(res->vdda); in qcom_pcie_get_resources_1_0_0()
629 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_1_0_0()
630 if (res->num_clks < 0) { in qcom_pcie_get_resources_1_0_0()
632 return res->num_clks; in qcom_pcie_get_resources_1_0_0()
635 res->core = devm_reset_control_get_exclusive(dev, "core"); in qcom_pcie_get_resources_1_0_0()
636 return PTR_ERR_OR_ZERO(res->core); in qcom_pcie_get_resources_1_0_0()
641 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; in qcom_pcie_deinit_1_0_0()
643 reset_control_assert(res->core); in qcom_pcie_deinit_1_0_0()
644 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_1_0_0()
645 regulator_disable(res->vdda); in qcom_pcie_deinit_1_0_0()
650 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; in qcom_pcie_init_1_0_0()
651 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_1_0_0()
652 struct device *dev = pci->dev; in qcom_pcie_init_1_0_0()
655 ret = reset_control_deassert(res->core); in qcom_pcie_init_1_0_0()
661 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_1_0_0()
667 ret = regulator_enable(res->vdda); in qcom_pcie_init_1_0_0()
676 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_init_1_0_0()
678 reset_control_assert(res->core); in qcom_pcie_init_1_0_0()
688 u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); in qcom_pcie_post_init_1_0_0()
691 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); in qcom_pcie_post_init_1_0_0()
694 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_1_0_0()
704 val = readl(pcie->parf + PARF_LTSSM); in qcom_pcie_2_3_2_ltssm_enable()
706 writel(val, pcie->parf + PARF_LTSSM); in qcom_pcie_2_3_2_ltssm_enable()
711 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; in qcom_pcie_get_resources_2_3_2()
712 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_3_2()
713 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_3_2()
716 res->supplies[0].supply = "vdda"; in qcom_pcie_get_resources_2_3_2()
717 res->supplies[1].supply = "vddpe-3v3"; in qcom_pcie_get_resources_2_3_2()
718 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), in qcom_pcie_get_resources_2_3_2()
719 res->supplies); in qcom_pcie_get_resources_2_3_2()
723 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_3_2()
724 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_3_2()
726 return res->num_clks; in qcom_pcie_get_resources_2_3_2()
734 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; in qcom_pcie_deinit_2_3_2()
736 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_3_2()
737 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_deinit_2_3_2()
742 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; in qcom_pcie_init_2_3_2()
743 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_3_2()
744 struct device *dev = pci->dev; in qcom_pcie_init_2_3_2()
747 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_3_2()
753 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_3_2()
756 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_3_2()
768 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_2()
770 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_2()
775 val = readl(pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_3_2()
777 writel(val, pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_3_2()
779 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_post_init_2_3_2()
781 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_post_init_2_3_2()
783 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_post_init_2_3_2()
785 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_post_init_2_3_2()
787 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_2_3_2()
794 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; in qcom_pcie_get_resources_2_4_0()
795 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_4_0()
796 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_4_0()
797 bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); in qcom_pcie_get_resources_2_4_0()
800 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_4_0()
801 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_4_0()
803 return res->num_clks; in qcom_pcie_get_resources_2_4_0()
806 res->resets[0].id = "axi_m"; in qcom_pcie_get_resources_2_4_0()
807 res->resets[1].id = "axi_s"; in qcom_pcie_get_resources_2_4_0()
808 res->resets[2].id = "axi_m_sticky"; in qcom_pcie_get_resources_2_4_0()
809 res->resets[3].id = "pipe_sticky"; in qcom_pcie_get_resources_2_4_0()
810 res->resets[4].id = "pwr"; in qcom_pcie_get_resources_2_4_0()
811 res->resets[5].id = "ahb"; in qcom_pcie_get_resources_2_4_0()
812 res->resets[6].id = "pipe"; in qcom_pcie_get_resources_2_4_0()
813 res->resets[7].id = "axi_m_vmid"; in qcom_pcie_get_resources_2_4_0()
814 res->resets[8].id = "axi_s_xpu"; in qcom_pcie_get_resources_2_4_0()
815 res->resets[9].id = "parf"; in qcom_pcie_get_resources_2_4_0()
816 res->resets[10].id = "phy"; in qcom_pcie_get_resources_2_4_0()
817 res->resets[11].id = "phy_ahb"; in qcom_pcie_get_resources_2_4_0()
819 res->num_resets = is_ipq ? 12 : 6; in qcom_pcie_get_resources_2_4_0()
821 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); in qcom_pcie_get_resources_2_4_0()
830 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; in qcom_pcie_deinit_2_4_0()
832 reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_deinit_2_4_0()
833 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_4_0()
838 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; in qcom_pcie_init_2_4_0()
839 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_4_0()
840 struct device *dev = pci->dev; in qcom_pcie_init_2_4_0()
843 ret = reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_init_2_4_0()
851 ret = reset_control_bulk_deassert(res->num_resets, res->resets); in qcom_pcie_init_2_4_0()
859 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_4_0()
861 reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_init_2_4_0()
870 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; in qcom_pcie_get_resources_2_3_3()
871 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_3_3()
872 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_3_3()
875 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_3_3()
876 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_3_3()
878 return res->num_clks; in qcom_pcie_get_resources_2_3_3()
881 res->rst[0].id = "axi_m"; in qcom_pcie_get_resources_2_3_3()
882 res->rst[1].id = "axi_s"; in qcom_pcie_get_resources_2_3_3()
883 res->rst[2].id = "pipe"; in qcom_pcie_get_resources_2_3_3()
884 res->rst[3].id = "axi_m_sticky"; in qcom_pcie_get_resources_2_3_3()
885 res->rst[4].id = "sticky"; in qcom_pcie_get_resources_2_3_3()
886 res->rst[5].id = "ahb"; in qcom_pcie_get_resources_2_3_3()
887 res->rst[6].id = "sleep"; in qcom_pcie_get_resources_2_3_3()
889 ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_get_resources_2_3_3()
898 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; in qcom_pcie_deinit_2_3_3()
900 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_3_3()
905 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; in qcom_pcie_init_2_3_3()
906 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_3_3()
907 struct device *dev = pci->dev; in qcom_pcie_init_2_3_3()
910 ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_init_2_3_3()
918 ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_init_2_3_3()
930 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_3_3()
943 reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_init_2_3_3()
950 struct dw_pcie *pci = pcie->pci; in qcom_pcie_post_init_2_3_3()
954 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_3()
956 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_3()
963 pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_3_3()
964 writel(0, pcie->parf + PARF_Q2A_FLUSH); in qcom_pcie_post_init_2_3_3()
966 writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); in qcom_pcie_post_init_2_3_3()
970 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_post_init_2_3_3()
972 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_3_3()
974 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_3_3()
976 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + in qcom_pcie_post_init_2_3_3()
986 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; in qcom_pcie_get_resources_2_7_0()
987 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_7_0()
988 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_7_0()
991 res->rst = devm_reset_control_array_get_exclusive(dev); in qcom_pcie_get_resources_2_7_0()
992 if (IS_ERR(res->rst)) in qcom_pcie_get_resources_2_7_0()
993 return PTR_ERR(res->rst); in qcom_pcie_get_resources_2_7_0()
995 res->supplies[0].supply = "vdda"; in qcom_pcie_get_resources_2_7_0()
996 res->supplies[1].supply = "vddpe-3v3"; in qcom_pcie_get_resources_2_7_0()
997 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), in qcom_pcie_get_resources_2_7_0()
998 res->supplies); in qcom_pcie_get_resources_2_7_0()
1002 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_7_0()
1003 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_7_0()
1005 return res->num_clks; in qcom_pcie_get_resources_2_7_0()
1013 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; in qcom_pcie_init_2_7_0()
1014 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_7_0()
1015 struct device *dev = pci->dev; in qcom_pcie_init_2_7_0()
1019 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_7_0()
1025 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_7_0()
1029 ret = reset_control_assert(res->rst); in qcom_pcie_init_2_7_0()
1037 ret = reset_control_deassert(res->rst); in qcom_pcie_init_2_7_0()
1047 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); in qcom_pcie_init_2_7_0()
1050 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_init_2_7_0()
1052 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_init_2_7_0()
1057 val = readl(pcie->parf + PARF_SYS_CTRL); in qcom_pcie_init_2_7_0()
1059 writel(val, pcie->parf + PARF_SYS_CTRL); in qcom_pcie_init_2_7_0()
1061 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_init_2_7_0()
1063 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_init_2_7_0()
1066 val = readl(pcie->parf + PARF_PM_CTRL); in qcom_pcie_init_2_7_0()
1068 writel(val, pcie->parf + PARF_PM_CTRL); in qcom_pcie_init_2_7_0()
1070 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_init_2_7_0()
1072 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_init_2_7_0()
1076 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_init_2_7_0()
1078 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_7_0()
1085 const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg; in qcom_pcie_post_init_2_7_0()
1087 if (pcie_cfg->override_no_snoop) in qcom_pcie_post_init_2_7_0()
1089 pcie->parf + PARF_NO_SNOOP_OVERRIDE); in qcom_pcie_post_init_2_7_0()
1091 qcom_pcie_clear_aspm_l0s(pcie->pci); in qcom_pcie_post_init_2_7_0()
1092 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_2_7_0()
1099 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; in qcom_pcie_deinit_2_7_0()
1101 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_7_0()
1103 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_deinit_2_7_0()
1115 void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; in qcom_pcie_config_sid_1_9_0()
1116 struct device *dev = pcie->pci->dev; in qcom_pcie_config_sid_1_9_0()
1122 of_get_property(dev->of_node, "iommu-map", &size); in qcom_pcie_config_sid_1_9_0()
1127 val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); in qcom_pcie_config_sid_1_9_0()
1129 writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); in qcom_pcie_config_sid_1_9_0()
1133 return -ENOMEM; in qcom_pcie_config_sid_1_9_0()
1135 of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, in qcom_pcie_config_sid_1_9_0()
1145 /* Extract the SMMU SID base from the first entry of iommu-map */ in qcom_pcie_config_sid_1_9_0()
1173 val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; in qcom_pcie_config_sid_1_9_0()
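
For context, each "iommu-map" entry parsed above carries four u32 cells, per the generic PCI iommu-map binding: the first requester ID covered, the SMMU phandle, the first stream ID, and the number of IDs in the range. The (u32 *)map cast relies on a struct of exactly that shape; the bdf and smmu_sid names appear in the matched lines, the remaining field names are illustrative:

	#include <linux/types.h>

	/* Illustrative layout of one parsed "iommu-map" entry. */
	struct iommu_map_entry {
		u32 bdf;		/* first requester ID (bus/devfn) covered */
		u32 phandle;		/* phandle of the SMMU node */
		u32 smmu_sid;		/* first stream ID assigned to that RID */
		u32 smmu_sid_len;	/* number of consecutive IDs in the range */
	};

Each PARF_BDF_TO_SID_TABLE_N word then packs the BDF in bits 31:16 and the stream ID, relative to the SMMU SID base, in bits 15:8, as in the table-entry computation shown above.
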
1184 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; in qcom_pcie_get_resources_2_9_0()
1185 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_9_0()
1186 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_9_0()
1188 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_9_0()
1189 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_9_0()
1191 return res->num_clks; in qcom_pcie_get_resources_2_9_0()
1194 res->rst = devm_reset_control_array_get_exclusive(dev); in qcom_pcie_get_resources_2_9_0()
1195 if (IS_ERR(res->rst)) in qcom_pcie_get_resources_2_9_0()
1196 return PTR_ERR(res->rst); in qcom_pcie_get_resources_2_9_0()
1203 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; in qcom_pcie_deinit_2_9_0()
1205 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_9_0()
1210 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; in qcom_pcie_init_2_9_0()
1211 struct device *dev = pcie->pci->dev; in qcom_pcie_init_2_9_0()
1214 ret = reset_control_assert(res->rst); in qcom_pcie_init_2_9_0()
1226 ret = reset_control_deassert(res->rst); in qcom_pcie_init_2_9_0()
1234 return clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_9_0()
1239 struct dw_pcie *pci = pcie->pci; in qcom_pcie_post_init_2_9_0()
1244 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_9_0()
1246 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_9_0()
1250 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); in qcom_pcie_post_init_2_9_0()
1252 pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_post_init_2_9_0()
1255 pci->dbi_base + GEN3_RELATED_OFF); in qcom_pcie_post_init_2_9_0()
1260 pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_9_0()
1262 writel(0, pcie->parf + PARF_Q2A_FLUSH); in qcom_pcie_post_init_2_9_0()
1266 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_post_init_2_9_0()
1268 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_9_0()
1270 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_9_0()
1272 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + in qcom_pcie_post_init_2_9_0()
1278 writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i)); in qcom_pcie_post_init_2_9_0()
1286 u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); in qcom_pcie_link_up()
1295 list_for_each_entry(port, &pcie->ports, list) in qcom_pcie_phy_power_off()
1296 phy_power_off(port->phy); in qcom_pcie_phy_power_off()
1304 list_for_each_entry(port, &pcie->ports, list) { in qcom_pcie_phy_power_on()
1305 ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); in qcom_pcie_phy_power_on()
1309 ret = phy_power_on(port->phy); in qcom_pcie_phy_power_on()
1328 ret = pcie->cfg->ops->init(pcie); in qcom_pcie_host_init()
1332 if (pp->ecam_enabled) { in qcom_pcie_host_init()
1337 offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI)); in qcom_pcie_host_init()
1338 pci->elbi_base = pci->dbi_base + offset; in qcom_pcie_host_init()
1347 if (pcie->cfg->ops->post_init) { in qcom_pcie_host_init()
1348 ret = pcie->cfg->ops->post_init(pcie); in qcom_pcie_host_init()
1355 if (pcie->cfg->ops->config_sid) { in qcom_pcie_host_init()
1356 ret = pcie->cfg->ops->config_sid(pcie); in qcom_pcie_host_init()
1368 pcie->cfg->ops->deinit(pcie); in qcom_pcie_host_init()
1380 pcie->cfg->ops->deinit(pcie); in qcom_pcie_host_deinit()
1523 struct dw_pcie *pci = pcie->pci; in qcom_pcie_icc_init()
1526 pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem"); in qcom_pcie_icc_init()
1527 if (IS_ERR(pcie->icc_mem)) in qcom_pcie_icc_init()
1528 return PTR_ERR(pcie->icc_mem); in qcom_pcie_icc_init()
1530 pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie"); in qcom_pcie_icc_init()
1531 if (IS_ERR(pcie->icc_cpu)) in qcom_pcie_icc_init()
1532 return PTR_ERR(pcie->icc_cpu); in qcom_pcie_icc_init()
1537 * Set an initial peak bandwidth corresponding to single-lane Gen 1 in qcom_pcie_icc_init()
1538 * for the pcie-mem path. in qcom_pcie_icc_init()
1540 ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1)); in qcom_pcie_icc_init()
1542 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", in qcom_pcie_icc_init()
1548 * Since the CPU-PCIe path is only used for activities like register in qcom_pcie_icc_init()
1550 * HW team has recommended to use a minimal bandwidth of 1KBps just to in qcom_pcie_icc_init()
1553 ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1)); in qcom_pcie_icc_init()
1555 dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n", in qcom_pcie_icc_init()
1557 icc_set_bw(pcie->icc_mem, 0, 0); in qcom_pcie_icc_init()
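
The two icc_set_bw() calls above express bandwidth in kBps for the interconnect framework: a token 1 kBps vote keeps the CPU-PCIe path alive for register access, while the pcie-mem vote scales with the link. QCOM_PCIE_LINK_SPEED_TO_BW() itself is not part of this listing; a rough standalone sketch of the underlying arithmetic, using PCIe-spec payload rates after line-encoding overhead (an assumption, not the macro), would be:

	#include <stdio.h>

	/* Per-lane payload rate in Mb/s after line-encoding overhead. */
	static unsigned long pcie_gen_payload_mbps(unsigned int gen)
	{
		switch (gen) {
		case 1: return 2000;	/* 2.5 GT/s, 8b/10b encoding */
		case 2: return 4000;	/* 5.0 GT/s, 8b/10b encoding */
		case 3: return 7877;	/* 8.0 GT/s, 128b/130b encoding */
		case 4: return 15754;	/* 16.0 GT/s, 128b/130b encoding */
		default: return 0;
		}
	}

	int main(void)
	{
		/* Gen 1 x1, the initial vote made above. */
		unsigned int gen = 1, width = 1;
		unsigned long kbps = pcie_gen_payload_mbps(gen) * width * 1000 / 8;

		printf("peak bandwidth: %lu kBps\n", kbps);	/* 250000 kBps */
		return 0;
	}

The 250000 kBps result for a single-lane Gen 1 link is the order of magnitude found in the opp-peak-kBps cells of the SoC OPP tables this search is about.
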
1567 struct dw_pcie *pci = pcie->pci; in qcom_pcie_icc_opp_update()
1569 struct dev_pm_opp *opp; in qcom_pcie_icc_opp_update() local
1573 status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); in qcom_pcie_icc_opp_update()
1582 if (pcie->icc_mem) { in qcom_pcie_icc_opp_update()
1583 ret = icc_set_bw(pcie->icc_mem, 0, in qcom_pcie_icc_opp_update()
1586 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", in qcom_pcie_icc_opp_update()
1589 } else if (pcie->use_pm_opp) { in qcom_pcie_icc_opp_update()
1595 opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, in qcom_pcie_icc_opp_update()
1597 if (!IS_ERR(opp)) { in qcom_pcie_icc_opp_update()
1598 ret = dev_pm_opp_set_opp(pci->dev, opp); in qcom_pcie_icc_opp_update()
1600 dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n", in qcom_pcie_icc_opp_update()
1602 dev_pm_opp_put(opp); in qcom_pcie_icc_opp_update()
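
In the OPP branch above, the lines elided between the LNKSTA read and dev_pm_opp_find_freq_exact() derive freq_kbps from the negotiated link speed, and the lookup key is that per-lane figure multiplied by the negotiated width. A small sketch of the key calculation, assuming the raw signalling rate in kb/s is what keys the Qualcomm OPP tables (an assumption this listing does not confirm):

	/* Illustrative only: raw per-lane signalling rate in kb/s. */
	static unsigned long pcie_gen_raw_kbps(unsigned int gen)
	{
		static const unsigned long rate_mbps[] = { 0, 2500, 5000, 8000, 16000 };

		if (gen >= sizeof(rate_mbps) / sizeof(rate_mbps[0]))
			return 0;
		return rate_mbps[gen] * 1000;
	}

	/* Example: a Gen 3 x2 link gives 8000000 * 2 = 16000000, the exact
	 * value dev_pm_opp_find_freq_exact() would be asked to match. */
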
1609 struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private); in qcom_pcie_link_transition_count()
1612 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); in qcom_pcie_link_transition_count()
1615 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); in qcom_pcie_link_transition_count()
1618 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); in qcom_pcie_link_transition_count()
1621 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); in qcom_pcie_link_transition_count()
1624 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); in qcom_pcie_link_transition_count()
1631 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_debugfs()
1632 struct device *dev = pci->dev; in qcom_pcie_init_debugfs()
1635 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); in qcom_pcie_init_debugfs()
1639 pcie->debugfs = debugfs_create_dir(name, NULL); in qcom_pcie_init_debugfs()
1640 debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs, in qcom_pcie_init_debugfs()
1647 struct dw_pcie_rp *pp = &pcie->pci->pp; in qcom_pcie_global_irq_thread()
1648 struct device *dev = pcie->pci->dev; in qcom_pcie_global_irq_thread()
1649 u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS); in qcom_pcie_global_irq_thread()
1651 writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR); in qcom_pcie_global_irq_thread()
1658 pci_rescan_bus(pp->bridge->bus); in qcom_pcie_global_irq_thread()
1674 if (pp && pp->has_msi_ctrl) in qcom_pci_free_msi()
1680 struct device *dev = cfg->parent; in qcom_pcie_ecam_host_init()
1687 return -ENOMEM; in qcom_pcie_ecam_host_init()
1689 pci->dev = dev; in qcom_pcie_ecam_host_init()
1690 pp = &pci->pp; in qcom_pcie_ecam_host_init()
1691 pci->dbi_base = cfg->win; in qcom_pcie_ecam_host_init()
1692 pp->num_vectors = MSI_DEF_NUM_VECTORS; in qcom_pcie_ecam_host_init()
1698 pp->has_msi_ctrl = true; in qcom_pcie_ecam_host_init()
1715 struct device *dev = pcie->pci->dev; in qcom_pcie_parse_port()
1732 return -ENOMEM; in qcom_pcie_parse_port()
1738 port->reset = reset; in qcom_pcie_parse_port()
1739 port->phy = phy; in qcom_pcie_parse_port()
1740 INIT_LIST_HEAD(&port->list); in qcom_pcie_parse_port()
1741 list_add_tail(&port->list, &pcie->ports); in qcom_pcie_parse_port()
1748 struct device *dev = pcie->pci->dev; in qcom_pcie_parse_ports()
1750 int ret = -ENOENT; in qcom_pcie_parse_ports()
1752 for_each_available_child_of_node_scoped(dev->of_node, of_port) { in qcom_pcie_parse_ports()
1763 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { in qcom_pcie_parse_ports()
1764 phy_exit(port->phy); in qcom_pcie_parse_ports()
1765 list_del(&port->list); in qcom_pcie_parse_ports()
1773 struct device *dev = pcie->pci->dev; in qcom_pcie_parse_legacy_binding()
1793 return -ENOMEM; in qcom_pcie_parse_legacy_binding()
1795 port->reset = reset; in qcom_pcie_parse_legacy_binding()
1796 port->phy = phy; in qcom_pcie_parse_legacy_binding()
1797 INIT_LIST_HEAD(&port->list); in qcom_pcie_parse_legacy_binding()
1798 list_add_tail(&port->list, &pcie->ports); in qcom_pcie_parse_legacy_binding()
1808 struct device *dev = &pdev->dev; in qcom_pcie_probe()
1809 struct dev_pm_opp *opp; in qcom_pcie_probe() local
1820 return -ENODATA; in qcom_pcie_probe()
1823 if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) { in qcom_pcie_probe()
1825 return -ENODATA; in qcom_pcie_probe()
1833 if (pcie_cfg->firmware_managed) { in qcom_pcie_probe()
1839 ret = -ENOMEM; in qcom_pcie_probe()
1851 bridge->sysdata = cfg; in qcom_pcie_probe()
1852 bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops; in qcom_pcie_probe()
1853 bridge->msi_domain = true; in qcom_pcie_probe()
1864 ret = -ENOMEM; in qcom_pcie_probe()
1870 ret = -ENOMEM; in qcom_pcie_probe()
1874 INIT_LIST_HEAD(&pcie->ports); in qcom_pcie_probe()
1876 pci->dev = dev; in qcom_pcie_probe()
1877 pci->ops = &dw_pcie_ops; in qcom_pcie_probe()
1878 pp = &pci->pp; in qcom_pcie_probe()
1880 pcie->pci = pci; in qcom_pcie_probe()
1882 pcie->cfg = pcie_cfg; in qcom_pcie_probe()
1884 pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); in qcom_pcie_probe()
1885 if (IS_ERR(pcie->parf)) { in qcom_pcie_probe()
1886 ret = PTR_ERR(pcie->parf); in qcom_pcie_probe()
1893 pcie->mhi = devm_ioremap_resource(dev, res); in qcom_pcie_probe()
1894 if (IS_ERR(pcie->mhi)) { in qcom_pcie_probe()
1895 ret = PTR_ERR(pcie->mhi); in qcom_pcie_probe()
1900 /* OPP table is optional */ in qcom_pcie_probe()
1902 if (ret && ret != -ENODEV) { in qcom_pcie_probe()
1903 dev_err_probe(dev, ret, "Failed to add OPP table\n"); in qcom_pcie_probe()
1908 * Before the PCIe link is initialized, vote for highest OPP in the OPP in qcom_pcie_probe()
1911 * probe(), OPP will be updated using qcom_pcie_icc_opp_update(). in qcom_pcie_probe()
1914 opp = dev_pm_opp_find_freq_floor(dev, &max_freq); in qcom_pcie_probe()
1915 if (IS_ERR(opp)) { in qcom_pcie_probe()
1916 ret = PTR_ERR(opp); in qcom_pcie_probe()
1917 dev_err_probe(pci->dev, ret, in qcom_pcie_probe()
1918 "Unable to find max freq OPP\n"); in qcom_pcie_probe()
1921 ret = dev_pm_opp_set_opp(dev, opp); in qcom_pcie_probe()
1924 dev_pm_opp_put(opp); in qcom_pcie_probe()
1926 dev_err_probe(pci->dev, ret, in qcom_pcie_probe()
1927 "Failed to set OPP for freq %lu\n", in qcom_pcie_probe()
1932 pcie->use_pm_opp = true; in qcom_pcie_probe()
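
The starting value of max_freq is outside the matched lines; the usual OPP idiom, and presumably what the probe path does here, is to begin at ULONG_MAX so that dev_pm_opp_find_freq_floor() rounds down to the highest entry in the table. A self-contained sketch of that idiom:

	#include <linux/err.h>
	#include <linux/pm_opp.h>

	/* Sketch only: vote for the highest OPP in a device's table. */
	static int vote_for_max_opp(struct device *dev)
	{
		unsigned long freq = ULONG_MAX;
		struct dev_pm_opp *opp;
		int ret;

		opp = dev_pm_opp_find_freq_floor(dev, &freq);	/* highest entry */
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		ret = dev_pm_opp_set_opp(dev, opp);		/* apply the vote */
		dev_pm_opp_put(opp);				/* drop find's reference */

		return ret;
	}
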
1934 /* Skip ICC init if OPP is supported as it is handled by OPP */ in qcom_pcie_probe()
1940 ret = pcie->cfg->ops->get_resources(pcie); in qcom_pcie_probe()
1944 pp->ops = &qcom_pcie_dw_ops; in qcom_pcie_probe()
1948 if (ret != -ENOENT) { in qcom_pcie_probe()
1949 dev_err_probe(pci->dev, ret, in qcom_pcie_probe()
1968 pp->use_linkup_irq = true; in qcom_pcie_probe()
1977 pci_domain_nr(pp->bridge->bus)); in qcom_pcie_probe()
1979 ret = -ENOMEM; in qcom_pcie_probe()
1984 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, in qcom_pcie_probe()
1988 dev_err_probe(&pdev->dev, ret, in qcom_pcie_probe()
1994 pcie->parf + PARF_INT_ALL_MASK); in qcom_pcie_probe()
1999 if (pcie->mhi) in qcom_pcie_probe()
2007 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { in qcom_pcie_probe()
2008 phy_exit(port->phy); in qcom_pcie_probe()
2009 list_del(&port->list); in qcom_pcie_probe()
2031 if (pcie->icc_mem) { in qcom_pcie_suspend_noirq()
2032 ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); in qcom_pcie_suspend_noirq()
2035 "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", in qcom_pcie_suspend_noirq()
2056 if (!dw_pcie_link_up(pcie->pci)) { in qcom_pcie_suspend_noirq()
2057 qcom_pcie_host_deinit(&pcie->pci->pp); in qcom_pcie_suspend_noirq()
2058 pcie->suspended = true; in qcom_pcie_suspend_noirq()
2062 * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM. in qcom_pcie_suspend_noirq()
2064 * S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC in qcom_pcie_suspend_noirq()
2068 ret = icc_disable(pcie->icc_cpu); in qcom_pcie_suspend_noirq()
2070 dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret); in qcom_pcie_suspend_noirq()
2072 if (pcie->use_pm_opp) in qcom_pcie_suspend_noirq()
2073 dev_pm_opp_set_opp(pcie->pci->dev, NULL); in qcom_pcie_suspend_noirq()
2088 ret = icc_enable(pcie->icc_cpu); in qcom_pcie_resume_noirq()
2090 dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret); in qcom_pcie_resume_noirq()
2095 if (pcie->suspended) { in qcom_pcie_resume_noirq()
2096 ret = qcom_pcie_host_init(&pcie->pci->pp); in qcom_pcie_resume_noirq()
2100 pcie->suspended = false; in qcom_pcie_resume_noirq()
2109 { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
2110 { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
2111 { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
2112 { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
2113 { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
2114 { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
2115 { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
2116 { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
2117 { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
2118 { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
2119 { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
2120 { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
2121 { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
2122 { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
2123 { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
2124 { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
2125 { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
2126 { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
2127 { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
2128 { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
2129 { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
2130 { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
2131 { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
2132 { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
2133 { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
2134 { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
2135 { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
2141 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; in qcom_fixup_class()
2158 .name = "qcom-pcie",