Lines matching "vdda_phy-supply" (drivers/pci/controller/dwc/pcie-qcom.c)

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
26 #include <linux/pci-ecam.h>
39 #include "../pci-host-common.h"
40 #include "pcie-designware.h"
41 #include "pcie-qcom-common.h"
257 * struct qcom_pcie_cfg - Per SoC config struct
290 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
297 list_for_each_entry(port, &pcie->ports, list) in qcom_perst_assert()
298 gpiod_set_value_cansleep(port->reset, val); in qcom_perst_assert()
321 if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) in qcom_pcie_start_link()
325 if (pcie->cfg->ops->ltssm_enable) in qcom_pcie_start_link()
326 pcie->cfg->ops->ltssm_enable(pcie); in qcom_pcie_start_link()
337 if (!pcie->cfg->no_l0s) in qcom_pcie_clear_aspm_l0s()
344 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_clear_aspm_l0s()
346 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_clear_aspm_l0s()
358 val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_clear_hpc()
360 writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_clear_hpc()
367 struct dw_pcie *pci = pcie->pci; in qcom_pcie_configure_dbi_base()
369 if (pci->dbi_phys_addr) { in qcom_pcie_configure_dbi_base()
374 writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_base()
376 writel(SLV_ADDR_SPACE_SZ, pcie->parf + in qcom_pcie_configure_dbi_base()
383 struct dw_pcie *pci = pcie->pci; in qcom_pcie_configure_dbi_atu_base()
385 if (pci->dbi_phys_addr) { in qcom_pcie_configure_dbi_atu_base()
391 writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
393 writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
396 if (pci->atu_phys_addr) { in qcom_pcie_configure_dbi_atu_base()
397 writel(lower_32_bits(pci->atu_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
399 writel(upper_32_bits(pci->atu_phys_addr), pcie->parf + in qcom_pcie_configure_dbi_atu_base()
403 writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2); in qcom_pcie_configure_dbi_atu_base()
404 writel(SLV_ADDR_SPACE_SZ, pcie->parf + in qcom_pcie_configure_dbi_atu_base()
411 struct dw_pcie *pci = pcie->pci; in qcom_pcie_2_1_0_ltssm_enable()
414 if (!pci->elbi_base) { in qcom_pcie_2_1_0_ltssm_enable()
415 dev_err(pci->dev, "ELBI is not present\n"); in qcom_pcie_2_1_0_ltssm_enable()
419 val = readl(pci->elbi_base + ELBI_SYS_CTRL); in qcom_pcie_2_1_0_ltssm_enable()
421 writel(val, pci->elbi_base + ELBI_SYS_CTRL); in qcom_pcie_2_1_0_ltssm_enable()
426 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_get_resources_2_1_0()
427 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_1_0()
428 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_1_0()
429 bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064"); in qcom_pcie_get_resources_2_1_0()
432 res->supplies[0].supply = "vdda"; in qcom_pcie_get_resources_2_1_0()
433 res->supplies[1].supply = "vdda_phy"; in qcom_pcie_get_resources_2_1_0()
434 res->supplies[2].supply = "vdda_refclk"; in qcom_pcie_get_resources_2_1_0()
435 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), in qcom_pcie_get_resources_2_1_0()
436 res->supplies); in qcom_pcie_get_resources_2_1_0()
440 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_1_0()
441 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_1_0()
443 return res->num_clks; in qcom_pcie_get_resources_2_1_0()
446 res->resets[0].id = "pci"; in qcom_pcie_get_resources_2_1_0()
447 res->resets[1].id = "axi"; in qcom_pcie_get_resources_2_1_0()
448 res->resets[2].id = "ahb"; in qcom_pcie_get_resources_2_1_0()
449 res->resets[3].id = "por"; in qcom_pcie_get_resources_2_1_0()
450 res->resets[4].id = "phy"; in qcom_pcie_get_resources_2_1_0()
451 res->resets[5].id = "ext"; in qcom_pcie_get_resources_2_1_0()
454 res->num_resets = is_apq ? 5 : 6; in qcom_pcie_get_resources_2_1_0()
455 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); in qcom_pcie_get_resources_2_1_0()
464 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_deinit_2_1_0()
466 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_1_0()
467 reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_deinit_2_1_0()
469 writel(1, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_deinit_2_1_0()
471 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_deinit_2_1_0()
476 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_init_2_1_0()
477 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_1_0()
478 struct device *dev = pci->dev; in qcom_pcie_init_2_1_0()
482 ret = reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_init_2_1_0()
488 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_1_0()
494 ret = reset_control_bulk_deassert(res->num_resets, res->resets); in qcom_pcie_init_2_1_0()
497 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_1_0()
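The fragments above are where the vdda_phy supply from the search query appears: qcom_pcie_get_resources_2_1_0() requests "vdda", "vdda_phy" and "vdda_refclk" as a regulator bulk, and the init/deinit paths enable and disable that bulk as a group. Below is a minimal sketch of that regulator bulk-consumer pattern; example_get_and_enable_supplies() and its three-element array are hypothetical and only illustrate the kernel API used here, they are not part of pcie-qcom.c.

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	#define EXAMPLE_NUM_SUPPLIES	3

	/* Hypothetical helper illustrating devm_regulator_bulk_get() usage */
	static int example_get_and_enable_supplies(struct device *dev,
						   struct regulator_bulk_data *supplies)
	{
		int ret;

		supplies[0].supply = "vdda";
		supplies[1].supply = "vdda_phy";
		supplies[2].supply = "vdda_refclk";

		/* Managed lookup: the regulators are released on driver unbind */
		ret = devm_regulator_bulk_get(dev, EXAMPLE_NUM_SUPPLIES, supplies);
		if (ret)
			return ret;

		/* Enable all rails; the helper rolls back on partial failure */
		return regulator_bulk_enable(EXAMPLE_NUM_SUPPLIES, supplies);
	}

The matching power-down call is regulator_bulk_disable() on the same array, as in qcom_pcie_deinit_2_1_0() above.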
506 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; in qcom_pcie_post_init_2_1_0()
507 struct dw_pcie *pci = pcie->pci; in qcom_pcie_post_init_2_1_0()
508 struct device *dev = pci->dev; in qcom_pcie_post_init_2_1_0()
509 struct device_node *node = dev->of_node; in qcom_pcie_post_init_2_1_0()
514 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
516 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
518 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_post_init_2_1_0()
522 if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || in qcom_pcie_post_init_2_1_0()
523 of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { in qcom_pcie_post_init_2_1_0()
527 pcie->parf + PARF_PCS_DEEMPH); in qcom_pcie_post_init_2_1_0()
530 pcie->parf + PARF_PCS_SWING); in qcom_pcie_post_init_2_1_0()
531 writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS); in qcom_pcie_post_init_2_1_0()
534 if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { in qcom_pcie_post_init_2_1_0()
536 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
539 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_1_0()
543 val = readl(pcie->parf + PARF_PHY_REFCLK); in qcom_pcie_post_init_2_1_0()
545 if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) in qcom_pcie_post_init_2_1_0()
548 writel(val, pcie->parf + PARF_PHY_REFCLK); in qcom_pcie_post_init_2_1_0()
555 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0); in qcom_pcie_post_init_2_1_0()
557 pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1); in qcom_pcie_post_init_2_1_0()
559 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_2_1_0()
566 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; in qcom_pcie_get_resources_1_0_0()
567 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_1_0_0()
568 struct device *dev = pci->dev; in qcom_pcie_get_resources_1_0_0()
570 res->vdda = devm_regulator_get(dev, "vdda"); in qcom_pcie_get_resources_1_0_0()
571 if (IS_ERR(res->vdda)) in qcom_pcie_get_resources_1_0_0()
572 return PTR_ERR(res->vdda); in qcom_pcie_get_resources_1_0_0()
574 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_1_0_0()
575 if (res->num_clks < 0) { in qcom_pcie_get_resources_1_0_0()
577 return res->num_clks; in qcom_pcie_get_resources_1_0_0()
580 res->core = devm_reset_control_get_exclusive(dev, "core"); in qcom_pcie_get_resources_1_0_0()
581 return PTR_ERR_OR_ZERO(res->core); in qcom_pcie_get_resources_1_0_0()
586 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; in qcom_pcie_deinit_1_0_0()
588 reset_control_assert(res->core); in qcom_pcie_deinit_1_0_0()
589 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_1_0_0()
590 regulator_disable(res->vdda); in qcom_pcie_deinit_1_0_0()
595 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; in qcom_pcie_init_1_0_0()
596 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_1_0_0()
597 struct device *dev = pci->dev; in qcom_pcie_init_1_0_0()
600 ret = reset_control_deassert(res->core); in qcom_pcie_init_1_0_0()
606 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_1_0_0()
612 ret = regulator_enable(res->vdda); in qcom_pcie_init_1_0_0()
621 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_init_1_0_0()
623 reset_control_assert(res->core); in qcom_pcie_init_1_0_0()
633 u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); in qcom_pcie_post_init_1_0_0()
636 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); in qcom_pcie_post_init_1_0_0()
639 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_1_0_0()
661 val = readl(pcie->parf + PARF_LTSSM); in qcom_pcie_2_3_2_ltssm_enable()
663 writel(val, pcie->parf + PARF_LTSSM); in qcom_pcie_2_3_2_ltssm_enable()
668 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; in qcom_pcie_get_resources_2_3_2()
669 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_3_2()
670 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_3_2()
673 res->supplies[0].supply = "vdda"; in qcom_pcie_get_resources_2_3_2()
674 res->supplies[1].supply = "vddpe-3v3"; in qcom_pcie_get_resources_2_3_2()
675 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), in qcom_pcie_get_resources_2_3_2()
676 res->supplies); in qcom_pcie_get_resources_2_3_2()
680 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_3_2()
681 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_3_2()
683 return res->num_clks; in qcom_pcie_get_resources_2_3_2()
691 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; in qcom_pcie_deinit_2_3_2()
693 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_3_2()
694 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_deinit_2_3_2()
699 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; in qcom_pcie_init_2_3_2()
700 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_3_2()
701 struct device *dev = pci->dev; in qcom_pcie_init_2_3_2()
704 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_3_2()
710 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_3_2()
713 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_3_2()
725 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_2()
727 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_2()
732 val = readl(pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_3_2()
734 writel(val, pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_3_2()
736 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_post_init_2_3_2()
738 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_post_init_2_3_2()
740 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_post_init_2_3_2()
742 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_post_init_2_3_2()
744 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_2_3_2()
751 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; in qcom_pcie_get_resources_2_4_0()
752 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_4_0()
753 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_4_0()
754 bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); in qcom_pcie_get_resources_2_4_0()
757 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_4_0()
758 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_4_0()
760 return res->num_clks; in qcom_pcie_get_resources_2_4_0()
763 res->resets[0].id = "axi_m"; in qcom_pcie_get_resources_2_4_0()
764 res->resets[1].id = "axi_s"; in qcom_pcie_get_resources_2_4_0()
765 res->resets[2].id = "axi_m_sticky"; in qcom_pcie_get_resources_2_4_0()
766 res->resets[3].id = "pipe_sticky"; in qcom_pcie_get_resources_2_4_0()
767 res->resets[4].id = "pwr"; in qcom_pcie_get_resources_2_4_0()
768 res->resets[5].id = "ahb"; in qcom_pcie_get_resources_2_4_0()
769 res->resets[6].id = "pipe"; in qcom_pcie_get_resources_2_4_0()
770 res->resets[7].id = "axi_m_vmid"; in qcom_pcie_get_resources_2_4_0()
771 res->resets[8].id = "axi_s_xpu"; in qcom_pcie_get_resources_2_4_0()
772 res->resets[9].id = "parf"; in qcom_pcie_get_resources_2_4_0()
773 res->resets[10].id = "phy"; in qcom_pcie_get_resources_2_4_0()
774 res->resets[11].id = "phy_ahb"; in qcom_pcie_get_resources_2_4_0()
776 res->num_resets = is_ipq ? 12 : 6; in qcom_pcie_get_resources_2_4_0()
778 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); in qcom_pcie_get_resources_2_4_0()
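qcom_pcie_get_resources_2_4_0() above names all twelve resets but requests only the first res->num_resets of them (12 on IPQ4019, 6 otherwise), the same trick used with is_apq earlier. A minimal sketch of that conditional reset-bulk request follows; example_get_resets() and its short reset table are hypothetical, not taken from the driver.

	#include <linux/device.h>
	#include <linux/reset.h>

	/* Hypothetical helper: request only the first 'num' named resets */
	static int example_get_resets(struct device *dev,
				      struct reset_control_bulk_data *resets,
				      bool has_extra_reset)
	{
		int num;

		resets[0].id = "ahb";
		resets[1].id = "phy";
		resets[2].id = "phy_ahb";	/* only present on some SoCs */

		num = has_extra_reset ? 3 : 2;

		return devm_reset_control_bulk_get_exclusive(dev, num, resets);
	}

The bulk is then toggled with reset_control_bulk_assert(num, resets) and reset_control_bulk_deassert(num, resets), as in qcom_pcie_init_2_4_0() below.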
787 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; in qcom_pcie_deinit_2_4_0()
789 reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_deinit_2_4_0()
790 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_4_0()
795 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; in qcom_pcie_init_2_4_0()
796 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_4_0()
797 struct device *dev = pci->dev; in qcom_pcie_init_2_4_0()
800 ret = reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_init_2_4_0()
808 ret = reset_control_bulk_deassert(res->num_resets, res->resets); in qcom_pcie_init_2_4_0()
816 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_4_0()
818 reset_control_bulk_assert(res->num_resets, res->resets); in qcom_pcie_init_2_4_0()
827 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; in qcom_pcie_get_resources_2_3_3()
828 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_3_3()
829 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_3_3()
832 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_3_3()
833 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_3_3()
835 return res->num_clks; in qcom_pcie_get_resources_2_3_3()
838 res->rst[0].id = "axi_m"; in qcom_pcie_get_resources_2_3_3()
839 res->rst[1].id = "axi_s"; in qcom_pcie_get_resources_2_3_3()
840 res->rst[2].id = "pipe"; in qcom_pcie_get_resources_2_3_3()
841 res->rst[3].id = "axi_m_sticky"; in qcom_pcie_get_resources_2_3_3()
842 res->rst[4].id = "sticky"; in qcom_pcie_get_resources_2_3_3()
843 res->rst[5].id = "ahb"; in qcom_pcie_get_resources_2_3_3()
844 res->rst[6].id = "sleep"; in qcom_pcie_get_resources_2_3_3()
846 ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_get_resources_2_3_3()
855 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; in qcom_pcie_deinit_2_3_3()
857 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_3_3()
862 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; in qcom_pcie_init_2_3_3()
863 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_3_3()
864 struct device *dev = pci->dev; in qcom_pcie_init_2_3_3()
867 ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_init_2_3_3()
875 ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_init_2_3_3()
887 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_3_3()
900 reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); in qcom_pcie_init_2_3_3()
907 struct dw_pcie *pci = pcie->pci; in qcom_pcie_post_init_2_3_3()
911 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_3()
913 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_3_3()
920 pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_3_3()
921 writel(0, pcie->parf + PARF_Q2A_FLUSH); in qcom_pcie_post_init_2_3_3()
923 writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); in qcom_pcie_post_init_2_3_3()
927 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_post_init_2_3_3()
929 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_3_3()
931 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_3_3()
933 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + in qcom_pcie_post_init_2_3_3()
943 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; in qcom_pcie_get_resources_2_7_0()
944 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_7_0()
945 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_7_0()
948 res->rst = devm_reset_control_array_get_exclusive(dev); in qcom_pcie_get_resources_2_7_0()
949 if (IS_ERR(res->rst)) in qcom_pcie_get_resources_2_7_0()
950 return PTR_ERR(res->rst); in qcom_pcie_get_resources_2_7_0()
952 res->supplies[0].supply = "vdda"; in qcom_pcie_get_resources_2_7_0()
953 res->supplies[1].supply = "vddpe-3v3"; in qcom_pcie_get_resources_2_7_0()
954 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), in qcom_pcie_get_resources_2_7_0()
955 res->supplies); in qcom_pcie_get_resources_2_7_0()
959 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_7_0()
960 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_7_0()
962 return res->num_clks; in qcom_pcie_get_resources_2_7_0()
970 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; in qcom_pcie_init_2_7_0()
971 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_2_7_0()
972 struct device *dev = pci->dev; in qcom_pcie_init_2_7_0()
976 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_7_0()
982 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_7_0()
986 ret = reset_control_assert(res->rst); in qcom_pcie_init_2_7_0()
994 ret = reset_control_deassert(res->rst); in qcom_pcie_init_2_7_0()
1004 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); in qcom_pcie_init_2_7_0()
1007 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_init_2_7_0()
1009 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_init_2_7_0()
1014 val = readl(pcie->parf + PARF_SYS_CTRL); in qcom_pcie_init_2_7_0()
1016 writel(val, pcie->parf + PARF_SYS_CTRL); in qcom_pcie_init_2_7_0()
1018 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_init_2_7_0()
1020 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_init_2_7_0()
1023 val = readl(pcie->parf + PARF_PM_CTRL); in qcom_pcie_init_2_7_0()
1025 writel(val, pcie->parf + PARF_PM_CTRL); in qcom_pcie_init_2_7_0()
1027 pci->l1ss_support = true; in qcom_pcie_init_2_7_0()
1029 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_init_2_7_0()
1031 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); in qcom_pcie_init_2_7_0()
1035 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_init_2_7_0()
1037 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_init_2_7_0()
1044 const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg; in qcom_pcie_post_init_2_7_0()
1046 if (pcie_cfg->override_no_snoop) in qcom_pcie_post_init_2_7_0()
1048 pcie->parf + PARF_NO_SNOOP_OVERRIDE); in qcom_pcie_post_init_2_7_0()
1050 qcom_pcie_clear_aspm_l0s(pcie->pci); in qcom_pcie_post_init_2_7_0()
1051 qcom_pcie_clear_hpc(pcie->pci); in qcom_pcie_post_init_2_7_0()
1070 struct dw_pcie_rp *pp = &pcie->pci->pp; in qcom_pcie_host_post_init_2_7_0()
1072 pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); in qcom_pcie_host_post_init_2_7_0()
1077 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; in qcom_pcie_deinit_2_7_0()
1079 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_7_0()
1081 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); in qcom_pcie_deinit_2_7_0()
1093 void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; in qcom_pcie_config_sid_1_9_0()
1094 struct device *dev = pcie->pci->dev; in qcom_pcie_config_sid_1_9_0()
1100 of_get_property(dev->of_node, "iommu-map", &size); in qcom_pcie_config_sid_1_9_0()
1105 val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); in qcom_pcie_config_sid_1_9_0()
1107 writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); in qcom_pcie_config_sid_1_9_0()
1111 return -ENOMEM; in qcom_pcie_config_sid_1_9_0()
1113 of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, in qcom_pcie_config_sid_1_9_0()
1123 /* Extract the SMMU SID base from the first entry of iommu-map */ in qcom_pcie_config_sid_1_9_0()
1151 val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; in qcom_pcie_config_sid_1_9_0()
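As a worked example of the encoding on the last line above (hypothetical values, not taken from any real iommu-map): if the first iommu-map entry yields an SMMU SID base of 0x1c00 and a later entry maps BDF 01:00.0 (bdf = 0x0100) to SID 0x1c01, the table entry becomes:

	/* BDF in bits [31:16], SID offset from the base in bits [15:8] */
	val = 0x0100 << 16 | (0x1c01 - 0x1c00) << 8 | 0;	/* 0x01000100 */

so each PARF_BDF_TO_SID_TABLE_N entry carries the BDF, the SID offset relative to the base taken from the first iommu-map entry, and a zero low byte.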
1162 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; in qcom_pcie_get_resources_2_9_0()
1163 struct dw_pcie *pci = pcie->pci; in qcom_pcie_get_resources_2_9_0()
1164 struct device *dev = pci->dev; in qcom_pcie_get_resources_2_9_0()
1166 res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); in qcom_pcie_get_resources_2_9_0()
1167 if (res->num_clks < 0) { in qcom_pcie_get_resources_2_9_0()
1169 return res->num_clks; in qcom_pcie_get_resources_2_9_0()
1172 res->rst = devm_reset_control_array_get_exclusive(dev); in qcom_pcie_get_resources_2_9_0()
1173 if (IS_ERR(res->rst)) in qcom_pcie_get_resources_2_9_0()
1174 return PTR_ERR(res->rst); in qcom_pcie_get_resources_2_9_0()
1181 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; in qcom_pcie_deinit_2_9_0()
1183 clk_bulk_disable_unprepare(res->num_clks, res->clks); in qcom_pcie_deinit_2_9_0()
1188 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; in qcom_pcie_init_2_9_0()
1189 struct device *dev = pcie->pci->dev; in qcom_pcie_init_2_9_0()
1192 ret = reset_control_assert(res->rst); in qcom_pcie_init_2_9_0()
1204 ret = reset_control_deassert(res->rst); in qcom_pcie_init_2_9_0()
1212 return clk_bulk_prepare_enable(res->num_clks, res->clks); in qcom_pcie_init_2_9_0()
1217 struct dw_pcie *pci = pcie->pci; in qcom_pcie_post_init_2_9_0()
1222 val = readl(pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_9_0()
1224 writel(val, pcie->parf + PARF_PHY_CTRL); in qcom_pcie_post_init_2_9_0()
1228 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); in qcom_pcie_post_init_2_9_0()
1230 pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); in qcom_pcie_post_init_2_9_0()
1233 pci->dbi_base + GEN3_RELATED_OFF); in qcom_pcie_post_init_2_9_0()
1238 pcie->parf + PARF_SYS_CTRL); in qcom_pcie_post_init_2_9_0()
1240 writel(0, pcie->parf + PARF_Q2A_FLUSH); in qcom_pcie_post_init_2_9_0()
1244 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); in qcom_pcie_post_init_2_9_0()
1246 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_9_0()
1248 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); in qcom_pcie_post_init_2_9_0()
1250 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + in qcom_pcie_post_init_2_9_0()
1256 writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i)); in qcom_pcie_post_init_2_9_0()
1264 u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); in qcom_pcie_link_up()
1273 list_for_each_entry(port, &pcie->ports, list) in qcom_pcie_phy_power_off()
1274 phy_power_off(port->phy); in qcom_pcie_phy_power_off()
1282 list_for_each_entry(port, &pcie->ports, list) { in qcom_pcie_phy_power_on()
1283 ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); in qcom_pcie_phy_power_on()
1287 ret = phy_power_on(port->phy); in qcom_pcie_phy_power_on()
1305 ret = pcie->cfg->ops->init(pcie); in qcom_pcie_host_init()
1313 if (pcie->cfg->ops->post_init) { in qcom_pcie_host_init()
1314 ret = pcie->cfg->ops->post_init(pcie); in qcom_pcie_host_init()
1321 if (pcie->cfg->ops->config_sid) { in qcom_pcie_host_init()
1322 ret = pcie->cfg->ops->config_sid(pcie); in qcom_pcie_host_init()
1334 pcie->cfg->ops->deinit(pcie); in qcom_pcie_host_init()
1346 pcie->cfg->ops->deinit(pcie); in qcom_pcie_host_deinit()
1354 if (pcie->cfg->ops->host_post_init) in qcom_pcie_host_post_init()
1355 pcie->cfg->ops->host_post_init(pcie); in qcom_pcie_host_post_init()
1502 struct dw_pcie *pci = pcie->pci; in qcom_pcie_icc_init()
1505 pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem"); in qcom_pcie_icc_init()
1506 if (IS_ERR(pcie->icc_mem)) in qcom_pcie_icc_init()
1507 return PTR_ERR(pcie->icc_mem); in qcom_pcie_icc_init()
1509 pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie"); in qcom_pcie_icc_init()
1510 if (IS_ERR(pcie->icc_cpu)) in qcom_pcie_icc_init()
1511 return PTR_ERR(pcie->icc_cpu); in qcom_pcie_icc_init()
1516 * Set an initial peak bandwidth corresponding to single-lane Gen 1 in qcom_pcie_icc_init()
1517 * for the pcie-mem path. in qcom_pcie_icc_init()
1519 ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1)); in qcom_pcie_icc_init()
1521 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", in qcom_pcie_icc_init()
1527 * Since the CPU-PCIe path is only used for activities like register in qcom_pcie_icc_init()
1532 ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1)); in qcom_pcie_icc_init()
1534 dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n", in qcom_pcie_icc_init()
1536 icc_set_bw(pcie->icc_mem, 0, 0); in qcom_pcie_icc_init()
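The fragments above follow the standard interconnect consumer pattern: request the "pcie-mem" and "cpu-pcie" paths by name, then vote bandwidth with icc_set_bw(path, avg_bw, peak_bw). A minimal sketch follows; example_icc_init() is hypothetical, and the ~250 MB/s (Gen 1 x1 class) peak vote is an assumed placeholder for the driver's QCOM_PCIE_LINK_SPEED_TO_BW(1).

	#include <linux/device.h>
	#include <linux/interconnect.h>

	/* Hypothetical helper showing the interconnect consumer calls used above */
	static int example_icc_init(struct device *dev)
	{
		struct icc_path *icc_mem, *icc_cpu;
		int ret;

		icc_mem = devm_of_icc_get(dev, "pcie-mem");
		if (IS_ERR(icc_mem))
			return PTR_ERR(icc_mem);

		icc_cpu = devm_of_icc_get(dev, "cpu-pcie");
		if (IS_ERR(icc_cpu))
			return PTR_ERR(icc_cpu);

		/* avg_bw = 0, peak_bw ~ 250 MB/s as a Gen 1 x1 placeholder */
		ret = icc_set_bw(icc_mem, 0, MBps_to_icc(250));
		if (ret)
			return ret;

		/* Keep a token vote on the CPU-PCIe path for register access */
		return icc_set_bw(icc_cpu, 0, kBps_to_icc(1));
	}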
1546 struct dw_pcie *pci = pcie->pci; in qcom_pcie_icc_opp_update()
1553 status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); in qcom_pcie_icc_opp_update()
1562 if (pcie->icc_mem) { in qcom_pcie_icc_opp_update()
1563 ret = icc_set_bw(pcie->icc_mem, 0, in qcom_pcie_icc_opp_update()
1566 dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", in qcom_pcie_icc_opp_update()
1569 } else if (pcie->use_pm_opp) { in qcom_pcie_icc_opp_update()
1575 opp = dev_pm_opp_find_level_exact(pci->dev, speed); in qcom_pcie_icc_opp_update()
1577 /* opp-level is not defined, use only frequency */ in qcom_pcie_icc_opp_update()
1578 opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, in qcom_pcie_icc_opp_update()
1581 /* put opp-level OPP */ in qcom_pcie_icc_opp_update()
1587 opp = dev_pm_opp_find_key_exact(pci->dev, &key, true); in qcom_pcie_icc_opp_update()
1590 ret = dev_pm_opp_set_opp(pci->dev, opp); in qcom_pcie_icc_opp_update()
1592 dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n", in qcom_pcie_icc_opp_update()

1601 struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private); in qcom_pcie_link_transition_count()
1604 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); in qcom_pcie_link_transition_count()
1607 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); in qcom_pcie_link_transition_count()
1610 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); in qcom_pcie_link_transition_count()
1613 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); in qcom_pcie_link_transition_count()
1616 readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); in qcom_pcie_link_transition_count()
1623 struct dw_pcie *pci = pcie->pci; in qcom_pcie_init_debugfs()
1624 struct device *dev = pci->dev; in qcom_pcie_init_debugfs()
1627 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); in qcom_pcie_init_debugfs()
1631 pcie->debugfs = debugfs_create_dir(name, NULL); in qcom_pcie_init_debugfs()
1632 debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs, in qcom_pcie_init_debugfs()
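The debugfs fragments above use debugfs_create_devm_seqfile(), whose read callback has the form int (*read_fn)(struct seq_file *s, void *data) and receives the registering device back through s->private (which is how qcom_pcie_link_transition_count() recovers its driver data above). A minimal sketch with a hypothetical read-out; example_stats_show() and example_init_debugfs() are illustrative only.

	#include <linux/debugfs.h>
	#include <linux/device.h>
	#include <linux/seq_file.h>

	/* Hypothetical read callback: s->private is the device passed at creation */
	static int example_stats_show(struct seq_file *s, void *unused)
	{
		struct device *dev = s->private;

		seq_printf(s, "device: %s\n", dev_name(dev));
		return 0;
	}

	/* Hypothetical registration mirroring qcom_pcie_init_debugfs() above */
	static void example_init_debugfs(struct device *dev, struct dentry *root)
	{
		debugfs_create_devm_seqfile(dev, "example_stats", root,
					    example_stats_show);
	}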
1639 struct dw_pcie_rp *pp = &pcie->pci->pp; in qcom_pcie_global_irq_thread()
1640 struct device *dev = pcie->pci->dev; in qcom_pcie_global_irq_thread()
1641 u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS); in qcom_pcie_global_irq_thread()
1643 writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR); in qcom_pcie_global_irq_thread()
1650 pci_rescan_bus(pp->bridge->bus); in qcom_pcie_global_irq_thread()
1666 if (pp && pp->has_msi_ctrl) in qcom_pci_free_msi()
1672 struct device *dev = cfg->parent; in qcom_pcie_ecam_host_init()
1679 return -ENOMEM; in qcom_pcie_ecam_host_init()
1681 pci->dev = dev; in qcom_pcie_ecam_host_init()
1682 pp = &pci->pp; in qcom_pcie_ecam_host_init()
1683 pci->dbi_base = cfg->win; in qcom_pcie_ecam_host_init()
1684 pp->num_vectors = MSI_DEF_NUM_VECTORS; in qcom_pcie_ecam_host_init()
1690 pp->has_msi_ctrl = true; in qcom_pcie_ecam_host_init()
1707 struct device *dev = pcie->pci->dev; in qcom_pcie_parse_port()
1724 return -ENOMEM; in qcom_pcie_parse_port()
1730 port->reset = reset; in qcom_pcie_parse_port()
1731 port->phy = phy; in qcom_pcie_parse_port()
1732 INIT_LIST_HEAD(&port->list); in qcom_pcie_parse_port()
1733 list_add_tail(&port->list, &pcie->ports); in qcom_pcie_parse_port()
1740 struct device *dev = pcie->pci->dev; in qcom_pcie_parse_ports()
1742 int ret = -ENOENT; in qcom_pcie_parse_ports()
1744 for_each_available_child_of_node_scoped(dev->of_node, of_port) { in qcom_pcie_parse_ports()
1755 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { in qcom_pcie_parse_ports()
1756 phy_exit(port->phy); in qcom_pcie_parse_ports()
1757 list_del(&port->list); in qcom_pcie_parse_ports()
1765 struct device *dev = pcie->pci->dev; in qcom_pcie_parse_legacy_binding()
1785 return -ENOMEM; in qcom_pcie_parse_legacy_binding()
1787 port->reset = reset; in qcom_pcie_parse_legacy_binding()
1788 port->phy = phy; in qcom_pcie_parse_legacy_binding()
1789 INIT_LIST_HEAD(&port->list); in qcom_pcie_parse_legacy_binding()
1790 list_add_tail(&port->list, &pcie->ports); in qcom_pcie_parse_legacy_binding()
1800 struct device *dev = &pdev->dev; in qcom_pcie_probe()
1812 return -ENODATA; in qcom_pcie_probe()
1815 if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) { in qcom_pcie_probe()
1817 return -ENODATA; in qcom_pcie_probe()
1825 if (pcie_cfg->firmware_managed) { in qcom_pcie_probe()
1831 ret = -ENOMEM; in qcom_pcie_probe()
1843 bridge->sysdata = cfg; in qcom_pcie_probe()
1844 bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops; in qcom_pcie_probe()
1845 bridge->msi_domain = true; in qcom_pcie_probe()
1856 ret = -ENOMEM; in qcom_pcie_probe()
1862 ret = -ENOMEM; in qcom_pcie_probe()
1866 INIT_LIST_HEAD(&pcie->ports); in qcom_pcie_probe()
1868 pci->dev = dev; in qcom_pcie_probe()
1869 pci->ops = &dw_pcie_ops; in qcom_pcie_probe()
1870 pp = &pci->pp; in qcom_pcie_probe()
1872 pcie->pci = pci; in qcom_pcie_probe()
1874 pcie->cfg = pcie_cfg; in qcom_pcie_probe()
1876 pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); in qcom_pcie_probe()
1877 if (IS_ERR(pcie->parf)) { in qcom_pcie_probe()
1878 ret = PTR_ERR(pcie->parf); in qcom_pcie_probe()
1885 pcie->mhi = devm_ioremap_resource(dev, res); in qcom_pcie_probe()
1886 if (IS_ERR(pcie->mhi)) { in qcom_pcie_probe()
1887 ret = PTR_ERR(pcie->mhi); in qcom_pcie_probe()
1894 if (ret && ret != -ENODEV) { in qcom_pcie_probe()
1909 dev_err_probe(pci->dev, ret, in qcom_pcie_probe()
1918 dev_err_probe(pci->dev, ret, in qcom_pcie_probe()
1924 pcie->use_pm_opp = true; in qcom_pcie_probe()
1932 ret = pcie->cfg->ops->get_resources(pcie); in qcom_pcie_probe()
1936 pp->ops = &qcom_pcie_dw_ops; in qcom_pcie_probe()
1940 if (ret != -ENOENT) { in qcom_pcie_probe()
1941 dev_err_probe(pci->dev, ret, in qcom_pcie_probe()
1960 pp->use_linkup_irq = true; in qcom_pcie_probe()
1969 pci_domain_nr(pp->bridge->bus)); in qcom_pcie_probe()
1971 ret = -ENOMEM; in qcom_pcie_probe()
1976 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, in qcom_pcie_probe()
1980 dev_err_probe(&pdev->dev, ret, in qcom_pcie_probe()
1986 pcie->parf + PARF_INT_ALL_MASK); in qcom_pcie_probe()
1991 if (pcie->mhi) in qcom_pcie_probe()
1999 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { in qcom_pcie_probe()
2000 phy_exit(port->phy); in qcom_pcie_probe()
2001 list_del(&port->list); in qcom_pcie_probe()
2023 if (pcie->icc_mem) { in qcom_pcie_suspend_noirq()
2024 ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); in qcom_pcie_suspend_noirq()
2027 "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", in qcom_pcie_suspend_noirq()
2044 * implies VDD supply will be removed and the devices may go into in qcom_pcie_suspend_noirq()
2048 if (!dw_pcie_link_up(pcie->pci)) { in qcom_pcie_suspend_noirq()
2049 qcom_pcie_host_deinit(&pcie->pci->pp); in qcom_pcie_suspend_noirq()
2050 pcie->suspended = true; in qcom_pcie_suspend_noirq()
2054 * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM. in qcom_pcie_suspend_noirq()
2056 * S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC in qcom_pcie_suspend_noirq()
2060 ret = icc_disable(pcie->icc_cpu); in qcom_pcie_suspend_noirq()
2062 dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret); in qcom_pcie_suspend_noirq()
2064 if (pcie->use_pm_opp) in qcom_pcie_suspend_noirq()
2065 dev_pm_opp_set_opp(pcie->pci->dev, NULL); in qcom_pcie_suspend_noirq()
2080 ret = icc_enable(pcie->icc_cpu); in qcom_pcie_resume_noirq()
2082 dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret); in qcom_pcie_resume_noirq()
2087 if (pcie->suspended) { in qcom_pcie_resume_noirq()
2088 ret = qcom_pcie_host_init(&pcie->pci->pp); in qcom_pcie_resume_noirq()
2092 pcie->suspended = false; in qcom_pcie_resume_noirq()
2101 { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
2102 { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
2103 { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
2104 { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
2105 { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
2106 { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
2107 { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
2108 { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
2109 { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
2110 { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
2111 { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
2112 { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
2113 { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
2114 { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
2115 { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
2116 { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
2117 { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
2118 { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
2119 { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
2120 { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
2121 { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
2122 { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
2123 { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
2124 { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
2125 { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
2126 { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
2127 { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
2133 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; in qcom_fixup_class()
2150 .name = "qcom-pcie",