// SPDX-License-Identifier: GPL-2.0
/*
 * SpacemiT K1 PCIe host driver
 *
 * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved.
 * Copyright (c) 2023, spacemit Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define PCI_VENDOR_ID_SPACEMIT		0x201f
#define PCI_DEVICE_ID_SPACEMIT_K1	0x0001

/* Offsets and field definitions for link management registers */
#define K1_PHY_AHB_IRQ_EN		0x0000
#define PCIE_INTERRUPT_EN		BIT(0)

#define K1_PHY_AHB_LINK_STS		0x0004
#define SMLH_LINK_UP			BIT(1)
#define RDLH_LINK_UP			BIT(12)

#define INTR_ENABLE			0x0014
#define MSI_CTRL_INT			BIT(11)

/* Some controls require APMU regmap access */
#define SYSCON_APMU			"spacemit,apmu"

/* Offsets and field definitions for APMU registers */
#define PCIE_CLK_RESET_CONTROL		0x0000
#define LTSSM_EN			BIT(6)
#define PCIE_AUX_PWR_DET		BIT(9)
#define PCIE_RC_PERST			BIT(12)	/* 1: assert PERST# */
#define APP_HOLD_PHY_RST		BIT(30)
#define DEVICE_TYPE_RC			BIT(31)	/* 0: endpoint; 1: RC */

#define PCIE_CONTROL_LOGIC		0x0004
#define PCIE_SOFT_RESET			BIT(0)

struct k1_pcie {
	struct dw_pcie pci;
	struct phy *phy;
	void __iomem *link;
	struct regmap *pmu;	/* Errors ignored; MMIO-backed regmap */
	u32 pmu_off;
};

#define to_k1_pcie(dw_pcie) \
	platform_get_drvdata(to_platform_device((dw_pcie)->dev))

static void k1_pcie_toggle_soft_reset(struct k1_pcie *k1)
{
	u32 offset;
	u32 val;

	/*
	 * Write, then read back to guarantee the write has reached the
	 * device before we start the delay.
	 */
	offset = k1->pmu_off + PCIE_CONTROL_LOGIC;
	regmap_set_bits(k1->pmu, offset, PCIE_SOFT_RESET);
	regmap_read(k1->pmu, offset, &val);

	mdelay(2);

	regmap_clear_bits(k1->pmu, offset, PCIE_SOFT_RESET);
}

/* Enable app clocks, deassert resets */
static int k1_pcie_enable_resources(struct k1_pcie *k1)
{
	struct dw_pcie *pci = &k1->pci;
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(pci->app_clks), pci->app_clks);
	if (ret)
		return ret;

	ret = reset_control_bulk_deassert(ARRAY_SIZE(pci->app_rsts),
					  pci->app_rsts);
	if (ret)
		goto err_disable_clks;

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);

	return ret;
}

/* Assert resets, disable app clocks */
static void k1_pcie_disable_resources(struct k1_pcie *k1)
{
	struct dw_pcie *pci = &k1->pci;

	reset_control_bulk_assert(ARRAY_SIZE(pci->app_rsts), pci->app_rsts);
	clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);
}

/* FIXME: Disable ASPM L1 to avoid errors reported on some NVMe drives */
static void k1_pcie_disable_aspm_l1(struct k1_pcie *k1)
{
	struct dw_pcie *pci = &k1->pci;
	u8 offset;
	u32 val;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	offset += PCI_EXP_LNKCAP;

	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, offset);
	val &= ~PCI_EXP_LNKCAP_ASPM_L1;
	dw_pcie_writel_dbi(pci, offset, val);
	dw_pcie_dbi_ro_wr_dis(pci);
}

static int k1_pcie_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct k1_pcie *k1 = to_k1_pcie(pci);
	u32 reset_ctrl;
	u32 val;
	int ret;

	k1_pcie_toggle_soft_reset(k1);

	ret = k1_pcie_enable_resources(k1);
	if (ret)
		return ret;

	/* Set the PCI vendor and device ID */
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, PCI_VENDOR_ID_SPACEMIT);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, PCI_DEVICE_ID_SPACEMIT_K1);
	dw_pcie_dbi_ro_wr_dis(pci);

	/*
	 * Start by asserting fundamental reset (drive PERST# low). The
	 * PCI CEM spec says that PERST# should be deasserted at least
	 * 100ms after the power becomes stable, so we'll insert that
	 * delay first. Write, then read it back to guarantee the write
	 * reaches the device before we start the delay.
	 */
	reset_ctrl = k1->pmu_off + PCIE_CLK_RESET_CONTROL;
	regmap_set_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);
	regmap_read(k1->pmu, reset_ctrl, &val);
	mdelay(PCIE_T_PVPERL_MS);

	/*
	 * Put the controller in root complex mode, and indicate that
	 * Vaux (3.3v) is present.
	 */
	regmap_set_bits(k1->pmu, reset_ctrl, DEVICE_TYPE_RC | PCIE_AUX_PWR_DET);

	ret = phy_init(k1->phy);
	if (ret) {
		k1_pcie_disable_resources(k1);

		return ret;
	}

	/* Deassert fundamental reset (drive PERST# high) */
	regmap_clear_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);

	/* Finally, as a workaround, disable ASPM L1 */
	k1_pcie_disable_aspm_l1(k1);

	return 0;
}

static void k1_pcie_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct k1_pcie *k1 = to_k1_pcie(pci);

	/* Assert fundamental reset (drive PERST# low) */
	regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
			PCIE_RC_PERST);

	phy_exit(k1->phy);

	k1_pcie_disable_resources(k1);
}

static const struct dw_pcie_host_ops k1_pcie_host_ops = {
	.init = k1_pcie_init,
	.deinit = k1_pcie_deinit,
};

static bool k1_pcie_link_up(struct dw_pcie *pci)
{
	struct k1_pcie *k1 = to_k1_pcie(pci);
	u32 val;

	val = readl_relaxed(k1->link + K1_PHY_AHB_LINK_STS);

	return (val & RDLH_LINK_UP) && (val & SMLH_LINK_UP);
}

static int k1_pcie_start_link(struct dw_pcie *pci)
{
	struct k1_pcie *k1 = to_k1_pcie(pci);
	u32 val;

	/* Stop holding the PHY in reset, and enable link training */
	regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
			   APP_HOLD_PHY_RST | LTSSM_EN, LTSSM_EN);

	/* Enable the MSI interrupt */
	writel_relaxed(MSI_CTRL_INT, k1->link + INTR_ENABLE);

	/* Top-level interrupt enable */
	val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
	val |= PCIE_INTERRUPT_EN;
	writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);

	return 0;
}

static void k1_pcie_stop_link(struct dw_pcie *pci)
{
	struct k1_pcie *k1 = to_k1_pcie(pci);
	u32 val;

	/* Disable interrupts */
	val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
	val &= ~PCIE_INTERRUPT_EN;
	writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);

	writel_relaxed(0, k1->link + INTR_ENABLE);

	/* Disable the link and hold the PHY in reset */
	regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
			   APP_HOLD_PHY_RST | LTSSM_EN, APP_HOLD_PHY_RST);
}

static const struct dw_pcie_ops k1_pcie_ops = {
	.link_up = k1_pcie_link_up,
	.start_link = k1_pcie_start_link,
	.stop_link = k1_pcie_stop_link,
};

static int k1_pcie_parse_port(struct k1_pcie *k1)
{
	struct device *dev = k1->pci.dev;
	struct device_node *root_port;
	struct phy *phy;

	/* We assume only one root port */
	root_port = of_get_next_available_child(dev_of_node(dev), NULL);
	if (!root_port)
		return -EINVAL;

	phy = devm_of_phy_get(dev, root_port, NULL);

	of_node_put(root_port);

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	k1->phy = phy;

	return 0;
}

static int k1_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct k1_pcie *k1;
	int ret;

	k1 = devm_kzalloc(dev, sizeof(*k1), GFP_KERNEL);
	if (!k1)
		return -ENOMEM;

	k1->pmu = syscon_regmap_lookup_by_phandle_args(dev_of_node(dev),
						       SYSCON_APMU, 1,
						       &k1->pmu_off);
	if (IS_ERR(k1->pmu))
		return dev_err_probe(dev, PTR_ERR(k1->pmu),
				     "failed to lookup PMU registers\n");

	k1->link = devm_platform_ioremap_resource_byname(pdev, "link");
	if (IS_ERR(k1->link))
		return dev_err_probe(dev, PTR_ERR(k1->link),
				     "failed to map \"link\" registers\n");

	k1->pci.dev = dev;
	k1->pci.ops = &k1_pcie_ops;
	k1->pci.pp.num_vectors = MAX_MSI_IRQS;
	dw_pcie_cap_set(&k1->pci, REQ_RES);

	k1->pci.pp.ops = &k1_pcie_host_ops;

	/* Hold the PHY in reset until we start the link */
	regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
			APP_HOLD_PHY_RST);

	ret = devm_regulator_get_enable(dev, "vpcie3v3");
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to get \"vpcie3v3\" supply\n");

	pm_runtime_set_active(dev);
	pm_runtime_no_callbacks(dev);
	devm_pm_runtime_enable(dev);

	platform_set_drvdata(pdev, k1);

	ret = k1_pcie_parse_port(k1);
	if (ret)
		return dev_err_probe(dev, ret, "failed to parse root port\n");

	ret = dw_pcie_host_init(&k1->pci.pp);
	if (ret)
		return dev_err_probe(dev, ret, "failed to initialize host\n");

	return 0;
}

static void k1_pcie_remove(struct platform_device *pdev)
{
	struct k1_pcie *k1 = platform_get_drvdata(pdev);

	dw_pcie_host_deinit(&k1->pci.pp);
}

static const struct of_device_id k1_pcie_of_match_table[] = {
	{ .compatible = "spacemit,k1-pcie", },
	{ }
};

static struct platform_driver k1_pcie_driver = {
	.probe = k1_pcie_probe,
	.remove = k1_pcie_remove,
	.driver = {
		.name = "spacemit-k1-pcie",
		.of_match_table = k1_pcie_of_match_table,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(k1_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SpacemiT K1 PCIe host driver");