// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe Gen4 host controller driver for NXP Layerscape SoCs
 *
 * Copyright 2019-2020 NXP
 *
 * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pcie-mobiveil.h"

/* LUT and PF control registers */
#define PCIE_LUT_OFF			0x80000
#define PCIE_PF_OFF			0xc0000
#define PCIE_PF_INT_STAT		0x18
#define PF_INT_STAT_PABRST		BIT(31)

#define PCIE_PF_DBG			0x7fc
#define PF_DBG_LTSSM_MASK		0x3f
#define PF_DBG_LTSSM_L0			0x2d /* L0 state */
#define PF_DBG_WE			BIT(31)
#define PF_DBG_PABR			BIT(27)

#define to_ls_g4_pcie(x)		platform_get_drvdata((x)->pdev)

struct ls_g4_pcie {
	struct mobiveil_pcie pci;
	struct delayed_work dwork;
	int irq;
};

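/*
 * Accessors for the PF control register block, which starts at offset
 * PCIE_PF_OFF from the controller's CSR space.
 */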
static inline u32 ls_g4_pcie_pf_readl(struct ls_g4_pcie *pcie, u32 off)
{
	return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}

static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
					u32 off, u32 val)
{
	iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}

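/*
 * The link is reported up once the LTSSM field of the PEX_PF0_DBG register
 * indicates the L0 (normal operation) state.
 */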
static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
	struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
	u32 state;

	state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
	state = state & PF_DBG_LTSSM_MASK;

	if (state == PF_DBG_LTSSM_L0)
		return 1;

	return 0;
}

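/*
 * Mask/unmask the controller's AMBA miscellaneous interrupts (INTx, MSI,
 * reset and error events) at the PAB level.
 */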
static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;

	mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
}

static void ls_g4_pcie_enable_interrupt(struct ls_g4_pcie *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u32 val;

	/* Clear the interrupt status */
	mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);

	val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
	      PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
	mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
}

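/*
 * Re-initialize the controller after a reset event: wait for the PAB reset
 * status to assert and PAB activity to clear, clear the PEX_RESET condition
 * through the write-enable-guarded PEX_PF0_DBG sequence, re-run the common
 * Mobiveil host init and then wait for the link to retrain to L0.
 */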
static int ls_g4_pcie_reinit_hw(struct ls_g4_pcie *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	struct device *dev = &mv_pci->pdev->dev;
	u32 val, act_stat;
	int to = 100;

	/* Poll for pab_csb_reset to set and PAB activity to clear */
	do {
		usleep_range(10, 15);
		val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_INT_STAT);
		act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
	} while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
	if (to < 0) {
		dev_err(dev, "Poll PABRST&PABACT timeout\n");
		return -EIO;
	}

	/* clear PEX_RESET bit in PEX_PF0_DBG register */
	val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_WE;
	ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);

	val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_PABR;
	ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);

	val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
	val &= ~PF_DBG_WE;
	ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);

	mobiveil_host_init(mv_pci, true);

	to = 100;
	while (!ls_g4_pcie_link_up(mv_pci) && to--)
		usleep_range(200, 250);
	if (to < 0) {
		dev_err(dev, "PCIe link training timeout\n");
		return -EIO;
	}

	return 0;
}

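/*
 * Shared IRQ handler for the PAB miscellaneous interrupts.  A reset event
 * masks further interrupts and defers the controller re-initialization to
 * the delayed work; all latched status bits are acknowledged on exit.
 */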
static irqreturn_t ls_g4_pcie_isr(int irq, void *dev_id)
{
	struct ls_g4_pcie *pcie = (struct ls_g4_pcie *)dev_id;
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u32 val;

	val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
	if (!val)
		return IRQ_NONE;

	if (val & PAB_INTP_RESET) {
		ls_g4_pcie_disable_interrupt(pcie);
		schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
	}

	mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);

	return IRQ_HANDLED;
}

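/*
 * Request the shared "intr" platform interrupt that reports PAB events
 * (reset, errors, etc.) handled by ls_g4_pcie_isr().
 */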
static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
{
	struct ls_g4_pcie *pcie = to_ls_g4_pcie(mv_pci);
	struct platform_device *pdev = mv_pci->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	pcie->irq = platform_get_irq_byname(pdev, "intr");
	if (pcie->irq < 0)
		return pcie->irq;

	ret = devm_request_irq(dev, pcie->irq, ls_g4_pcie_isr,
			       IRQF_SHARED, pdev->name, pcie);
	if (ret) {
		dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
		return ret;
	}

	return 0;
}

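/*
 * Delayed work scheduled from the ISR on a reset event: de-assert secondary
 * bus reset, re-initialize the controller and re-enable its interrupts.
 */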
static void ls_g4_pcie_reset(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u16 ctrl;

	ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);

	/* Re-enable interrupts only if the controller re-initialized cleanly */
	if (ls_g4_pcie_reinit_hw(pcie))
		return;

	ls_g4_pcie_enable_interrupt(pcie);
}

static const struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
	.interrupt_init = ls_g4_pcie_interrupt_init,
};

static const struct mobiveil_pab_ops ls_g4_pcie_pab_ops = {
	.link_up = ls_g4_pcie_link_up,
};

static int __init ls_g4_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct mobiveil_pcie *mv_pci;
	struct ls_g4_pcie *pcie;
	struct device_node *np = dev->of_node;
	int ret;

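	/*
	 * MSIs are expected to be delivered by an external MSI controller
	 * referenced through the "msi-parent" property, so its absence is
	 * treated as a configuration error.
	 */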
	if (!of_parse_phandle(np, "msi-parent", 0)) {
		dev_err(dev, "Failed to find msi-parent\n");
		return -EINVAL;
	}

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	mv_pci = &pcie->pci;

	mv_pci->pdev = pdev;
	mv_pci->ops = &ls_g4_pcie_pab_ops;
	mv_pci->rp.ops = &ls_g4_pcie_rp_ops;
	mv_pci->rp.bridge = bridge;

	platform_set_drvdata(pdev, pcie);

	INIT_DELAYED_WORK(&pcie->dwork, ls_g4_pcie_reset);

	ret = mobiveil_pcie_host_probe(mv_pci);
	if (ret) {
		dev_err(dev, "Failed to probe\n");
		return ret;
	}

	ls_g4_pcie_enable_interrupt(pcie);

	return 0;
}

static const struct of_device_id ls_g4_pcie_of_match[] = {
	{ .compatible = "fsl,lx2160a-pcie", },
	{ },
};

static struct platform_driver ls_g4_pcie_driver = {
	.driver = {
		.name = "layerscape-pcie-gen4",
		.of_match_table = ls_g4_pcie_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver_probe(ls_g4_pcie_driver, ls_g4_pcie_probe);