// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale Layerscape SoCs
 *
 * Copyright (C) 2014 Freescale Semiconductor.
 * Copyright 2021 NXP
 *
 * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1		0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_ABSERR		0x8d0 /* Bridge Slave Error Response Register */
#define PCIE_ABSERR_SETTING	0x9401 /* Forward error of non-posted request */

/* PF Message Command Register */
#define LS_PCIE_PF_MCR		0x2c
#define PF_MCR_PTOMR		BIT(0)
#define PF_MCR_EXL2S		BIT(1)

/* LS1021A PEXn PM Write Control Register */
#define SCFG_PEXPMWRCR(idx)	(0x5c + (idx) * 0x64)
#define PMXMTTURNOFF		BIT(31)
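/* LS1021A PEX soft reset register, used to bring the link out of L2 */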
#define SCFG_PEXSFTRSTCR	0x190
#define PEXSR(idx)		BIT(idx)

/* LS1043A PEX PME control register */
#define SCFG_PEXPMECR		0x144
#define PEXPME(idx)		BIT(31 - (idx) * 4)

/* LS1043A PEX LUT debug register */
#define LS_PCIE_LDBG	0x7fc
#define LDBG_SR		BIT(30)
#define LDBG_WE		BIT(31)

#define PCIE_IATU_NUM	6

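/*
 * struct ls_pcie_drvdata - per-SoC configuration
 * @pf_lut_off:   offset of the PF/LUT register block from the DBI base
 * @ops:          DesignWare host ops (host init and PME_Turn_Off hooks)
 * @exit_from_l2: SoC-specific callback used on resume to bring the link
 *                out of L2
 * @scfg_support: PM controls are routed through the SCFG syscon
 * @pm_support:   system suspend/resume is supported
 */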
struct ls_pcie_drvdata {
	const u32 pf_lut_off;
	const struct dw_pcie_host_ops *ops;
	int (*exit_from_l2)(struct dw_pcie_rp *pp);
	bool scfg_support;
	bool pm_support;
};

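/*
 * struct ls_pcie - per-controller state
 * @pci:         associated DesignWare PCIe controller
 * @drvdata:     matched per-SoC configuration
 * @pf_lut_base: mapped base of the PF/LUT register block
 * @scfg:        SCFG syscon regmap (SoCs with scfg_support only)
 * @index:       controller index, taken from the "fsl,pcie-scfg" phandle args
 * @big_endian:  PF/LUT registers are big-endian
 */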
struct ls_pcie {
	struct dw_pcie *pci;
	const struct ls_pcie_drvdata *drvdata;
	void __iomem *pf_lut_base;
	struct regmap *scfg;
	int index;
	bool big_endian;
};

#define ls_pcie_pf_lut_readl_addr(addr)	ls_pcie_pf_lut_readl(pcie, addr)
#define to_ls_pcie(x)	dev_get_drvdata((x)->dev)

static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 header_type;

	header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
	header_type &= PCI_HEADER_TYPE_MASK;

	return header_type == PCI_HEADER_TYPE_BRIDGE;
}

/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
}

/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
	u32 val;
	struct dw_pcie *pci = pcie->pci;

	val = ioread32(pci->dbi_base + PCIE_STRFMR1);
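	/*
	 * 0xDFFFFFFF clears bit 29 of STRFMR1; with that bit cleared, message
	 * TLPs other than vendor messages are dropped (see function comment).
	 */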
	val &= 0xDFFFFFFF;
	iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}

/* Forward error response of outbound non-posted requests */
static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}

static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off)
{
	if (pcie->big_endian)
		return ioread32be(pcie->pf_lut_base + off);

	return ioread32(pcie->pf_lut_base + off);
}

static void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val)
{
	if (pcie->big_endian)
		iowrite32be(val, pcie->pf_lut_base + off);
	else
		iowrite32(val, pcie->pf_lut_base + off);
}

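/*
 * Send the PME_Turn_Off message by setting PF_MCR_PTOMR, then poll until
 * the bit clears again, bounded by PCIE_PME_TO_L2_TIMEOUT_US.
 */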
static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);
	u32 val;
	int ret;

	val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
	val |= PF_MCR_PTOMR;
	ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);

	ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
				 val, !(val & PF_MCR_PTOMR),
				 PCIE_PME_TO_L2_TIMEOUT_US/10,
				 PCIE_PME_TO_L2_TIMEOUT_US);
	if (ret)
		dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
}

static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);
	u32 val;
	int ret;

	/*
	 * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
	 * to exit L2 state.
	 */
	val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
	val |= PF_MCR_EXL2S;
	ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);

	/*
	 * L2 exit timeout of 10ms is not defined in the specifications,
	 * it was chosen based on empirical observations.
	 */
	ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
				 val, !(val & PF_MCR_EXL2S),
				 1000,
				 10000);
	if (ret)
		dev_err(pcie->pci->dev, "L2 exit timeout\n");

	return ret;
}

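/* One-time host setup: error forwarding, header fixup and MSG TLP filtering */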
static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);

	ls_pcie_fix_error_response(pcie);

	dw_pcie_dbi_ro_wr_en(pci);
	ls_pcie_clear_multifunction(pcie);
	dw_pcie_dbi_ro_wr_dis(pci);

	ls_pcie_drop_msg_tlp(pcie);

	return 0;
}

static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask)
{
	/* Send PME_Turn_Off message */
	regmap_write_bits(scfg, reg, mask, mask);

	/*
	 * There is no specific register to check for PME_To_Ack from endpoint.
	 * So on the safe side, wait for PCIE_PME_TO_L2_TIMEOUT_US.
	 */
	mdelay(PCIE_PME_TO_L2_TIMEOUT_US/1000);

	/*
	 * Layerscape hardware reference manual recommends clearing the
	 * PMXMTTURNOFF bit to complete the PME_Turn_Off handshake.
	 */
	regmap_write_bits(scfg, reg, mask, 0);
}

static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);

	scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF);
}

static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask)
{
	/* Reset the PEX wrapper to bring the link out of L2 */
	regmap_write_bits(scfg, reg, mask, mask);
	regmap_write_bits(scfg, reg, mask, 0);

	return 0;
}

static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);

	return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index));
}

static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);

	scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index));
}

static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);
	u32 val;

	/*
	 * Reset the PEX wrapper to bring the link out of L2.
	 * LDBG_WE: allows the user to have write access to the PEXDBG[SR] for
	 * both setting and clearing the soft reset on the PEX module.
	 * LDBG_SR: When SR is set to 1, the PEX module enters soft reset.
	 */
	val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
	val |= LDBG_WE;
	ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);

	val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
	val |= LDBG_SR;
	ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);

	val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
	val &= ~LDBG_SR;
	ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);

	val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
	val &= ~LDBG_WE;
	ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);

	return 0;
}

static const struct dw_pcie_host_ops ls_pcie_host_ops = {
	.init = ls_pcie_host_init,
	.pme_turn_off = ls_pcie_send_turnoff_msg,
};

static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = {
	.init = ls_pcie_host_init,
	.pme_turn_off = ls1021a_pcie_send_turnoff_msg,
};

static const struct ls_pcie_drvdata ls1021a_drvdata = {
	.pm_support = true,
	.scfg_support = true,
	.ops = &ls1021a_pcie_host_ops,
	.exit_from_l2 = ls1021a_pcie_exit_from_l2,
};

static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = {
	.init = ls_pcie_host_init,
	.pme_turn_off = ls1043a_pcie_send_turnoff_msg,
};

static const struct ls_pcie_drvdata ls1043a_drvdata = {
	.pf_lut_off = 0x10000,
	.pm_support = true,
	.scfg_support = true,
	.ops = &ls1043a_pcie_host_ops,
	.exit_from_l2 = ls1043a_pcie_exit_from_l2,
};

static const struct ls_pcie_drvdata layerscape_drvdata = {
	.pf_lut_off = 0xc0000,
	.pm_support = true,
	.ops = &ls_pcie_host_ops,
	.exit_from_l2 = ls_pcie_exit_from_l2,
};

static const struct of_device_id ls_pcie_of_match[] = {
	{ .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
	{ .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata },
	{ .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
	{ .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
	{ .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
	{ .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata },
	{ .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata },
	{ },
};

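/*
 * Probe: map the "regs" (DBI) resource, derive the PF/LUT base from the
 * per-SoC offset and, on SoCs with scfg_support, look up the "fsl,pcie-scfg"
 * syscon and take the controller index from the second cell of that property.
 */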
static int ls_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct ls_pcie *pcie;
	struct resource *dbi_base;
	u32 index[2];
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pcie->drvdata = of_device_get_match_data(dev);

	pci->dev = dev;
	pcie->pci = pci;
	pci->pp.ops = pcie->drvdata->ops;

	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");

	pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;

	if (pcie->drvdata->scfg_support) {
		pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
		if (IS_ERR(pcie->scfg)) {
			dev_err(dev, "No syscfg phandle specified\n");
			return PTR_ERR(pcie->scfg);
		}

		ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
		if (ret)
			return ret;

		pcie->index = index[1];
	}

	if (!ls_pcie_is_bridge(pcie))
		return -ENODEV;

	platform_set_drvdata(pdev, pcie);

	return dw_pcie_host_init(&pci->pp);
}

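/*
 * System sleep hooks; both are no-ops on SoCs without pm_support. Resume
 * first calls the SoC-specific exit_from_l2() callback to bring the link
 * out of L2 and then hands off to the DWC core resume.
 */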
static int ls_pcie_suspend_noirq(struct device *dev)
{
	struct ls_pcie *pcie = dev_get_drvdata(dev);

	if (!pcie->drvdata->pm_support)
		return 0;

	return dw_pcie_suspend_noirq(pcie->pci);
}

static int ls_pcie_resume_noirq(struct device *dev)
{
	struct ls_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	if (!pcie->drvdata->pm_support)
		return 0;

	ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp);
	if (ret)
		return ret;

	return dw_pcie_resume_noirq(pcie->pci);
}

static const struct dev_pm_ops ls_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq)
};

static struct platform_driver ls_pcie_driver = {
	.probe = ls_pcie_probe,
	.driver = {
		.name = "layerscape-pcie",
		.of_match_table = ls_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &ls_pcie_pm_ops,
	},
};
builtin_platform_driver(ls_pcie_driver);