xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c (revision 2699bc6d062735f9fc430fe6dcf05b82ae8b2ab9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/iommu.h>
3 #include <linux/platform_device.h>
4 #include <linux/of.h>
5 #include <linux/module.h>
6 #include <linux/stmmac.h>
7 #include <linux/clk.h>
8 
9 #include "stmmac_platform.h"
10 
/* Clock names consumed by the MGBE block, acquired via the clk bulk API.
 *
 * NOTE(review): "mac" appears twice in this list, so the bulk API acquires
 * and enables that clock a second time.  This is harmless (enable counts
 * nest), but confirm the final entry was not meant to be a different clock.
 */
static const char *const mgbe_clks[] = {
	"rx-pcs", "tx", "tx-pcs", "mac-divider", "mac", "mgbe", "ptp_ref", "mac"
};
14 
/* Driver-private state for one Tegra MGBE (multi-gigabit Ethernet) instance,
 * stored as the stmmac bsp_priv and retrieved with get_stmmac_bsp_priv().
 */
struct tegra_mgbe {
	struct device *dev;		/* backing platform device */

	struct clk_bulk_data *clks;	/* bulk handles, one per mgbe_clks[] entry */

	struct reset_control *rst_mac;	/* "mac" reset line */
	struct reset_control *rst_pcs;	/* "pcs" reset line */

	u32 iommu_sid;			/* SMMU stream ID programmed into ASID0_CTRL */

	void __iomem *hv;		/* "hypervisor" wrapper window (SID programming) */
	void __iomem *regs;		/* "mac" register window (also passed to stmmac) */
	void __iomem *xpcs;		/* "xpcs" wrapper window (UPHY lane control) */

	/* NOTE(review): 'mii' is never assigned or read in this file — possibly
	 * vestigial; confirm before removing.
	 */
	struct mii_bus *mii;
};
31 
/* XPCS wrapper registers used to hand-sequence the UPHY RX lane */
#define XPCS_WRAP_UPHY_RX_CONTROL 0x801c
#define XPCS_WRAP_UPHY_RX_CONTROL_RX_SW_OVRD BIT(31)
#define XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY BIT(10)
#define XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET BIT(9)
#define XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN BIT(8)
#define XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP (BIT(7) | BIT(6))
#define XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ BIT(5)
#define XPCS_WRAP_UPHY_RX_CONTROL_RX_IDDQ BIT(4)
#define XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN BIT(0)
/* TX lane init is hardware-sequenced; software only kicks it off here */
#define XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8020
#define XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN BIT(0)
#define XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN BIT(2)
#define XPCS_WRAP_UPHY_STATUS 0x8044
#define XPCS_WRAP_UPHY_STATUS_TX_P_UP BIT(0)
#define XPCS_WRAP_IRQ_STATUS 0x8050
#define XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS BIT(6)

/* XPCS indirect-access helpers (not referenced elsewhere in this file) */
#define XPCS_REG_ADDR_SHIFT 10
#define XPCS_REG_ADDR_MASK 0x1fff
#define XPCS_ADDR 0x3fc

/* MGBE wrapper registers */
#define MGBE_WRAP_COMMON_INTR_ENABLE	0x8704
#define MAC_SBD_INTR			BIT(2)
#define MGBE_WRAP_AXI_ASID0_CTRL	0x8400
56 
tegra_mgbe_suspend(struct device * dev)57 static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
58 {
59 	struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);
60 	int err;
61 
62 	err = stmmac_suspend(dev);
63 	if (err)
64 		return err;
65 
66 	clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
67 
68 	return reset_control_assert(mgbe->rst_mac);
69 }
70 
tegra_mgbe_resume(struct device * dev)71 static int __maybe_unused tegra_mgbe_resume(struct device *dev)
72 {
73 	struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);
74 	u32 value;
75 	int err;
76 
77 	err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
78 	if (err < 0)
79 		return err;
80 
81 	err = reset_control_deassert(mgbe->rst_mac);
82 	if (err < 0)
83 		return err;
84 
85 	/* Enable common interrupt at wrapper level */
86 	writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
87 
88 	/* Program SID */
89 	writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
90 
91 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
92 	if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
93 		value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
94 		value |= XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN;
95 		writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
96 	}
97 
98 	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL, value,
99 				 (value & XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) == 0,
100 				 500, 500 * 2000);
101 	if (err < 0) {
102 		dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
103 		clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
104 		return err;
105 	}
106 
107 	err = stmmac_resume(dev);
108 	if (err < 0)
109 		clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
110 
111 	return err;
112 }
113 
/* Power up the UPHY RX lane and bring the XPCS link to ready.
 *
 * Installed as plat->serdes_powerup, so the stmmac core calls this (with
 * mgbe_data == our struct tegra_mgbe) after PHY link-up, per the
 * STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP flag set in probe.
 *
 * The sequence (SW override -> release IDDQ -> leave sleep -> RX calibration
 * -> data enable -> CDR reset pulse -> wait for PCS link) and the delays
 * between steps follow hardware requirements noted inline; do not reorder.
 *
 * Returns 0 on success, or the negative errno from readl_poll_timeout() if
 * RX calibration or PCS link-up times out.
 */
static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_data)
{
	struct tegra_mgbe *mgbe = (struct tegra_mgbe *)mgbe_data;
	u32 value;
	int err;

	/* Take software control of the RX lane state machine */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_SW_OVRD;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	/* Release the deep power-down (IDDQ) controls */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_IDDQ;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
	/* Take the lane out of sleep */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	usleep_range(10, 20);  /* 500ns min delay needed as per HW design */
	/* Start RX calibration; hardware clears the bit when it finishes */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL, value,
				 (value & XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN) == 0,
				 1000, 1000 * 2000);
	if (err < 0) {
		dev_err(mgbe->dev, "timeout waiting for RX calibration to become enabled\n");
		return err;
	}

	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
	/* Enable the RX data path */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	/* Toggle PCS_PHY_RDY around the CDR reset pulse below */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	msleep(30);  /* 30ms delay needed as per HW design */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	/* Wait for the PCS to report link up */
	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
				 value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
				 500, 500 * 2000);
	if (err < 0) {
		dev_err(mgbe->dev, "timeout waiting for link to become ready\n");
		return err;
	}

	/* clear status (write back the bits that were read as set) */
	writel(value, mgbe->xpcs + XPCS_WRAP_IRQ_STATUS);

	return 0;
}
187 
/* Power down the UPHY RX lane; installed as plat->serdes_powerdown.
 *
 * Reverse of mgbe_uphy_lane_bringup_serdes_up(): take software control,
 * disable the data path, then put the lane to sleep and back into IDDQ.
 * The register write order mirrors the bring-up sequence; do not reorder.
 */
static void mgbe_uphy_lane_bringup_serdes_down(struct net_device *ndev, void *mgbe_data)
{
	struct tegra_mgbe *mgbe = (struct tegra_mgbe *)mgbe_data;
	u32 value;

	/* Take software control of the RX lane state machine */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_SW_OVRD;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	/* Stop the RX data path */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	/* Put the lane to sleep ... */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	/* ... and back into deep power-down (IDDQ) */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_IDDQ;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
}
213 
/* Probe one Tegra MGBE instance: map register windows, enable clocks, pulse
 * the MAC and PCS resets, configure the stmmac platform data (XGMAC core,
 * FIFO sizes, serdes callbacks) and register with the stmmac core.
 *
 * Returns 0 on success or a negative errno; on any failure after the clocks
 * are enabled, they are disabled again via the disable_clks label.
 */
static int tegra_mgbe_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	bool use_legacy_ptp = false;
	struct tegra_mgbe *mgbe;
	int irq, err, i;
	u32 value;

	mgbe = devm_kzalloc(&pdev->dev, sizeof(*mgbe), GFP_KERNEL);
	if (!mgbe)
		return -ENOMEM;

	mgbe->dev = &pdev->dev;

	memset(&res, 0, sizeof(res));

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Three named register windows: "hypervisor" (SID programming),
	 * "mac" (handed to stmmac) and "xpcs" (UPHY lane control).
	 */
	mgbe->hv = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
	if (IS_ERR(mgbe->hv))
		return PTR_ERR(mgbe->hv);

	mgbe->regs = devm_platform_ioremap_resource_byname(pdev, "mac");
	if (IS_ERR(mgbe->regs))
		return PTR_ERR(mgbe->regs);

	mgbe->xpcs = devm_platform_ioremap_resource_byname(pdev, "xpcs");
	if (IS_ERR(mgbe->xpcs))
		return PTR_ERR(mgbe->xpcs);

	/* get controller's stream id from iommu property in device tree */
	if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
		dev_err(mgbe->dev, "failed to get iommu stream id\n");
		return -EINVAL;
	}

	res.addr = mgbe->regs;
	res.irq = irq;

	mgbe->clks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(mgbe_clks),
				  sizeof(*mgbe->clks), GFP_KERNEL);
	if (!mgbe->clks)
		return -ENOMEM;

	/* Older device-trees use 'ptp-ref' rather than 'ptp_ref'.
	 * Fall back when the legacy name is present.
	 */
	if (of_property_match_string(pdev->dev.of_node, "clock-names",
				     "ptp-ref") >= 0)
		use_legacy_ptp = true;

	for (i = 0; i < ARRAY_SIZE(mgbe_clks); i++) {
		mgbe->clks[i].id = mgbe_clks[i];

		/* Substitute the legacy clock name so the bulk get succeeds */
		if (use_legacy_ptp && !strcmp(mgbe_clks[i], "ptp_ref")) {
			dev_warn(mgbe->dev,
				 "Device-tree update needed for PTP clock!\n");
			mgbe->clks[i].id = "ptp-ref";
		}
	}

	err = devm_clk_bulk_get(mgbe->dev, ARRAY_SIZE(mgbe_clks), mgbe->clks);
	if (err < 0)
		return err;

	err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
	if (err < 0)
		return err;

	/* Perform MAC reset */
	mgbe->rst_mac = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(mgbe->rst_mac)) {
		err = PTR_ERR(mgbe->rst_mac);
		goto disable_clks;
	}

	err = reset_control_assert(mgbe->rst_mac);
	if (err < 0)
		goto disable_clks;

	usleep_range(2000, 4000);

	err = reset_control_deassert(mgbe->rst_mac);
	if (err < 0)
		goto disable_clks;

	/* Perform PCS reset */
	mgbe->rst_pcs = devm_reset_control_get(&pdev->dev, "pcs");
	if (IS_ERR(mgbe->rst_pcs)) {
		err = PTR_ERR(mgbe->rst_pcs);
		goto disable_clks;
	}

	err = reset_control_assert(mgbe->rst_pcs);
	if (err < 0)
		goto disable_clks;

	usleep_range(2000, 4000);

	err = reset_control_deassert(mgbe->rst_pcs);
	if (err < 0)
		goto disable_clks;

	plat = devm_stmmac_probe_config_dt(pdev, res.mac);
	if (IS_ERR(plat)) {
		err = PTR_ERR(plat);
		goto disable_clks;
	}

	plat->core_type = DWMAC_CORE_XGMAC;
	plat->flags |= STMMAC_FLAG_TSO_EN;
	plat->pmt = true;
	plat->bsp_priv = mgbe;

	if (!plat->mdio_node)
		plat->mdio_node = of_get_child_by_name(pdev->dev.of_node, "mdio");

	if (!plat->mdio_bus_data) {
		plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(*plat->mdio_bus_data),
						   GFP_KERNEL);
		if (!plat->mdio_bus_data) {
			err = -ENOMEM;
			goto disable_clks;
		}
	}

	plat->mdio_bus_data->needs_reset = true;

	/* Kick off hardware TX-lane init if it is not already powered up */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
	if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
		value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
		value |= XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN;
		writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
	}

	/* Hardware clears TX_EN when the lane init sequence has completed */
	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL, value,
				 (value & XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) == 0,
				 500, 500 * 2000);
	if (err < 0) {
		dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
		goto disable_clks;
	}

	plat->serdes_powerup = mgbe_uphy_lane_bringup_serdes_up;
	plat->serdes_powerdown = mgbe_uphy_lane_bringup_serdes_down;

	/* Tx FIFO Size - 128KB */
	plat->tx_fifo_size = 131072;
	/* Rx FIFO Size - 192KB */
	plat->rx_fifo_size = 196608;

	/* Enable common interrupt at wrapper level */
	writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);

	/* Program SID */
	writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);

	plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;

	err = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (err < 0)
		goto disable_clks;

	return 0;

disable_clks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);

	return err;
}
387 
tegra_mgbe_remove(struct platform_device * pdev)388 static void tegra_mgbe_remove(struct platform_device *pdev)
389 {
390 	struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(&pdev->dev);
391 
392 	clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
393 
394 	stmmac_pltfr_remove(pdev);
395 }
396 
/* Device-tree match table: Tegra234 is the only supported SoC */
static const struct of_device_id tegra_mgbe_match[] = {
	{ .compatible = "nvidia,tegra234-mgbe", },
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_mgbe_match);

/* System sleep only; the callbacks are __maybe_unused for !CONFIG_PM builds */
static SIMPLE_DEV_PM_OPS(tegra_mgbe_pm_ops, tegra_mgbe_suspend, tegra_mgbe_resume);

static struct platform_driver tegra_mgbe_driver = {
	.probe = tegra_mgbe_probe,
	.remove = tegra_mgbe_remove,
	.driver = {
		.name = "tegra-mgbe",
		.pm		= &tegra_mgbe_pm_ops,
		.of_match_table = tegra_mgbe_match,
	},
};
module_platform_driver(tegra_mgbe_driver);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra MGBE driver");
MODULE_LICENSE("GPL");
419