// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
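/* 0x600 = 1536 bytes: the size of every RX buffer and the driver's max MTU */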
#define XRX200_DMA_DATA_LEN	0x600
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

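/* State of one DMA channel (one per direction): its NAPI context, the
 * descriptor ring and the skb attached to each ring slot. tx_free is
 * only used on the TX channel and tracks the next slot to reclaim.
 */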
struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

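/* MMIO accessors for the PMAC register block; the __raw variants do
 * native-endian accesses without the barriers of readl()/writel().
 */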
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    XRX200_DMA_DATA_LEN;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate frame reception on
	 * the ports, so some packets can queue up in the PPE buffers.
	 * They have already passed the PMAC, so they do not carry the
	 * tags configured here. Read these packets here and drop them.
	 * The HW should have written them into memory within 10 us.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

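/* Attach a fresh skb to the current RX descriptor and hand the
 * descriptor back to the hardware. If the DMA mapping fails, the
 * previous skb is put back into the ring slot so the descriptor
 * still points at valid memory.
 */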
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	dma_addr_t mapping;
	int ret = 0;

	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
							  XRX200_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc]) {
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
				 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
		ch->skb[ch->dma.desc] = skb;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping;
	/* Make sure the address is written before we give it to HW */
	wmb();
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		XRX200_DMA_DATA_LEN;

	return ret;
}

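/* Hand one completed RX descriptor's skb up the stack and refill the
 * ring slot with a fresh buffer.
 */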
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	int ret;

	ret = xrx200_alloc_skb(ch);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, net_dev);
	netif_receive_skb(skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += len - ETH_FCS_LEN;

	return 0;
}

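/* NAPI poll handler for the RX channel: process up to @budget
 * completed descriptors and re-enable the RX interrupt once the
 * ring is drained below the budget.
 */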
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret)
				return ret;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

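/* NAPI poll handler for the TX channel: reclaim completed skbs,
 * update the stats and BQL counters and wake the queue if it was
 * stopped for lack of free descriptors.
 */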
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

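/* Queue one frame on the TX DMA channel; frames shorter than
 * ETH_ZLEN are padded before being handed to the hardware.
 */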
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a 16 byte aligned address */
	byte_offset = mapping % 16;

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open		= xrx200_open,
	.ndo_stop		= xrx200_close,
	.ndo_start_xmit		= xrx200_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

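/* Shared IRQ handler for both DMA channels: mask the channel's
 * interrupt and defer the actual work to NAPI.
 */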
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

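/* Set up both DMA channels: allocate the descriptor rings, pre-fill
 * the RX ring with mapped skbs and request the per-channel IRQs.
 */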
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_skb(ch_rx);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			dev_kfree_skb_any(priv->chan_rx.skb[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

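/* Tear down both DMA channels and free the buffers still held in the
 * RX ring.
 */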
static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		dev_kfree_skb_any(priv->chan_rx.skb[i]);
}

static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN;

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_mac_address(np, net_dev->dev_addr);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
	netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);
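
/* For reference, a minimal sketch of the device tree node this driver
 * binds against, reconstructed from the lookups in xrx200_probe().
 * Everything except the compatible string and the "rx"/"tx" interrupt
 * names is an illustrative assumption; addresses and interrupt numbers
 * are placeholders, not values from a real .dts:
 *
 *	ethernet@... {
 *		compatible = "lantiq,xrx200-net";
 *		reg = <...>;			// PMAC register block
 *		interrupts = <...>, <...>;	// DMA channel interrupts
 *		interrupt-names = "rx", "tx";
 *		mac-address = [...];		// optional; random MAC if absent
 *	};
 */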

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");