xref: /linux/drivers/net/ethernet/renesas/rswitch.c (revision 7681a4f58fb9c338d6dfe1181607f84c793d77de)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Renesas Ethernet Switch device driver
3  *
4  * Copyright (C) 2022 Renesas Electronics Corporation
5  */
6 
7 #include <linux/dma-mapping.h>
8 #include <linux/err.h>
9 #include <linux/etherdevice.h>
10 #include <linux/iopoll.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/net_tstamp.h>
14 #include <linux/of.h>
15 #include <linux/of_device.h>
16 #include <linux/of_irq.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_net.h>
19 #include <linux/phylink.h>
20 #include <linux/phy/phy.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 
26 #include "rswitch.h"
27 
28 static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
29 {
30 	u32 val;
31 
32 	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
33 					 1, RSWITCH_TIMEOUT_US);
34 }
35 
36 static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
37 {
38 	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
39 }
40 
41 /* Common Agent block (COMA) */
/* Software-reset the whole switch IP: assert the reset bit, then clear it. */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}
47 
/* Enable the switch clocks: default agent clock enables plus the global
 * enable bit (RCE).
 */
static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}
52 
/* Disable the switch clocks via the clock-disable register. */
static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}
57 
58 static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
59 {
60 	u32 val = ioread32(coma_addr + RCEC);
61 
62 	if (val & RCEC_RCE)
63 		return (val & BIT(port)) ? true : false;
64 	else
65 		return false;
66 }
67 
68 static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
69 {
70 	u32 val;
71 
72 	if (enable) {
73 		val = ioread32(coma_addr + RCEC);
74 		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
75 	} else {
76 		val = ioread32(coma_addr + RCDC);
77 		iowrite32(val | BIT(port), coma_addr + RCDC);
78 	}
79 }
80 
81 static int rswitch_bpool_config(struct rswitch_private *priv)
82 {
83 	u32 val;
84 
85 	val = ioread32(priv->addr + CABPIRM);
86 	if (val & CABPIRM_BPR)
87 		return 0;
88 
89 	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);
90 
91 	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
92 }
93 
94 /* R-Switch-2 block (TOP) */
95 static void rswitch_top_init(struct rswitch_private *priv)
96 {
97 	int i;
98 
99 	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
100 		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
101 }
102 
103 /* Forwarding engine block (MFWD) */
/* Program the forwarding engine: each ETHA port forwards to the GWCA
 * (via its device's RX queue), and the GWCA forwards to all ETHA ports.
 */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		/* Default port config; clear forwarding destinations first. */
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		/* Steer this port's frames into its device's RX queue ... */
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		/* ... and forward them to the GWCA agent. */
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	/* NOTE(review): the zero write below is immediately overwritten by
	 * the GENMASK write; presumably a deliberate clear-before-set
	 * sequence — confirm against the datasheet.
	 */
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}
126 
127 /* gPTP timer (gPTP) */
/* Read the current gPTP time through the PTP clock's gettime64 callback. */
static void rswitch_get_timestamp(struct rswitch_private *priv,
				  struct timespec64 *ts)
{
	priv->ptp_priv->info.gettime64(&priv->ptp_priv->info, ts);
}
133 
134 /* Gateway CPU agent block (GWCA) */
135 static int rswitch_gwca_change_mode(struct rswitch_private *priv,
136 				    enum rswitch_gwca_mode mode)
137 {
138 	int ret;
139 
140 	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
141 		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);
142 
143 	iowrite32(mode, priv->addr + GWMC);
144 
145 	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);
146 
147 	if (mode == GWMC_OPC_DISABLE)
148 		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);
149 
150 	return ret;
151 }
152 
/* Kick the GWCA multicast table reset and wait for completion. */
static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}
159 
/* Kick the GWCA AXI RAM reset and wait for completion. */
static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}
166 
167 static void rswitch_gwca_set_rate_limit(struct rswitch_private *priv, int rate)
168 {
169 	u32 gwgrlulc, gwgrlc;
170 
171 	switch (rate) {
172 	case 1000:
173 		gwgrlulc = 0x0000005f;
174 		gwgrlc = 0x00010260;
175 		break;
176 	default:
177 		dev_err(&priv->pdev->dev, "%s: This rate is not supported (%d)\n", __func__, rate);
178 		return;
179 	}
180 
181 	iowrite32(gwgrlulc, priv->addr + GWGRLULC);
182 	iowrite32(gwgrlc, priv->addr + GWGRLC);
183 }
184 
185 static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
186 {
187 	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
188 	int i;
189 
190 	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
191 		if (dis[i] & mask[i])
192 			return true;
193 	}
194 
195 	return false;
196 }
197 
198 static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
199 {
200 	int i;
201 
202 	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
203 		dis[i] = ioread32(priv->addr + GWDIS(i));
204 		dis[i] &= ioread32(priv->addr + GWDIE(i));
205 	}
206 }
207 
208 static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
209 {
210 	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);
211 
212 	iowrite32(BIT(index % 32), priv->addr + offs);
213 }
214 
215 static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
216 {
217 	u32 offs = GWDIS(index / 32);
218 
219 	iowrite32(BIT(index % 32), priv->addr + offs);
220 }
221 
222 static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
223 {
224 	int index = cur ? gq->cur : gq->dirty;
225 
226 	if (index + num >= gq->ring_size)
227 		index = (index + num) % gq->ring_size;
228 	else
229 		index += num;
230 
231 	return index;
232 }
233 
234 static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
235 {
236 	if (gq->cur >= gq->dirty)
237 		return gq->cur - gq->dirty;
238 	else
239 		return gq->ring_size - gq->dirty + gq->cur;
240 }
241 
242 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
243 {
244 	struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
245 
246 	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
247 		return true;
248 
249 	return false;
250 }
251 
/* Allocate RX skbs for @num ring slots starting at @start_index.  Slots
 * that already hold an skb are left untouched.  Returns 0 on success or
 * -ENOMEM after rolling the visited slots back.
 */
static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		/* Over-allocate so the IP-aligned payload still fits PKT_BUF_SZ. */
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	/* NOTE(review): this rollback also frees skbs that were already
	 * present before this call (the "continue" slots above).  That
	 * looks tolerable only because callers halt the GWCA on failure —
	 * confirm before reusing this helper elsewhere.
	 */
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}
278 
279 static void rswitch_gwca_queue_free(struct net_device *ndev,
280 				    struct rswitch_gwca_queue *gq)
281 {
282 	int i;
283 
284 	if (gq->gptp) {
285 		dma_free_coherent(ndev->dev.parent,
286 				  sizeof(struct rswitch_ext_ts_desc) *
287 				  (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
288 		gq->ts_ring = NULL;
289 	} else {
290 		dma_free_coherent(ndev->dev.parent,
291 				  sizeof(struct rswitch_ext_desc) *
292 				  (gq->ring_size + 1), gq->ring, gq->ring_dma);
293 		gq->ring = NULL;
294 	}
295 
296 	if (!gq->dir_tx) {
297 		for (i = 0; i < gq->ring_size; i++)
298 			dev_kfree_skb(gq->skbs[i]);
299 	}
300 
301 	kfree(gq->skbs);
302 	gq->skbs = NULL;
303 }
304 
305 static int rswitch_gwca_queue_alloc(struct net_device *ndev,
306 				    struct rswitch_private *priv,
307 				    struct rswitch_gwca_queue *gq,
308 				    bool dir_tx, bool gptp, int ring_size)
309 {
310 	int i, bit;
311 
312 	gq->dir_tx = dir_tx;
313 	gq->gptp = gptp;
314 	gq->ring_size = ring_size;
315 	gq->ndev = ndev;
316 
317 	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
318 	if (!gq->skbs)
319 		return -ENOMEM;
320 
321 	if (!dir_tx)
322 		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
323 
324 	if (gptp)
325 		gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
326 						 sizeof(struct rswitch_ext_ts_desc) *
327 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
328 	else
329 		gq->ring = dma_alloc_coherent(ndev->dev.parent,
330 					      sizeof(struct rswitch_ext_desc) *
331 					      (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
332 	if (!gq->ts_ring && !gq->ring)
333 		goto out;
334 
335 	i = gq->index / 32;
336 	bit = BIT(gq->index % 32);
337 	if (dir_tx)
338 		priv->gwca.tx_irq_bits[i] |= bit;
339 	else
340 		priv->gwca.rx_irq_bits[i] |= bit;
341 
342 	return 0;
343 
344 out:
345 	rswitch_gwca_queue_free(ndev, gq);
346 
347 	return -ENOMEM;
348 }
349 
350 static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
351 {
352 	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
353 	desc->dptrh = upper_32_bits(addr) & 0xff;
354 }
355 
356 static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
357 {
358 	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
359 }
360 
/* Format a (non-timestamp) descriptor ring: map RX buffers (for RX
 * queues) or mark TX slots empty, close the ring with a LINKFIX
 * descriptor pointing back to its head, publish the ring in the
 * link-fix table, and enable the descriptor chain in GWDCC.
 * Returns 0 or -ENOMEM with all mappings undone.
 */
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->ring, 0, tx_ring_size);
	for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			/* RX: hand a mapped buffer to hardware (FEMPTY). */
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			/* TX: slot is free for the driver (EEMPTY). */
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	/* The extra trailing descriptor links back to the ring head. */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		/* Walk forward over the 0..i-1 entries that were mapped. */
		for (i--, desc = gq->ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}
410 
/* Prepare @num timestamp-ring descriptors starting at @start_index: map
 * each RX skb and hand the slot to hardware (DT_FEMPTY), or mark TX
 * slots driver-owned (DT_EEMPTY).  On a mapping failure, unmap the
 * slots done so far and return -ENOMEM.
 */
static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
				      struct rswitch_gwca_queue *gq,
				      int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			/* Publish the buffer before flipping ownership. */
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			/* Tag the slot with the source port number. */
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->ts_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}
455 
/* Format a timestamp descriptor ring: fill all slots, close the ring
 * with a LINKFIX descriptor, publish it in the link-fix table, and
 * enable the chain (with timestamping, GWDCC_ETS) in GWDCC.
 */
static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
					struct rswitch_private *priv,
					struct rswitch_gwca_queue *gq)
{
	int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->ts_ring, 0, tx_ts_ring_size);
	err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->ts_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}
483 
484 static int rswitch_gwca_desc_alloc(struct rswitch_private *priv)
485 {
486 	int i, num_queues = priv->gwca.num_queues;
487 	struct device *dev = &priv->pdev->dev;
488 
489 	priv->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
490 	priv->linkfix_table = dma_alloc_coherent(dev, priv->linkfix_table_size,
491 						 &priv->linkfix_table_dma, GFP_KERNEL);
492 	if (!priv->linkfix_table)
493 		return -ENOMEM;
494 	for (i = 0; i < num_queues; i++)
495 		priv->linkfix_table[i].die_dt = DT_EOS;
496 
497 	return 0;
498 }
499 
500 static void rswitch_gwca_desc_free(struct rswitch_private *priv)
501 {
502 	if (priv->linkfix_table)
503 		dma_free_coherent(&priv->pdev->dev, priv->linkfix_table_size,
504 				  priv->linkfix_table, priv->linkfix_table_dma);
505 	priv->linkfix_table = NULL;
506 }
507 
508 static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
509 {
510 	struct rswitch_gwca_queue *gq;
511 	int index;
512 
513 	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
514 	if (index >= priv->gwca.num_queues)
515 		return NULL;
516 	set_bit(index, priv->gwca.used);
517 	gq = &priv->gwca.queues[index];
518 	memset(gq, 0, sizeof(*gq));
519 	gq->index = index;
520 
521 	return gq;
522 }
523 
/* Return a queue claimed by rswitch_gwca_get() to the free pool. */
static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}
529 
530 static int rswitch_txdmac_alloc(struct net_device *ndev)
531 {
532 	struct rswitch_device *rdev = netdev_priv(ndev);
533 	struct rswitch_private *priv = rdev->priv;
534 	int err;
535 
536 	rdev->tx_queue = rswitch_gwca_get(priv);
537 	if (!rdev->tx_queue)
538 		return -EBUSY;
539 
540 	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, false,
541 				       TX_RING_SIZE);
542 	if (err < 0) {
543 		rswitch_gwca_put(priv, rdev->tx_queue);
544 		return err;
545 	}
546 
547 	return 0;
548 }
549 
550 static void rswitch_txdmac_free(struct net_device *ndev)
551 {
552 	struct rswitch_device *rdev = netdev_priv(ndev);
553 
554 	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
555 	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
556 }
557 
558 static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
559 {
560 	struct rswitch_device *rdev = priv->rdev[index];
561 
562 	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
563 }
564 
565 static int rswitch_rxdmac_alloc(struct net_device *ndev)
566 {
567 	struct rswitch_device *rdev = netdev_priv(ndev);
568 	struct rswitch_private *priv = rdev->priv;
569 	int err;
570 
571 	rdev->rx_queue = rswitch_gwca_get(priv);
572 	if (!rdev->rx_queue)
573 		return -EBUSY;
574 
575 	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, true,
576 				       RX_RING_SIZE);
577 	if (err < 0) {
578 		rswitch_gwca_put(priv, rdev->rx_queue);
579 		return err;
580 	}
581 
582 	return 0;
583 }
584 
585 static void rswitch_rxdmac_free(struct net_device *ndev)
586 {
587 	struct rswitch_device *rdev = netdev_priv(ndev);
588 
589 	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
590 	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
591 }
592 
593 static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
594 {
595 	struct rswitch_device *rdev = priv->rdev[index];
596 	struct net_device *ndev = rdev->ndev;
597 
598 	return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
599 }
600 
/* Bring the GWCA agent into OPERATION mode: go through DISABLE into
 * CONFIG, reset its tables, program the descriptor base and rate limit,
 * format every port's DMAC rings, then DISABLE -> OPERATION.
 */
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	/* Link-fix table DMA base.  NOTE(review): GWDCBAC1 takes the low
	 * half and GWDCBAC0 the high half — confirm against the register
	 * map, the naming is easy to read backwards.
	 */
	iowrite32(lower_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC0);
	rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}
639 
/* Shut the GWCA down: DISABLE -> RESET -> DISABLE. */
static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}
653 
/* Fatal-error path: flag the halt (so the NAPI poll loop stops
 * rearming itself) and shut the GWCA down.
 */
static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}
664 
665 static bool rswitch_rx(struct net_device *ndev, int *quota)
666 {
667 	struct rswitch_device *rdev = netdev_priv(ndev);
668 	struct rswitch_gwca_queue *gq = rdev->rx_queue;
669 	struct rswitch_ext_ts_desc *desc;
670 	int limit, boguscnt, num, ret;
671 	struct sk_buff *skb;
672 	dma_addr_t dma_addr;
673 	u16 pkt_len;
674 	u32 get_ts;
675 
676 	boguscnt = min_t(int, gq->ring_size, *quota);
677 	limit = boguscnt;
678 
679 	desc = &gq->ts_ring[gq->cur];
680 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
681 		if (--boguscnt < 0)
682 			break;
683 		dma_rmb();
684 		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
685 		skb = gq->skbs[gq->cur];
686 		gq->skbs[gq->cur] = NULL;
687 		dma_addr = rswitch_desc_get_dptr(&desc->desc);
688 		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
689 		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
690 		if (get_ts) {
691 			struct skb_shared_hwtstamps *shhwtstamps;
692 			struct timespec64 ts;
693 
694 			shhwtstamps = skb_hwtstamps(skb);
695 			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
696 			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
697 			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
698 			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
699 		}
700 		skb_put(skb, pkt_len);
701 		skb->protocol = eth_type_trans(skb, ndev);
702 		netif_receive_skb(skb);
703 		rdev->ndev->stats.rx_packets++;
704 		rdev->ndev->stats.rx_bytes += pkt_len;
705 
706 		gq->cur = rswitch_next_queue_index(gq, true, 1);
707 		desc = &gq->ts_ring[gq->cur];
708 	}
709 
710 	num = rswitch_get_num_cur_queues(gq);
711 	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
712 	if (ret < 0)
713 		goto err;
714 	ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
715 	if (ret < 0)
716 		goto err;
717 	gq->dirty = rswitch_next_queue_index(gq, false, num);
718 
719 	*quota -= limit - (++boguscnt);
720 
721 	return boguscnt <= 0;
722 
723 err:
724 	rswitch_gwca_halt(rdev->priv);
725 
726 	return 0;
727 }
728 
/* Reclaim TX descriptors: unmap buffers, deliver TX timestamps, free
 * skbs, and update stats.  With @free_txed_only, stop at the first
 * descriptor the hardware has not completed yet.  Returns the number of
 * skbs freed.
 */
static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->ring[gq->dirty];
		/* Completed TX descriptors are rewritten to DT_FEMPTY by
		 * hardware; anything else is still in flight.
		 */
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct timespec64 ts;

				/* NOTE(review): this samples the PTP clock at
				 * reclaim time rather than reading a per-frame
				 * hardware timestamp — confirm this is the
				 * intended approximation.
				 */
				rswitch_get_timestamp(rdev->priv, &ts);
				memset(&shhwtstamps, 0, sizeof(shhwtstamps));
				shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
				skb_tstamp_tx(skb, &shhwtstamps);
			}
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}
772 
/* NAPI poll: reclaim TX completions, receive up to @budget frames, and
 * re-enable the queue interrupts once all work is done.
 */
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;	/* budget exhausted: stay scheduled */
	else if (rdev->priv->gwca_halt)
		goto err;	/* GWCA halted: stop polling */
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;	/* frames arrived while we were polling */

	netif_wake_subqueue(ndev, 0);

	napi_complete(napi);

	/* Re-arm the interrupts masked in rswitch_queue_interrupt(). */
	rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}
808 
809 static void rswitch_queue_interrupt(struct net_device *ndev)
810 {
811 	struct rswitch_device *rdev = netdev_priv(ndev);
812 
813 	if (napi_schedule_prep(&rdev->napi)) {
814 		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
815 		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
816 		__napi_schedule(&rdev->napi);
817 	}
818 }
819 
820 static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
821 {
822 	struct rswitch_gwca_queue *gq;
823 	int i, index, bit;
824 
825 	for (i = 0; i < priv->gwca.num_queues; i++) {
826 		gq = &priv->gwca.queues[i];
827 		index = gq->index / 32;
828 		bit = BIT(gq->index % 32);
829 		if (!(dis[index] & bit))
830 			continue;
831 
832 		rswitch_ack_data_irq(priv, gq->index);
833 		rswitch_queue_interrupt(gq->ndev);
834 	}
835 
836 	return IRQ_HANDLED;
837 }
838 
839 static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
840 {
841 	struct rswitch_private *priv = dev_id;
842 	u32 dis[RSWITCH_NUM_IRQ_REGS];
843 	irqreturn_t ret = IRQ_NONE;
844 
845 	rswitch_get_data_irq_status(priv, dis);
846 
847 	if (rswitch_is_any_data_irq(priv, dis, true) ||
848 	    rswitch_is_any_data_irq(priv, dis, false))
849 		ret = rswitch_data_irq(priv, dis);
850 
851 	return ret;
852 }
853 
854 static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
855 {
856 	char *resource_name, *irq_name;
857 	int i, ret, irq;
858 
859 	for (i = 0; i < GWCA_NUM_IRQS; i++) {
860 		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
861 		if (!resource_name)
862 			return -ENOMEM;
863 
864 		irq = platform_get_irq_byname(priv->pdev, resource_name);
865 		kfree(resource_name);
866 		if (irq < 0)
867 			return irq;
868 
869 		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
870 					  GWCA_IRQ_NAME, i);
871 		if (!irq_name)
872 			return -ENOMEM;
873 
874 		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
875 				       0, irq_name, priv);
876 		if (ret < 0)
877 			return ret;
878 	}
879 
880 	return 0;
881 }
882 
883 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
884 static int rswitch_etha_change_mode(struct rswitch_etha *etha,
885 				    enum rswitch_etha_mode mode)
886 {
887 	int ret;
888 
889 	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
890 		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);
891 
892 	iowrite32(mode, etha->addr + EAMC);
893 
894 	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);
895 
896 	if (mode == EAMC_OPC_DISABLE)
897 		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);
898 
899 	return ret;
900 }
901 
902 static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
903 {
904 	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
905 	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
906 	u8 *mac = &etha->mac_addr[0];
907 
908 	mac[0] = (mrmac0 >>  8) & 0xFF;
909 	mac[1] = (mrmac0 >>  0) & 0xFF;
910 	mac[2] = (mrmac1 >> 24) & 0xFF;
911 	mac[3] = (mrmac1 >> 16) & 0xFF;
912 	mac[4] = (mrmac1 >>  8) & 0xFF;
913 	mac[5] = (mrmac1 >>  0) & 0xFF;
914 }
915 
916 static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
917 {
918 	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
919 	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
920 		  etha->addr + MRMAC1);
921 }
922 
/* Start link verification and wait until the PLV bit self-clears. */
static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}
929 
930 static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
931 {
932 	u32 val;
933 
934 	rswitch_etha_write_mac_address(etha, mac);
935 
936 	switch (etha->speed) {
937 	case 100:
938 		val = MPIC_LSC_100M;
939 		break;
940 	case 1000:
941 		val = MPIC_LSC_1G;
942 		break;
943 	case 2500:
944 		val = MPIC_LSC_2_5G;
945 		break;
946 	default:
947 		return;
948 	}
949 
950 	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
951 }
952 
/* Set up the MDIO interface: program the MDC clock/hold-time dividers
 * and select Clause 45 frame format.
 */
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}
959 
/* Bring an ETHA port into OPERATION mode: DISABLE -> CONFIG, program
 * the MAC and MDIO interface, run link verification, then
 * DISABLE -> OPERATION.
 */
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
985 
986 static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
987 				   int phyad, int devad, int regad, int data)
988 {
989 	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
990 	u32 val;
991 	int ret;
992 
993 	if (devad == 0xffffffff)
994 		return -ENODEV;
995 
996 	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
997 
998 	val = MPSM_PSME | MPSM_MFF_C45;
999 	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
1000 
1001 	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
1002 	if (ret)
1003 		return ret;
1004 
1005 	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
1006 
1007 	if (read) {
1008 		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
1009 
1010 		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
1011 		if (ret)
1012 			return ret;
1013 
1014 		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
1015 
1016 		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
1017 	} else {
1018 		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
1019 			  etha->addr + MPSM);
1020 
1021 		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
1022 	}
1023 
1024 	return ret;
1025 }
1026 
1027 static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
1028 				     int regad)
1029 {
1030 	struct rswitch_etha *etha = bus->priv;
1031 
1032 	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
1033 }
1034 
1035 static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
1036 				      int regad, u16 val)
1037 {
1038 	struct rswitch_etha *etha = bus->priv;
1039 
1040 	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
1041 }
1042 
1043 /* Call of_node_put(port) after done */
1044 static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
1045 {
1046 	struct device_node *ports, *port;
1047 	int err = 0;
1048 	u32 index;
1049 
1050 	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
1051 				     "ethernet-ports");
1052 	if (!ports)
1053 		return NULL;
1054 
1055 	for_each_child_of_node(ports, port) {
1056 		err = of_property_read_u32(port, "reg", &index);
1057 		if (err < 0) {
1058 			port = NULL;
1059 			goto out;
1060 		}
1061 		if (index == rdev->etha->index)
1062 			break;
1063 	}
1064 
1065 out:
1066 	of_node_put(ports);
1067 
1068 	return port;
1069 }
1070 
1071 /* Call of_node_put(mdio) after done */
1072 static struct device_node *rswitch_get_mdio_node(struct rswitch_device *rdev)
1073 {
1074 	struct device_node *port, *mdio;
1075 
1076 	port = rswitch_get_port_node(rdev);
1077 	if (!port)
1078 		return NULL;
1079 
1080 	mdio = of_get_child_by_name(port, "mdio");
1081 	of_node_put(port);
1082 
1083 	return mdio;
1084 }
1085 
1086 static int rswitch_etha_get_params(struct rswitch_device *rdev)
1087 {
1088 	struct device_node *port;
1089 	int err;
1090 
1091 	port = rswitch_get_port_node(rdev);
1092 	if (!port)
1093 		return -ENODEV;
1094 
1095 	err = of_get_phy_mode(port, &rdev->etha->phy_interface);
1096 	of_node_put(port);
1097 
1098 	switch (rdev->etha->phy_interface) {
1099 	case PHY_INTERFACE_MODE_MII:
1100 		rdev->etha->speed = SPEED_100;
1101 		break;
1102 	case PHY_INTERFACE_MODE_SGMII:
1103 		rdev->etha->speed = SPEED_1000;
1104 		break;
1105 	case PHY_INTERFACE_MODE_USXGMII:
1106 		rdev->etha->speed = SPEED_2500;
1107 		break;
1108 	default:
1109 		err = -EINVAL;
1110 		break;
1111 	}
1112 
1113 	return err;
1114 }
1115 
/* Allocate and register an MDIO bus (C45 accessors) for this port.
 * On success the bus is stored in rdev->etha->mii; on failure the
 * bus is freed before returning the error.
 */
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	/* mdio_np may be NULL; of_mdiobus_register() handles that */
	mdio_np = rswitch_get_mdio_node(rdev);
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}
1147 
1148 static void rswitch_mii_unregister(struct rswitch_device *rdev)
1149 {
1150 	if (rdev->etha->mii) {
1151 		mdiobus_unregister(rdev->etha->mii);
1152 		mdiobus_free(rdev->etha->mii);
1153 		rdev->etha->mii = NULL;
1154 	}
1155 }
1156 
/* phylink .mac_config callback: intentionally empty — the MAC needs
 * no reconfiguration here (see rswitch_mac_link_up below).
 */
static void rswitch_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
}
1162 
/* phylink .mac_link_down callback: intentionally empty — no MAC-side
 * action is taken when the link goes down.
 */
static void rswitch_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
}
1168 
/* phylink .mac_link_up callback: intentionally empty. */
static void rswitch_mac_link_up(struct phylink_config *config,
				struct phy_device *phydev, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	/* Current hardware cannot change speed at runtime */
}
1176 
/* phylink MAC operations; all three are no-op stubs (see above) */
static const struct phylink_mac_ops rswitch_phylink_ops = {
	.mac_config = rswitch_mac_config,
	.mac_link_down = rswitch_mac_link_down,
	.mac_link_up = rswitch_mac_link_up,
};
1182 
/* Create a phylink instance for this port from its DT node and
 * connect the PHY.  Returns 0 on success or a negative errno;
 * rdev->phylink is left set on phylink_of_phy_connect() failure and
 * is expected to be cleaned up by the caller's error path.
 */
static int rswitch_phylink_init(struct rswitch_device *rdev)
{
	struct device_node *port;
	struct phylink *phylink;
	int err;

	port = rswitch_get_port_node(rdev);
	if (!port)
		return -ENODEV;

	rdev->phylink_config.dev = &rdev->ndev->dev;
	rdev->phylink_config.type = PHYLINK_NETDEV;
	/* Only SGMII and USXGMII are advertised here, although MII is
	 * accepted by rswitch_etha_get_params()
	 */
	__set_bit(PHY_INTERFACE_MODE_SGMII, rdev->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_USXGMII, rdev->phylink_config.supported_interfaces);
	rdev->phylink_config.mac_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;

	phylink = phylink_create(&rdev->phylink_config, &port->fwnode,
				 rdev->etha->phy_interface, &rswitch_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto out;
	}

	rdev->phylink = phylink;
	err = phylink_of_phy_connect(rdev->phylink, port, rdev->etha->phy_interface);
out:
	of_node_put(port);

	return err;
}
1213 
/* Disconnect the PHY (under RTNL, as phylink requires) and destroy
 * the phylink instance created by rswitch_phylink_init().
 */
static void rswitch_phylink_deinit(struct rswitch_device *rdev)
{
	rtnl_lock();
	phylink_disconnect_phy(rdev->phylink);
	rtnl_unlock();
	phylink_destroy(rdev->phylink);
}
1221 
1222 static int rswitch_serdes_set_params(struct rswitch_device *rdev)
1223 {
1224 	struct device_node *port = rswitch_get_port_node(rdev);
1225 	struct phy *serdes;
1226 	int err;
1227 
1228 	serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
1229 	of_node_put(port);
1230 	if (IS_ERR(serdes))
1231 		return PTR_ERR(serdes);
1232 
1233 	err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
1234 			       rdev->etha->phy_interface);
1235 	if (err < 0)
1236 		return err;
1237 
1238 	return phy_set_speed(serdes, rdev->etha->speed);
1239 }
1240 
1241 static int rswitch_serdes_init(struct rswitch_device *rdev)
1242 {
1243 	struct device_node *port = rswitch_get_port_node(rdev);
1244 	struct phy *serdes;
1245 
1246 	serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
1247 	of_node_put(port);
1248 	if (IS_ERR(serdes))
1249 		return PTR_ERR(serdes);
1250 
1251 	return phy_init(serdes);
1252 }
1253 
1254 static int rswitch_serdes_deinit(struct rswitch_device *rdev)
1255 {
1256 	struct device_node *port = rswitch_get_port_node(rdev);
1257 	struct phy *serdes;
1258 
1259 	serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
1260 	of_node_put(port);
1261 	if (IS_ERR(serdes))
1262 		return PTR_ERR(serdes);
1263 
1264 	return phy_exit(serdes);
1265 }
1266 
/* Bring up one Ethernet port: one-time ETHA hardware init, MDIO bus
 * registration, phylink setup and SerDes parameter programming.
 * Unwinds in reverse order on failure.
 */
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	/* The ETHA hardware is initialized only once per agent, even if
	 * this function is re-entered
	 */
	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phylink_init(rdev);
	if (err < 0)
		goto err_phylink_init;

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
	rswitch_phylink_deinit(rdev);

err_phylink_init:
	rswitch_mii_unregister(rdev);

	return err;
}
1300 
/* Reverse of rswitch_ether_port_init_one(): phylink first, then the
 * MDIO bus (order matters — the PHY lives on that bus).
 */
static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phylink_deinit(rdev);
	rswitch_mii_unregister(rdev);
}
1306 
/* Initialize every port in two passes (port init, then SerDes init)
 * and unwind both passes on failure.
 */
static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_serdes_init(priv->rdev[i]);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	/* Undo the SerDes inits that succeeded ... */
	for (i--; i >= 0; i--)
		rswitch_serdes_deinit(priv->rdev[i]);
	/* ... then reset i so the fall-through below unwinds ALL ports,
	 * since the first loop completed fully to reach this point
	 */
	i = RSWITCH_NUM_PORTS;

err_init_one:
	for (i--; i >= 0; i--)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}
1336 
1337 static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
1338 {
1339 	int i;
1340 
1341 	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
1342 		rswitch_serdes_deinit(priv->rdev[i]);
1343 		rswitch_ether_port_deinit_one(priv->rdev[i]);
1344 	}
1345 }
1346 
1347 static int rswitch_open(struct net_device *ndev)
1348 {
1349 	struct rswitch_device *rdev = netdev_priv(ndev);
1350 
1351 	phylink_start(rdev->phylink);
1352 
1353 	napi_enable(&rdev->napi);
1354 	netif_start_queue(ndev);
1355 
1356 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
1357 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
1358 
1359 	return 0;
1360 };
1361 
1362 static int rswitch_stop(struct net_device *ndev)
1363 {
1364 	struct rswitch_device *rdev = netdev_priv(ndev);
1365 
1366 	netif_tx_stop_all_queues(ndev);
1367 
1368 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
1369 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
1370 
1371 	phylink_stop(rdev->phylink);
1372 	napi_disable(&rdev->napi);
1373 
1374 	return 0;
1375 };
1376 
1377 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1378 {
1379 	struct rswitch_device *rdev = netdev_priv(ndev);
1380 	struct rswitch_gwca_queue *gq = rdev->tx_queue;
1381 	struct rswitch_ext_desc *desc;
1382 	int ret = NETDEV_TX_OK;
1383 	dma_addr_t dma_addr;
1384 
1385 	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
1386 		netif_stop_subqueue(ndev, 0);
1387 		return ret;
1388 	}
1389 
1390 	if (skb_put_padto(skb, ETH_ZLEN))
1391 		return ret;
1392 
1393 	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
1394 	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
1395 		dev_kfree_skb_any(skb);
1396 		return ret;
1397 	}
1398 
1399 	gq->skbs[gq->cur] = skb;
1400 	desc = &gq->ring[gq->cur];
1401 	rswitch_desc_set_dptr(&desc->desc, dma_addr);
1402 	desc->desc.info_ds = cpu_to_le16(skb->len);
1403 
1404 	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_FMT);
1405 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1406 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1407 		rdev->ts_tag++;
1408 		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
1409 	}
1410 	skb_tx_timestamp(skb);
1411 
1412 	dma_wmb();
1413 
1414 	desc->desc.die_dt = DT_FSINGLE | DIE;
1415 	wmb();	/* gq->cur must be incremented after die_dt was set */
1416 
1417 	gq->cur = rswitch_next_queue_index(gq, true, 1);
1418 	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
1419 
1420 	return ret;
1421 }
1422 
/* ndo_get_stats: the driver keeps its counters in ndev->stats */
static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}
1427 
/* SIOCGHWTSTAMP: report the current hardware timestamping state kept
 * in the shared gPTP private data back to userspace.
 */
static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	/* Map the driver's RX type bits back to the UAPI filter values */
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
1453 
/* SIOCSHWTSTAMP: validate the userspace request, translate it into
 * the driver's TX/RX control bits and echo the (possibly upgraded)
 * config back to the caller.
 */
static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	/* No flags are supported */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		/* Any other filter is upgraded to "timestamp everything",
		 * which is reported back via the copied-out config
		 */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
1496 
1497 static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1498 {
1499 	struct rswitch_device *rdev = netdev_priv(ndev);
1500 
1501 	if (!netif_running(ndev))
1502 		return -EINVAL;
1503 
1504 	switch (cmd) {
1505 	case SIOCGHWTSTAMP:
1506 		return rswitch_hwstamp_get(ndev, req);
1507 	case SIOCSHWTSTAMP:
1508 		return rswitch_hwstamp_set(ndev, req);
1509 	default:
1510 		return phylink_mii_ioctl(rdev->phylink, req, cmd);
1511 	}
1512 }
1513 
/* net_device operations for each switch port */
static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
1523 
/* ethtool .get_ts_info: advertise software and hardware timestamping
 * capabilities and the shared gPTP clock index.
 */
static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}
1540 
/* ethtool operations (timestamp info only) */
static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
};
1544 
/* Devices handled by this driver (R-Car S4-8 Ethernet switch) */
static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
1550 
1551 static void rswitch_etha_init(struct rswitch_private *priv, int index)
1552 {
1553 	struct rswitch_etha *etha = &priv->etha[index];
1554 
1555 	memset(etha, 0, sizeof(*etha));
1556 	etha->index = index;
1557 	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
1558 	etha->coma_addr = priv->addr;
1559 }
1560 
/* Allocate and set up the net_device for one switch port: netdev,
 * NAPI, MAC address, link parameters and the RX/TX DMAC rings.
 * On failure everything allocated so far is unwound.
 */
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct device_node *port;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	/* MAC address preference: DT, then the address read from the
	 * ETHA registers, then a random one
	 */
	port = rswitch_get_port_node(rdev);
	err = of_get_ethdev_address(port, ndev);
	of_node_put(port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	/* The GWCA runs at the fastest speed of any attached port */
	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}
1631 
1632 static void rswitch_device_free(struct rswitch_private *priv, int index)
1633 {
1634 	struct rswitch_device *rdev = priv->rdev[index];
1635 	struct net_device *ndev = rdev->ndev;
1636 
1637 	rswitch_txdmac_free(ndev);
1638 	rswitch_rxdmac_free(ndev);
1639 	netif_napi_del(&rdev->napi);
1640 	free_netdev(ndev);
1641 }
1642 
/* Full switch bring-up: ETHA/COMA init, descriptor and per-port
 * allocation, forwarding/gPTP/IRQ/GWCA setup, port init and netdev
 * registration.  Unwinds in reverse order on failure.
 */
static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	/* Clocks must be on to read the MAC addresses the boot loader
	 * may have programmed, before the reset below clears them
	 */
	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	/* Reset disables the clocks; enable them again */
	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	err = rswitch_gwca_desc_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			/* Free only the devices allocated so far */
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			/* Unregister only the netdevs registered so far;
			 * the common unwind below handles the rest
			 */
			for (i--; i >= 0; i--)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_desc_free(priv);

	return err;
}
1728 
/* Platform driver probe: map registers, set up DMA masks, allocate
 * private/gPTP/queue state, enable runtime PM and run rswitch_init().
 */
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	/* The gPTP block sits inside the switch register window */
	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	/* Prefer the device's native 40-bit DMA mask, fall back to 32 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}
1786 
/* Tear down everything rswitch_init() set up, then gate the clocks. */
static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		struct rswitch_device *rdev = priv->rdev[i];

		/* NOTE(review): SerDes/phylink are torn down before
		 * unregister_netdev(), i.e. while the netdev is still
		 * visible to userspace — confirm this ordering is safe
		 * (the usual pattern unregisters the netdev first).
		 */
		rswitch_serdes_deinit(rdev);
		rswitch_ether_port_deinit_one(rdev);
		unregister_netdev(rdev->ndev);
		rswitch_device_free(priv, i);
	}

	rswitch_gwca_desc_free(priv);

	rswitch_clock_disable(priv);
}
1807 
/* Platform driver remove: deinit the switch, then balance the runtime
 * PM get/enable done in probe.
 */
static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
1821 
/* Platform driver glue and module metadata */
static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");
1834