xref: /linux/drivers/net/ethernet/renesas/rswitch_main.c (revision 622303250c51b8104e23da5f6b3eff65924a80b7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Renesas Ethernet Switch device driver
3  *
4  * Copyright (C) 2022-2025 Renesas Electronics Corporation
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/err.h>
10 #include <linux/etherdevice.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/sys_soc.h>
26 
27 #include "rswitch.h"
28 
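/* Poll the register at @offs until (value & @mask) == @expected, giving up
 * after RSWITCH_TIMEOUT_US microseconds.
 */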
29 static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
30 {
31 	u32 val;
32 
33 	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
34 					 1, RSWITCH_TIMEOUT_US);
35 }
36 
37 static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
38 {
39 	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
40 }
41 
42 /* Common Agent block (COMA) */
43 static void rswitch_reset(struct rswitch_private *priv)
44 {
45 	iowrite32(RRC_RR, priv->addr + RRC);
46 	iowrite32(RRC_RR_CLR, priv->addr + RRC);
47 }
48 
49 static void rswitch_clock_enable(struct rswitch_private *priv)
50 {
51 	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
52 }
53 
54 static void rswitch_clock_disable(struct rswitch_private *priv)
55 {
56 	iowrite32(RCDC_RCD, priv->addr + RCDC);
57 }
58 
59 static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
60 					   unsigned int port)
61 {
62 	u32 val = ioread32(coma_addr + RCEC);
63 
64 	if (val & RCEC_RCE)
65 		return (val & BIT(port)) ? true : false;
66 	else
67 		return false;
68 }
69 
70 static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
71 				     int enable)
72 {
73 	u32 val;
74 
75 	if (enable) {
76 		val = ioread32(coma_addr + RCEC);
77 		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
78 	} else {
79 		val = ioread32(coma_addr + RCDC);
80 		iowrite32(val | BIT(port), coma_addr + RCDC);
81 	}
82 }
83 
84 static int rswitch_bpool_config(struct rswitch_private *priv)
85 {
86 	u32 val;
87 
88 	val = ioread32(priv->addr + CABPIRM);
89 	if (val & CABPIRM_BPR)
90 		return 0;
91 
92 	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);
93 
94 	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
95 }
96 
97 static void rswitch_coma_init(struct rswitch_private *priv)
98 {
99 	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
100 }
101 
102 /* R-Switch-2 block (TOP) */
103 static void rswitch_top_init(struct rswitch_private *priv)
104 {
105 	unsigned int i;
106 
107 	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
108 		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
109 }
110 
111 /* Forwarding engine block (MFWD) */
112 static void rswitch_fwd_init(struct rswitch_private *priv)
113 {
114 	u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
115 	unsigned int i;
116 	u32 reg_val;
117 
118 	/* Start with empty configuration */
119 	for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
120 		/* Disable all port features */
121 		iowrite32(0, priv->addr + FWPC0(i));
122 		/* Disallow L3 forwarding and direct descriptor forwarding */
123 		iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
124 			  priv->addr + FWPC1(i));
125 		/* Disallow L2 forwarding */
126 		iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
127 			  priv->addr + FWPC2(i));
128 		/* Disallow port based forwarding */
129 		iowrite32(0, priv->addr + FWPBFC(i));
130 	}
131 
132 	/* Configure MAC table aging */
133 	rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP,
134 		       FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US));
135 
136 	reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME);
137 	reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
138 	iowrite32(reg_val, priv->addr + FWMACAGC);
139 
140 	/* For enabled ETHA ports, set up port-based forwarding */
141 	rswitch_for_each_enabled_port(priv, i) {
142 		/* Port based forwarding from port i to GWCA port */
143 		rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
144 			       FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
145 		/* Within GWCA port, forward to Rx queue for port i */
146 		iowrite32(priv->rdev[i]->rx_queue->index,
147 			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
148 	}
149 
150 	/* For GWCA port, allow direct descriptor forwarding */
151 	rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
152 }
153 
154 /* Gateway CPU agent block (GWCA) */
155 static int rswitch_gwca_change_mode(struct rswitch_private *priv,
156 				    enum rswitch_gwca_mode mode)
157 {
158 	int ret;
159 
160 	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
161 		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);
162 
163 	iowrite32(mode, priv->addr + GWMC);
164 
165 	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);
166 
167 	if (mode == GWMC_OPC_DISABLE)
168 		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);
169 
170 	return ret;
171 }
172 
173 static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
174 {
175 	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);
176 
177 	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
178 }
179 
180 static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
181 {
182 	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);
183 
184 	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
185 }
186 
187 static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
188 {
189 	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
190 	unsigned int i;
191 
192 	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
193 		if (dis[i] & mask[i])
194 			return true;
195 	}
196 
197 	return false;
198 }
199 
200 static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
201 {
202 	unsigned int i;
203 
204 	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
205 		dis[i] = ioread32(priv->addr + GWDIS(i));
206 		dis[i] &= ioread32(priv->addr + GWDIE(i));
207 	}
208 }
209 
210 static void rswitch_enadis_data_irq(struct rswitch_private *priv,
211 				    unsigned int index, bool enable)
212 {
213 	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);
214 
215 	iowrite32(BIT(index % 32), priv->addr + offs);
216 }
217 
218 static void rswitch_ack_data_irq(struct rswitch_private *priv,
219 				 unsigned int index)
220 {
221 	u32 offs = GWDIS(index / 32);
222 
223 	iowrite32(BIT(index % 32), priv->addr + offs);
224 }
225 
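/* Return the queue's cur (or dirty, if @cur is false) index advanced by @num
 * entries, wrapping around at ring_size. The queue itself is not modified.
 */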
226 static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
227 					     bool cur, unsigned int num)
228 {
229 	unsigned int index = cur ? gq->cur : gq->dirty;
230 
231 	if (index + num >= gq->ring_size)
232 		index = (index + num) % gq->ring_size;
233 	else
234 		index += num;
235 
236 	return index;
237 }
238 
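/* Number of ring entries between dirty and cur, i.e. entries still to be
 * cleaned (TX) or refilled (RX).
 */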
239 static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
240 {
241 	if (gq->cur >= gq->dirty)
242 		return gq->cur - gq->dirty;
243 	else
244 		return gq->ring_size - gq->dirty + gq->cur;
245 }
246 
247 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
248 {
249 	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
250 
251 	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
252 		return true;
253 
254 	return false;
255 }
256 
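/* Allocate page-fragment RX buffers for @num ring entries starting at
 * @start_index, skipping entries that already have a buffer. On failure,
 * free the buffers allocated by this call.
 */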
257 static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
258 					   unsigned int start_index,
259 					   unsigned int num)
260 {
261 	unsigned int i, index;
262 
263 	for (i = 0; i < num; i++) {
264 		index = (i + start_index) % gq->ring_size;
265 		if (gq->rx_bufs[index])
266 			continue;
267 		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
268 		if (!gq->rx_bufs[index])
269 			goto err;
270 	}
271 
272 	return 0;
273 
274 err:
275 	for (; i-- > 0; ) {
276 		index = (i + start_index) % gq->ring_size;
277 		skb_free_frag(gq->rx_bufs[index]);
278 		gq->rx_bufs[index] = NULL;
279 	}
280 
281 	return -ENOMEM;
282 }
283 
284 static void rswitch_gwca_queue_free(struct net_device *ndev,
285 				    struct rswitch_gwca_queue *gq)
286 {
287 	unsigned int i;
288 
289 	if (!gq->dir_tx) {
290 		dma_free_coherent(ndev->dev.parent,
291 				  sizeof(struct rswitch_ext_ts_desc) *
292 				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
293 		gq->rx_ring = NULL;
294 
295 		for (i = 0; i < gq->ring_size; i++)
296 			skb_free_frag(gq->rx_bufs[i]);
297 		kfree(gq->rx_bufs);
298 		gq->rx_bufs = NULL;
299 	} else {
300 		dma_free_coherent(ndev->dev.parent,
301 				  sizeof(struct rswitch_ext_desc) *
302 				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
303 		gq->tx_ring = NULL;
304 		kfree(gq->skbs);
305 		gq->skbs = NULL;
306 		kfree(gq->unmap_addrs);
307 		gq->unmap_addrs = NULL;
308 	}
309 }
310 
311 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
312 {
313 	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
314 
315 	dma_free_coherent(&priv->pdev->dev,
316 			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
317 			  gq->ts_ring, gq->ring_dma);
318 	gq->ts_ring = NULL;
319 }
320 
321 static int rswitch_gwca_queue_alloc(struct net_device *ndev,
322 				    struct rswitch_private *priv,
323 				    struct rswitch_gwca_queue *gq,
324 				    bool dir_tx, unsigned int ring_size)
325 {
326 	unsigned int i, bit;
327 
328 	gq->dir_tx = dir_tx;
329 	gq->ring_size = ring_size;
330 	gq->ndev = ndev;
331 
332 	if (!dir_tx) {
333 		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
334 		if (!gq->rx_bufs)
335 			return -ENOMEM;
336 		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
337 			goto out;
338 
339 		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
340 						 sizeof(struct rswitch_ext_ts_desc) *
341 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
342 	} else {
343 		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
344 		if (!gq->skbs)
345 			return -ENOMEM;
346 		gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
347 		if (!gq->unmap_addrs)
348 			goto out;
349 		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
350 						 sizeof(struct rswitch_ext_desc) *
351 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
352 	}
353 
354 	if (!gq->rx_ring && !gq->tx_ring)
355 		goto out;
356 
357 	i = gq->index / 32;
358 	bit = BIT(gq->index % 32);
359 	if (dir_tx)
360 		priv->gwca.tx_irq_bits[i] |= bit;
361 	else
362 		priv->gwca.rx_irq_bits[i] |= bit;
363 
364 	return 0;
365 
366 out:
367 	rswitch_gwca_queue_free(ndev, gq);
368 
369 	return -ENOMEM;
370 }
371 
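/* Descriptors hold a 40-bit DMA address: the low 32 bits go into dptrl and
 * the upper 8 bits into dptrh.
 */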
372 static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
373 {
374 	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
375 	desc->dptrh = upper_32_bits(addr) & 0xff;
376 }
377 
378 static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
379 {
380 	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
381 }
382 
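/* Initialize the descriptor ring of @gq: ring_size data descriptors followed
 * by one DT_LINKFIX descriptor that points back to the start of the ring,
 * then register the ring in the GWCA link-fix table and the per-queue GWDCC
 * register.
 */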
383 static int rswitch_gwca_queue_format(struct net_device *ndev,
384 				     struct rswitch_private *priv,
385 				     struct rswitch_gwca_queue *gq)
386 {
387 	unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
388 	struct rswitch_ext_desc *desc;
389 	struct rswitch_desc *linkfix;
390 	dma_addr_t dma_addr;
391 	unsigned int i;
392 
393 	memset(gq->tx_ring, 0, ring_size);
394 	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
395 		if (!gq->dir_tx) {
396 			dma_addr = dma_map_single(ndev->dev.parent,
397 						  gq->rx_bufs[i] + RSWITCH_HEADROOM,
398 						  RSWITCH_MAP_BUF_SIZE,
399 						  DMA_FROM_DEVICE);
400 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
401 				goto err;
402 
403 			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
404 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
405 			desc->desc.die_dt = DT_FEMPTY | DIE;
406 		} else {
407 			desc->desc.die_dt = DT_EEMPTY | DIE;
408 		}
409 	}
410 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
411 	desc->desc.die_dt = DT_LINKFIX;
412 
413 	linkfix = &priv->gwca.linkfix_table[gq->index];
414 	linkfix->die_dt = DT_LINKFIX;
415 	rswitch_desc_set_dptr(linkfix, gq->ring_dma);
416 
417 	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
418 		  priv->addr + GWDCC_OFFS(gq->index));
419 
420 	return 0;
421 
422 err:
423 	if (!gq->dir_tx) {
424 		for (desc = gq->tx_ring; i-- > 0; desc++) {
425 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
426 			dma_unmap_single(ndev->dev.parent, dma_addr,
427 					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
428 		}
429 	}
430 
431 	return -ENOMEM;
432 }
433 
434 static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
435 				       unsigned int start_index,
436 				       unsigned int num)
437 {
438 	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
439 	struct rswitch_ts_desc *desc;
440 	unsigned int i, index;
441 
442 	for (i = 0; i < num; i++) {
443 		index = (i + start_index) % gq->ring_size;
444 		desc = &gq->ts_ring[index];
445 		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
446 	}
447 }
448 
449 static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
450 					  struct rswitch_gwca_queue *gq,
451 					  unsigned int start_index,
452 					  unsigned int num)
453 {
454 	struct rswitch_device *rdev = netdev_priv(ndev);
455 	struct rswitch_ext_ts_desc *desc;
456 	unsigned int i, index;
457 	dma_addr_t dma_addr;
458 
459 	for (i = 0; i < num; i++) {
460 		index = (i + start_index) % gq->ring_size;
461 		desc = &gq->rx_ring[index];
462 		if (!gq->dir_tx) {
463 			dma_addr = dma_map_single(ndev->dev.parent,
464 						  gq->rx_bufs[index] + RSWITCH_HEADROOM,
465 						  RSWITCH_MAP_BUF_SIZE,
466 						  DMA_FROM_DEVICE);
467 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
468 				goto err;
469 
470 			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
471 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
472 			dma_wmb();
473 			desc->desc.die_dt = DT_FEMPTY | DIE;
474 			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
475 		} else {
476 			desc->desc.die_dt = DT_EEMPTY | DIE;
477 		}
478 	}
479 
480 	return 0;
481 
482 err:
483 	if (!gq->dir_tx) {
484 		for (; i-- > 0; ) {
485 			index = (i + start_index) % gq->ring_size;
486 			desc = &gq->rx_ring[index];
487 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
488 			dma_unmap_single(ndev->dev.parent, dma_addr,
489 					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
490 		}
491 	}
492 
493 	return -ENOMEM;
494 }
495 
496 static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
497 					    struct rswitch_private *priv,
498 					    struct rswitch_gwca_queue *gq)
499 {
500 	unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
501 	struct rswitch_ext_ts_desc *desc;
502 	struct rswitch_desc *linkfix;
503 	int err;
504 
505 	memset(gq->rx_ring, 0, ring_size);
506 	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
507 	if (err < 0)
508 		return err;
509 
510 	desc = &gq->rx_ring[gq->ring_size];	/* Last */
511 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
512 	desc->desc.die_dt = DT_LINKFIX;
513 
514 	linkfix = &priv->gwca.linkfix_table[gq->index];
515 	linkfix->die_dt = DT_LINKFIX;
516 	rswitch_desc_set_dptr(linkfix, gq->ring_dma);
517 
518 	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
519 		  GWDCC_ETS | GWDCC_EDE,
520 		  priv->addr + GWDCC_OFFS(gq->index));
521 
522 	return 0;
523 }
524 
525 static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
526 {
527 	unsigned int i, num_queues = priv->gwca.num_queues;
528 	struct rswitch_gwca *gwca = &priv->gwca;
529 	struct device *dev = &priv->pdev->dev;
530 
531 	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
532 	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
533 						 &gwca->linkfix_table_dma, GFP_KERNEL);
534 	if (!gwca->linkfix_table)
535 		return -ENOMEM;
536 	for (i = 0; i < num_queues; i++)
537 		gwca->linkfix_table[i].die_dt = DT_EOS;
538 
539 	return 0;
540 }
541 
542 static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
543 {
544 	struct rswitch_gwca *gwca = &priv->gwca;
545 
546 	if (gwca->linkfix_table)
547 		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
548 				  gwca->linkfix_table, gwca->linkfix_table_dma);
549 	gwca->linkfix_table = NULL;
550 }
551 
552 static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
553 {
554 	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
555 	struct rswitch_ts_desc *desc;
556 
557 	gq->ring_size = TS_RING_SIZE;
558 	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
559 					 sizeof(struct rswitch_ts_desc) *
560 					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
561 
562 	if (!gq->ts_ring)
563 		return -ENOMEM;
564 
565 	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
566 	desc = &gq->ts_ring[gq->ring_size];
567 	desc->desc.die_dt = DT_LINKFIX;
568 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
569 
570 	return 0;
571 }
572 
573 static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
574 {
575 	struct rswitch_gwca_queue *gq;
576 	unsigned int index;
577 
578 	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
579 	if (index >= priv->gwca.num_queues)
580 		return NULL;
581 	set_bit(index, priv->gwca.used);
582 	gq = &priv->gwca.queues[index];
583 	memset(gq, 0, sizeof(*gq));
584 	gq->index = index;
585 
586 	return gq;
587 }
588 
589 static void rswitch_gwca_put(struct rswitch_private *priv,
590 			     struct rswitch_gwca_queue *gq)
591 {
592 	clear_bit(gq->index, priv->gwca.used);
593 }
594 
595 static int rswitch_txdmac_alloc(struct net_device *ndev)
596 {
597 	struct rswitch_device *rdev = netdev_priv(ndev);
598 	struct rswitch_private *priv = rdev->priv;
599 	int err;
600 
601 	rdev->tx_queue = rswitch_gwca_get(priv);
602 	if (!rdev->tx_queue)
603 		return -EBUSY;
604 
605 	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
606 	if (err < 0) {
607 		rswitch_gwca_put(priv, rdev->tx_queue);
608 		return err;
609 	}
610 
611 	return 0;
612 }
613 
614 static void rswitch_txdmac_free(struct net_device *ndev)
615 {
616 	struct rswitch_device *rdev = netdev_priv(ndev);
617 
618 	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
619 	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
620 }
621 
622 static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
623 {
624 	struct rswitch_device *rdev = priv->rdev[index];
625 
626 	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
627 }
628 
629 static int rswitch_rxdmac_alloc(struct net_device *ndev)
630 {
631 	struct rswitch_device *rdev = netdev_priv(ndev);
632 	struct rswitch_private *priv = rdev->priv;
633 	int err;
634 
635 	rdev->rx_queue = rswitch_gwca_get(priv);
636 	if (!rdev->rx_queue)
637 		return -EBUSY;
638 
639 	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
640 	if (err < 0) {
641 		rswitch_gwca_put(priv, rdev->rx_queue);
642 		return err;
643 	}
644 
645 	return 0;
646 }
647 
648 static void rswitch_rxdmac_free(struct net_device *ndev)
649 {
650 	struct rswitch_device *rdev = netdev_priv(ndev);
651 
652 	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
653 	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
654 }
655 
656 static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
657 {
658 	struct rswitch_device *rdev = priv->rdev[index];
659 	struct net_device *ndev = rdev->ndev;
660 
661 	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
662 }
663 
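/* Bring the GWCA into CONFIG mode, reset its multicast table and AXI RAM,
 * program the descriptor base addresses and the per-port RX/TX rings, then
 * switch to OPERATION mode.
 */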
664 static int rswitch_gwca_hw_init(struct rswitch_private *priv)
665 {
666 	unsigned int i;
667 	int err;
668 
669 	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
670 	if (err < 0)
671 		return err;
672 	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
673 	if (err < 0)
674 		return err;
675 
676 	err = rswitch_gwca_mcast_table_reset(priv);
677 	if (err < 0)
678 		return err;
679 	err = rswitch_gwca_axi_ram_reset(priv);
680 	if (err < 0)
681 		return err;
682 
683 	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
684 	iowrite32(0, priv->addr + GWTTFC);
685 	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
686 	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
687 	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
688 	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
689 	iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
690 		  priv->addr + GWMDNC);
691 	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
692 
693 	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);
694 
695 	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
696 		err = rswitch_rxdmac_init(priv, i);
697 		if (err < 0)
698 			return err;
699 		err = rswitch_txdmac_init(priv, i);
700 		if (err < 0)
701 			return err;
702 	}
703 
704 	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
705 	if (err < 0)
706 		return err;
707 	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
708 }
709 
710 static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
711 {
712 	int err;
713 
714 	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
715 	if (err < 0)
716 		return err;
717 	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
718 	if (err < 0)
719 		return err;
720 
721 	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
722 }
723 
724 static int rswitch_gwca_halt(struct rswitch_private *priv)
725 {
726 	int err;
727 
728 	priv->gwca_halt = true;
729 	err = rswitch_gwca_hw_deinit(priv);
730 	dev_err(&priv->pdev->dev, "halted (%d)\n", err);
731 
732 	return err;
733 }
734 
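/* Unmap the RX buffer of @desc and convert it into skb data: FSINGLE and
 * FSTART start a new skb, FMID/FEND append the buffer as a fragment to the
 * skb started by FSTART. Returns the completed skb, or NULL while a
 * multi-descriptor frame is still being assembled (or on drop).
 */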
735 static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
736 					      struct rswitch_gwca_queue *gq,
737 					      struct rswitch_ext_ts_desc *desc)
738 {
739 	dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
740 	u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
741 	u8 die_dt = desc->desc.die_dt & DT_MASK;
742 	struct sk_buff *skb = NULL;
743 
744 	dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
745 			 DMA_FROM_DEVICE);
746 
747 	/* The RX descriptor order will be one of the following:
748 	 * - FSINGLE
749 	 * - FSTART -> FEND
750 	 * - FSTART -> FMID -> FEND
751 	 */
752 
753 	/* Check whether the descriptor arrived in an unexpected order */
754 	switch (die_dt) {
755 	case DT_FSTART:
756 	case DT_FSINGLE:
757 		if (gq->skb_fstart) {
758 			dev_kfree_skb_any(gq->skb_fstart);
759 			gq->skb_fstart = NULL;
760 			ndev->stats.rx_dropped++;
761 		}
762 		break;
763 	case DT_FMID:
764 	case DT_FEND:
765 		if (!gq->skb_fstart) {
766 			ndev->stats.rx_dropped++;
767 			return NULL;
768 		}
769 		break;
770 	default:
771 		break;
772 	}
773 
774 	/* Handle the descriptor */
775 	switch (die_dt) {
776 	case DT_FSTART:
777 	case DT_FSINGLE:
778 		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
779 		if (skb) {
780 			skb_reserve(skb, RSWITCH_HEADROOM);
781 			skb_put(skb, pkt_len);
782 			gq->pkt_len = pkt_len;
783 			if (die_dt == DT_FSTART) {
784 				gq->skb_fstart = skb;
785 				skb = NULL;
786 			}
787 		}
788 		break;
789 	case DT_FMID:
790 	case DT_FEND:
791 		skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
792 				virt_to_page(gq->rx_bufs[gq->cur]),
793 				offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
794 				pkt_len, RSWITCH_BUF_SIZE);
795 		if (die_dt == DT_FEND) {
796 			skb = gq->skb_fstart;
797 			gq->skb_fstart = NULL;
798 		}
799 		gq->pkt_len += pkt_len;
800 		break;
801 	default:
802 		netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
803 		break;
804 	}
805 
806 	return skb;
807 }
808 
809 static bool rswitch_rx(struct net_device *ndev, int *quota)
810 {
811 	struct rswitch_device *rdev = netdev_priv(ndev);
812 	struct rswitch_gwca_queue *gq = rdev->rx_queue;
813 	struct rswitch_ext_ts_desc *desc;
814 	int limit, boguscnt, ret;
815 	struct sk_buff *skb;
816 	unsigned int num;
817 	u32 get_ts;
818 
819 	if (*quota <= 0)
820 		return true;
821 
822 	boguscnt = min_t(int, gq->ring_size, *quota);
823 	limit = boguscnt;
824 
825 	desc = &gq->rx_ring[gq->cur];
826 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
827 		dma_rmb();
828 		skb = rswitch_rx_handle_desc(ndev, gq, desc);
829 		if (!skb)
830 			goto out;
831 
832 		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
833 		if (get_ts) {
834 			struct skb_shared_hwtstamps *shhwtstamps;
835 			struct timespec64 ts;
836 
837 			shhwtstamps = skb_hwtstamps(skb);
838 			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
839 			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
840 			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
841 			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
842 		}
843 		skb->protocol = eth_type_trans(skb, ndev);
844 		napi_gro_receive(&rdev->napi, skb);
845 		rdev->ndev->stats.rx_packets++;
846 		rdev->ndev->stats.rx_bytes += gq->pkt_len;
847 
848 out:
849 		gq->rx_bufs[gq->cur] = NULL;
850 		gq->cur = rswitch_next_queue_index(gq, true, 1);
851 		desc = &gq->rx_ring[gq->cur];
852 
853 		if (--boguscnt <= 0)
854 			break;
855 	}
856 
857 	num = rswitch_get_num_cur_queues(gq);
858 	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
859 	if (ret < 0)
860 		goto err;
861 	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
862 	if (ret < 0)
863 		goto err;
864 	gq->dirty = rswitch_next_queue_index(gq, false, num);
865 
866 	*quota -= limit - boguscnt;
867 
868 	return boguscnt <= 0;
869 
870 err:
871 	rswitch_gwca_halt(rdev->priv);
872 
873 	return 0;
874 }
875 
876 static void rswitch_tx_free(struct net_device *ndev)
877 {
878 	struct rswitch_device *rdev = netdev_priv(ndev);
879 	struct rswitch_gwca_queue *gq = rdev->tx_queue;
880 	struct rswitch_ext_desc *desc;
881 	struct sk_buff *skb;
882 
883 	desc = &gq->tx_ring[gq->dirty];
884 	while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
885 		dma_rmb();
886 
887 		skb = gq->skbs[gq->dirty];
888 		if (skb) {
889 			rdev->ndev->stats.tx_packets++;
890 			rdev->ndev->stats.tx_bytes += skb->len;
891 			dma_unmap_single(ndev->dev.parent,
892 					 gq->unmap_addrs[gq->dirty],
893 					 skb->len, DMA_TO_DEVICE);
894 			dev_kfree_skb_any(gq->skbs[gq->dirty]);
895 			gq->skbs[gq->dirty] = NULL;
896 		}
897 
898 		desc->desc.die_dt = DT_EEMPTY;
899 		gq->dirty = rswitch_next_queue_index(gq, false, 1);
900 		desc = &gq->tx_ring[gq->dirty];
901 	}
902 }
903 
904 static int rswitch_poll(struct napi_struct *napi, int budget)
905 {
906 	struct net_device *ndev = napi->dev;
907 	struct rswitch_private *priv;
908 	struct rswitch_device *rdev;
909 	unsigned long flags;
910 	int quota = budget;
911 
912 	rdev = netdev_priv(ndev);
913 	priv = rdev->priv;
914 
915 retry:
916 	rswitch_tx_free(ndev);
917 
918 	if (rswitch_rx(ndev, &quota))
919 		goto out;
920 	else if (rdev->priv->gwca_halt)
921 		goto err;
922 	else if (rswitch_is_queue_rxed(rdev->rx_queue))
923 		goto retry;
924 
925 	netif_wake_subqueue(ndev, 0);
926 
927 	if (napi_complete_done(napi, budget - quota)) {
928 		spin_lock_irqsave(&priv->lock, flags);
929 		if (test_bit(rdev->port, priv->opened_ports)) {
930 			rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
931 			rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
932 		}
933 		spin_unlock_irqrestore(&priv->lock, flags);
934 	}
935 
936 out:
937 	return budget - quota;
938 
939 err:
940 	napi_complete(napi);
941 
942 	return 0;
943 }
944 
945 static void rswitch_queue_interrupt(struct net_device *ndev)
946 {
947 	struct rswitch_device *rdev = netdev_priv(ndev);
948 
949 	if (napi_schedule_prep(&rdev->napi)) {
950 		spin_lock(&rdev->priv->lock);
951 		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
952 		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
953 		spin_unlock(&rdev->priv->lock);
954 		__napi_schedule(&rdev->napi);
955 	}
956 }
957 
958 static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
959 {
960 	struct rswitch_gwca_queue *gq;
961 	unsigned int i, index, bit;
962 
963 	for (i = 0; i < priv->gwca.num_queues; i++) {
964 		gq = &priv->gwca.queues[i];
965 		index = gq->index / 32;
966 		bit = BIT(gq->index % 32);
967 		if (!(dis[index] & bit))
968 			continue;
969 
970 		rswitch_ack_data_irq(priv, gq->index);
971 		rswitch_queue_interrupt(gq->ndev);
972 	}
973 
974 	return IRQ_HANDLED;
975 }
976 
977 static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
978 {
979 	struct rswitch_private *priv = dev_id;
980 	u32 dis[RSWITCH_NUM_IRQ_REGS];
981 	irqreturn_t ret = IRQ_NONE;
982 
983 	rswitch_get_data_irq_status(priv, dis);
984 
985 	if (rswitch_is_any_data_irq(priv, dis, true) ||
986 	    rswitch_is_any_data_irq(priv, dis, false))
987 		ret = rswitch_data_irq(priv, dis);
988 
989 	return ret;
990 }
991 
992 static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
993 {
994 	char *resource_name, *irq_name;
995 	int i, ret, irq;
996 
997 	for (i = 0; i < GWCA_NUM_IRQS; i++) {
998 		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
999 		if (!resource_name)
1000 			return -ENOMEM;
1001 
1002 		irq = platform_get_irq_byname(priv->pdev, resource_name);
1003 		kfree(resource_name);
1004 		if (irq < 0)
1005 			return irq;
1006 
1007 		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
1008 					  GWCA_IRQ_NAME, i);
1009 		if (!irq_name)
1010 			return -ENOMEM;
1011 
1012 		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
1013 				       0, irq_name, priv);
1014 		if (ret < 0)
1015 			return ret;
1016 	}
1017 
1018 	return 0;
1019 }
1020 
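/* Walk the TX timestamp descriptor ring: for each completed entry, look up
 * the skb saved under the port/tag encoded in the descriptor and deliver the
 * hardware timestamp via skb_tstamp_tx().
 */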
1021 static void rswitch_ts(struct rswitch_private *priv)
1022 {
1023 	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
1024 	struct skb_shared_hwtstamps shhwtstamps;
1025 	struct rswitch_ts_desc *desc;
1026 	struct rswitch_device *rdev;
1027 	struct sk_buff *ts_skb;
1028 	struct timespec64 ts;
1029 	unsigned int num;
1030 	u32 tag, port;
1031 
1032 	desc = &gq->ts_ring[gq->cur];
1033 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
1034 		dma_rmb();
1035 
1036 		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
1037 		if (unlikely(port >= RSWITCH_NUM_PORTS))
1038 			goto next;
1039 		rdev = priv->rdev[port];
1040 
1041 		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
1042 		if (unlikely(tag >= TS_TAGS_PER_PORT))
1043 			goto next;
1044 		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
1045 		smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
1046 		clear_bit(tag, rdev->ts_skb_used);
1047 
1048 		if (unlikely(!ts_skb))
1049 			goto next;
1050 
1051 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1052 		ts.tv_sec = __le32_to_cpu(desc->ts_sec);
1053 		ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
1054 		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
1055 		skb_tstamp_tx(ts_skb, &shhwtstamps);
1056 		dev_consume_skb_irq(ts_skb);
1057 
1058 next:
1059 		gq->cur = rswitch_next_queue_index(gq, true, 1);
1060 		desc = &gq->ts_ring[gq->cur];
1061 	}
1062 
1063 	num = rswitch_get_num_cur_queues(gq);
1064 	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
1065 	gq->dirty = rswitch_next_queue_index(gq, false, num);
1066 }
1067 
1068 static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
1069 {
1070 	struct rswitch_private *priv = dev_id;
1071 
1072 	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
1073 		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
1074 		rswitch_ts(priv);
1075 
1076 		return IRQ_HANDLED;
1077 	}
1078 
1079 	return IRQ_NONE;
1080 }
1081 
1082 static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
1083 {
1084 	int irq;
1085 
1086 	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
1087 	if (irq < 0)
1088 		return irq;
1089 
1090 	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
1091 				0, GWCA_TS_IRQ_NAME, priv);
1092 }
1093 
1094 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
1095 static int rswitch_etha_change_mode(struct rswitch_etha *etha,
1096 				    enum rswitch_etha_mode mode)
1097 {
1098 	int ret;
1099 
1100 	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
1101 		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);
1102 
1103 	iowrite32(mode, etha->addr + EAMC);
1104 
1105 	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);
1106 
1107 	if (mode == EAMC_OPC_DISABLE)
1108 		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);
1109 
1110 	return ret;
1111 }
1112 
1113 static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
1114 {
1115 	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
1116 	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
1117 	u8 *mac = &etha->mac_addr[0];
1118 
1119 	mac[0] = (mrmac0 >>  8) & 0xFF;
1120 	mac[1] = (mrmac0 >>  0) & 0xFF;
1121 	mac[2] = (mrmac1 >> 24) & 0xFF;
1122 	mac[3] = (mrmac1 >> 16) & 0xFF;
1123 	mac[4] = (mrmac1 >>  8) & 0xFF;
1124 	mac[5] = (mrmac1 >>  0) & 0xFF;
1125 }
1126 
1127 static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
1128 {
1129 	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
1130 	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1131 		  etha->addr + MRMAC1);
1132 }
1133 
1134 static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
1135 {
1136 	iowrite32(MLVC_PLV, etha->addr + MLVC);
1137 
1138 	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
1139 }
1140 
1141 static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
1142 {
1143 	u32 pis, lsc;
1144 
1145 	rswitch_etha_write_mac_address(etha, mac);
1146 
1147 	switch (etha->phy_interface) {
1148 	case PHY_INTERFACE_MODE_SGMII:
1149 		pis = MPIC_PIS_GMII;
1150 		break;
1151 	case PHY_INTERFACE_MODE_USXGMII:
1152 	case PHY_INTERFACE_MODE_5GBASER:
1153 		pis = MPIC_PIS_XGMII;
1154 		break;
1155 	default:
1156 		pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
1157 		break;
1158 	}
1159 
1160 	switch (etha->speed) {
1161 	case 100:
1162 		lsc = MPIC_LSC_100M;
1163 		break;
1164 	case 1000:
1165 		lsc = MPIC_LSC_1G;
1166 		break;
1167 	case 2500:
1168 		lsc = MPIC_LSC_2_5G;
1169 		break;
1170 	default:
1171 		lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
1172 		break;
1173 	}
1174 
1175 	rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
1176 		       FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
1177 }
1178 
1179 static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
1180 {
1181 	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
1182 		       FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
1183 		       FIELD_PREP(MPIC_PSMHT, 0x06));
1184 }
1185 
1186 static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
1187 {
1188 	int err;
1189 
1190 	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
1191 	if (err < 0)
1192 		return err;
1193 	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
1194 	if (err < 0)
1195 		return err;
1196 
1197 	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
1198 	rswitch_rmac_setting(etha, mac);
1199 	rswitch_etha_enable_mii(etha);
1200 
1201 	err = rswitch_etha_wait_link_verification(etha);
1202 	if (err < 0)
1203 		return err;
1204 
1205 	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
1206 	if (err < 0)
1207 		return err;
1208 
1209 	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
1210 }
1211 
1212 static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
1213 				unsigned int mmf, unsigned int pda,
1214 				unsigned int pra, unsigned int pop,
1215 				unsigned int prd)
1216 {
1217 	u32 val;
1218 	int ret;
1219 
1220 	val = MPSM_PSME |
1221 	      FIELD_PREP(MPSM_MFF, mmf) |
1222 	      FIELD_PREP(MPSM_PDA, pda) |
1223 	      FIELD_PREP(MPSM_PRA, pra) |
1224 	      FIELD_PREP(MPSM_POP, pop) |
1225 	      FIELD_PREP(MPSM_PRD, prd);
1226 	iowrite32(val, etha->addr + MPSM);
1227 
1228 	ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
1229 	if (ret)
1230 		return ret;
1231 
1232 	if (read) {
1233 		val = ioread32(etha->addr + MPSM);
1234 		ret = FIELD_GET(MPSM_PRD, val);
1235 	}
1236 
1237 	return ret;
1238 }
1239 
1240 static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
1241 				     int regad)
1242 {
1243 	struct rswitch_etha *etha = bus->priv;
1244 	int ret;
1245 
1246 	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1247 				   MPSM_POP_ADDRESS, regad);
1248 	if (ret)
1249 		return ret;
1250 
1251 	return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
1252 				    MPSM_POP_READ_C45, 0);
1253 }
1254 
1255 static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
1256 				      int regad, u16 val)
1257 {
1258 	struct rswitch_etha *etha = bus->priv;
1259 	int ret;
1260 
1261 	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1262 				   MPSM_POP_ADDRESS, regad);
1263 	if (ret)
1264 		return ret;
1265 
1266 	return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1267 				    MPSM_POP_WRITE, val);
1268 }
1269 
1270 static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
1271 {
1272 	struct rswitch_etha *etha = bus->priv;
1273 
1274 	return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
1275 				    MPSM_POP_READ_C22, 0);
1276 }
1277 
1278 static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
1279 				      int regad, u16 val)
1280 {
1281 	struct rswitch_etha *etha = bus->priv;
1282 
1283 	return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
1284 				    MPSM_POP_WRITE, val);
1285 }
1286 
1287 /* The caller must call of_node_put() on the returned node when done */
1288 static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
1289 {
1290 	struct device_node *ports, *port;
1291 	int err = 0;
1292 	u32 index;
1293 
1294 	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
1295 				     "ethernet-ports");
1296 	if (!ports)
1297 		return NULL;
1298 
1299 	for_each_available_child_of_node(ports, port) {
1300 		err = of_property_read_u32(port, "reg", &index);
1301 		if (err < 0) {
1302 			port = NULL;
1303 			goto out;
1304 		}
1305 		if (index == rdev->etha->index)
1306 			break;
1307 	}
1308 
1309 out:
1310 	of_node_put(ports);
1311 
1312 	return port;
1313 }
1314 
1315 static int rswitch_etha_get_params(struct rswitch_device *rdev)
1316 {
1317 	u32 max_speed;
1318 	int err;
1319 
1320 	if (!rdev->np_port)
1321 		return 0;	/* ignored */
1322 
1323 	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
1324 	if (err)
1325 		return err;
1326 
1327 	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
1328 	if (!err) {
1329 		rdev->etha->speed = max_speed;
1330 		return 0;
1331 	}
1332 
1333 	/* If there is no "max-speed" property, use a default speed based on the PHY mode */
1334 	switch (rdev->etha->phy_interface) {
1335 	case PHY_INTERFACE_MODE_MII:
1336 		rdev->etha->speed = SPEED_100;
1337 		break;
1338 	case PHY_INTERFACE_MODE_SGMII:
1339 		rdev->etha->speed = SPEED_1000;
1340 		break;
1341 	case PHY_INTERFACE_MODE_USXGMII:
1342 		rdev->etha->speed = SPEED_2500;
1343 		break;
1344 	default:
1345 		return -EINVAL;
1346 	}
1347 
1348 	return 0;
1349 }
1350 
1351 static int rswitch_mii_register(struct rswitch_device *rdev)
1352 {
1353 	struct device_node *mdio_np;
1354 	struct mii_bus *mii_bus;
1355 	int err;
1356 
1357 	mii_bus = mdiobus_alloc();
1358 	if (!mii_bus)
1359 		return -ENOMEM;
1360 
1361 	mii_bus->name = "rswitch_mii";
1362 	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
1363 	mii_bus->priv = rdev->etha;
1364 	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
1365 	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
1366 	mii_bus->read = rswitch_etha_mii_read_c22;
1367 	mii_bus->write = rswitch_etha_mii_write_c22;
1368 	mii_bus->parent = &rdev->priv->pdev->dev;
1369 
1370 	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
1371 	err = of_mdiobus_register(mii_bus, mdio_np);
1372 	if (err < 0) {
1373 		mdiobus_free(mii_bus);
1374 		goto out;
1375 	}
1376 
1377 	rdev->etha->mii = mii_bus;
1378 
1379 out:
1380 	of_node_put(mdio_np);
1381 
1382 	return err;
1383 }
1384 
1385 static void rswitch_mii_unregister(struct rswitch_device *rdev)
1386 {
1387 	if (rdev->etha->mii) {
1388 		mdiobus_unregister(rdev->etha->mii);
1389 		mdiobus_free(rdev->etha->mii);
1390 		rdev->etha->mii = NULL;
1391 	}
1392 }
1393 
1394 static void rswitch_adjust_link(struct net_device *ndev)
1395 {
1396 	struct rswitch_device *rdev = netdev_priv(ndev);
1397 	struct phy_device *phydev = ndev->phydev;
1398 
1399 	if (phydev->link != rdev->etha->link) {
1400 		phy_print_status(phydev);
1401 		if (phydev->link)
1402 			phy_power_on(rdev->serdes);
1403 		else if (rdev->serdes->power_count)
1404 			phy_power_off(rdev->serdes);
1405 
1406 		rdev->etha->link = phydev->link;
1407 
1408 		if (!rdev->priv->etha_no_runtime_change &&
1409 		    phydev->speed != rdev->etha->speed) {
1410 			rdev->etha->speed = phydev->speed;
1411 
1412 			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
1413 			phy_set_speed(rdev->serdes, rdev->etha->speed);
1414 		}
1415 	}
1416 }
1417 
1418 static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
1419 					 struct phy_device *phydev)
1420 {
1421 	if (!rdev->priv->etha_no_runtime_change)
1422 		return;
1423 
1424 	switch (rdev->etha->speed) {
1425 	case SPEED_2500:
1426 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
1427 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
1428 		break;
1429 	case SPEED_1000:
1430 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
1431 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
1432 		break;
1433 	case SPEED_100:
1434 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
1435 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
1436 		break;
1437 	default:
1438 		break;
1439 	}
1440 
1441 	phy_set_max_speed(phydev, rdev->etha->speed);
1442 }
1443 
1444 static int rswitch_phy_device_init(struct rswitch_device *rdev)
1445 {
1446 	struct phy_device *phydev;
1447 	struct device_node *phy;
1448 	int err = -ENOENT;
1449 
1450 	if (!rdev->np_port)
1451 		return -ENODEV;
1452 
1453 	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
1454 	if (!phy)
1455 		return -ENODEV;
1456 
1457 	/* Set phydev->host_interfaces before calling of_phy_connect() so that
1458 	 * the PHY is configured with the host interface information.
1459 	 */
1460 	phydev = of_phy_find_device(phy);
1461 	if (!phydev)
1462 		goto out;
1463 	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
1464 	phydev->mac_managed_pm = true;
1465 
1466 	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
1467 				rdev->etha->phy_interface);
1468 	if (!phydev)
1469 		goto out;
1470 
1471 	phy_set_max_speed(phydev, SPEED_2500);
1472 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1473 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1474 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1475 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1476 	rswitch_phy_remove_link_mode(rdev, phydev);
1477 
1478 	phy_attached_info(phydev);
1479 
1480 	err = 0;
1481 out:
1482 	of_node_put(phy);
1483 
1484 	return err;
1485 }
1486 
1487 static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
1488 {
1489 	if (rdev->ndev->phydev)
1490 		phy_disconnect(rdev->ndev->phydev);
1491 }
1492 
1493 static int rswitch_serdes_set_params(struct rswitch_device *rdev)
1494 {
1495 	int err;
1496 
1497 	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
1498 			       rdev->etha->phy_interface);
1499 	if (err < 0)
1500 		return err;
1501 
1502 	return phy_set_speed(rdev->serdes, rdev->etha->speed);
1503 }
1504 
1505 static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
1506 {
1507 	int err;
1508 
1509 	if (!rdev->etha->operated) {
1510 		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
1511 		if (err < 0)
1512 			return err;
1513 		if (rdev->priv->etha_no_runtime_change)
1514 			rdev->etha->operated = true;
1515 	}
1516 
1517 	err = rswitch_mii_register(rdev);
1518 	if (err < 0)
1519 		return err;
1520 
1521 	err = rswitch_phy_device_init(rdev);
1522 	if (err < 0)
1523 		goto err_phy_device_init;
1524 
1525 	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
1526 	if (IS_ERR(rdev->serdes)) {
1527 		err = PTR_ERR(rdev->serdes);
1528 		goto err_serdes_phy_get;
1529 	}
1530 
1531 	err = rswitch_serdes_set_params(rdev);
1532 	if (err < 0)
1533 		goto err_serdes_set_params;
1534 
1535 	return 0;
1536 
1537 err_serdes_set_params:
1538 err_serdes_phy_get:
1539 	rswitch_phy_device_deinit(rdev);
1540 
1541 err_phy_device_init:
1542 	rswitch_mii_unregister(rdev);
1543 
1544 	return err;
1545 }
1546 
1547 static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
1548 {
1549 	rswitch_phy_device_deinit(rdev);
1550 	rswitch_mii_unregister(rdev);
1551 }
1552 
1553 static int rswitch_ether_port_init_all(struct rswitch_private *priv)
1554 {
1555 	unsigned int i;
1556 	int err;
1557 
1558 	rswitch_for_each_enabled_port(priv, i) {
1559 		err = rswitch_ether_port_init_one(priv->rdev[i]);
1560 		if (err)
1561 			goto err_init_one;
1562 	}
1563 
1564 	rswitch_for_each_enabled_port(priv, i) {
1565 		err = phy_init(priv->rdev[i]->serdes);
1566 		if (err)
1567 			goto err_serdes;
1568 	}
1569 
1570 	return 0;
1571 
1572 err_serdes:
1573 	rswitch_for_each_enabled_port_continue_reverse(priv, i)
1574 		phy_exit(priv->rdev[i]->serdes);
1575 	i = RSWITCH_NUM_PORTS;
1576 
1577 err_init_one:
1578 	rswitch_for_each_enabled_port_continue_reverse(priv, i)
1579 		rswitch_ether_port_deinit_one(priv->rdev[i]);
1580 
1581 	return err;
1582 }
1583 
1584 static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
1585 {
1586 	unsigned int i;
1587 
1588 	rswitch_for_each_enabled_port(priv, i) {
1589 		phy_exit(priv->rdev[i]->serdes);
1590 		rswitch_ether_port_deinit_one(priv->rdev[i]);
1591 	}
1592 }
1593 
1594 static int rswitch_open(struct net_device *ndev)
1595 {
1596 	struct rswitch_device *rdev = netdev_priv(ndev);
1597 	unsigned long flags;
1598 
1599 	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1600 		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
1601 
1602 	napi_enable(&rdev->napi);
1603 
1604 	spin_lock_irqsave(&rdev->priv->lock, flags);
1605 	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
1606 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
1607 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
1608 	spin_unlock_irqrestore(&rdev->priv->lock, flags);
1609 
1610 	phy_start(ndev->phydev);
1611 
1612 	netif_start_queue(ndev);
1613 
1614 	return 0;
1615 }
1616 
1617 static int rswitch_stop(struct net_device *ndev)
1618 {
1619 	struct rswitch_device *rdev = netdev_priv(ndev);
1620 	struct sk_buff *ts_skb;
1621 	unsigned long flags;
1622 	unsigned int tag;
1623 
1624 	netif_tx_stop_all_queues(ndev);
1625 
1626 	phy_stop(ndev->phydev);
1627 
1628 	spin_lock_irqsave(&rdev->priv->lock, flags);
1629 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
1630 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
1631 	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
1632 	spin_unlock_irqrestore(&rdev->priv->lock, flags);
1633 
1634 	napi_disable(&rdev->napi);
1635 
1636 	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1637 		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
1638 
1639 	for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
1640 	     tag < TS_TAGS_PER_PORT;
1641 	     tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
1642 		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
1643 		clear_bit(tag, rdev->ts_skb_used);
1644 		if (ts_skb)
1645 			dev_kfree_skb(ts_skb);
1646 	}
1647 
1648 	return 0;
1649 }
1650 
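/* Fill INFO1 of a TX descriptor (destination port vector, IPV, format). For
 * skbs that requested a hardware TX timestamp, reserve a timestamp tag and
 * keep a reference to the skb until the timestamp descriptor arrives.
 * Returns false if no free tag is available.
 */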
1651 static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
1652 				       struct sk_buff *skb,
1653 				       struct rswitch_ext_desc *desc)
1654 {
1655 	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
1656 				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
1657 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1658 		unsigned int tag;
1659 
1660 		tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
1661 		if (tag == TS_TAGS_PER_PORT)
1662 			return false;
1663 		smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
1664 		rdev->ts_skb[tag] = skb_get(skb);
1665 		set_bit(tag, rdev->ts_skb_used);
1666 
1667 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1668 		desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
1669 
1670 		skb_tx_timestamp(skb);
1671 	}
1672 
1673 	return true;
1674 }
1675 
1676 static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
1677 				 struct sk_buff *skb,
1678 				 struct rswitch_ext_desc *desc,
1679 				 dma_addr_t dma_addr, u16 len, u8 die_dt)
1680 {
1681 	rswitch_desc_set_dptr(&desc->desc, dma_addr);
1682 	desc->desc.info_ds = cpu_to_le16(len);
1683 	if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
1684 		return false;
1685 
1686 	dma_wmb();
1687 
1688 	desc->desc.die_dt = die_dt;
1689 
1690 	return true;
1691 }
1692 
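/* Descriptor type for entry @index of an @nr_desc-long TX chain: FSINGLE for
 * a single-descriptor frame, otherwise FSTART/FMID.../FEND, with DIE
 * (descriptor interrupt enable) set on the last descriptor.
 */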
1693 static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
1694 {
1695 	if (nr_desc == 1)
1696 		return DT_FSINGLE | DIE;
1697 	if (index == 0)
1698 		return DT_FSTART;
1699 	if (nr_desc - 1 == index)
1700 		return DT_FEND | DIE;
1701 	return DT_FMID;
1702 }
1703 
1704 static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
1705 {
1706 	switch (die_dt & DT_MASK) {
1707 	case DT_FSINGLE:
1708 	case DT_FEND:
1709 		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
1710 	case DT_FSTART:
1711 	case DT_FMID:
1712 		return RSWITCH_DESC_BUF_SIZE;
1713 	default:
1714 		return 0;
1715 	}
1716 }
1717 
1718 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1719 {
1720 	struct rswitch_device *rdev = netdev_priv(ndev);
1721 	struct rswitch_gwca_queue *gq = rdev->tx_queue;
1722 	dma_addr_t dma_addr, dma_addr_orig;
1723 	netdev_tx_t ret = NETDEV_TX_OK;
1724 	struct rswitch_ext_desc *desc;
1725 	unsigned int i, nr_desc;
1726 	u8 die_dt;
1727 	u16 len;
1728 
1729 	nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
1730 	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
1731 		netif_stop_subqueue(ndev, 0);
1732 		return NETDEV_TX_BUSY;
1733 	}
1734 
1735 	if (skb_put_padto(skb, ETH_ZLEN))
1736 		return ret;
1737 
1738 	dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
1739 	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
1740 		goto err_kfree;
1741 
1742 	/* Store the skb at the last descriptor so that it is not freed before the hardware completes the transmission */
1743 	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
1744 	gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
1745 
1746 	dma_wmb();
1747 
1748 	/* DT_FSTART must be set last, so fill the descriptors in reverse order. */
1749 	for (i = nr_desc; i-- > 0; ) {
1750 		desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
1751 		die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
1752 		dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
1753 		len = rswitch_ext_desc_get_len(die_dt, skb->len);
1754 		if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
1755 			goto err_unmap;
1756 	}
1757 
1758 	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
1759 	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
1760 
1761 	return ret;
1762 
1763 err_unmap:
1764 	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
1765 	dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
1766 
1767 err_kfree:
1768 	dev_kfree_skb_any(skb);
1769 
1770 	return ret;
1771 }
1772 
1773 static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
1774 {
1775 	return &ndev->stats;
1776 }
1777 
1778 static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
1779 {
1780 	struct rswitch_device *rdev = netdev_priv(ndev);
1781 	struct rcar_gen4_ptp_private *ptp_priv;
1782 	struct hwtstamp_config config;
1783 
1784 	ptp_priv = rdev->priv->ptp_priv;
1785 
1786 	config.flags = 0;
1787 	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1788 						    HWTSTAMP_TX_OFF;
1789 	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
1790 	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
1791 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1792 		break;
1793 	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
1794 		config.rx_filter = HWTSTAMP_FILTER_ALL;
1795 		break;
1796 	default:
1797 		config.rx_filter = HWTSTAMP_FILTER_NONE;
1798 		break;
1799 	}
1800 
1801 	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1802 }
1803 
1804 static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
1805 {
1806 	struct rswitch_device *rdev = netdev_priv(ndev);
1807 	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
1808 	struct hwtstamp_config config;
1809 	u32 tstamp_tx_ctrl;
1810 
1811 	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1812 		return -EFAULT;
1813 
1814 	if (config.flags)
1815 		return -EINVAL;
1816 
1817 	switch (config.tx_type) {
1818 	case HWTSTAMP_TX_OFF:
1819 		tstamp_tx_ctrl = 0;
1820 		break;
1821 	case HWTSTAMP_TX_ON:
1822 		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
1823 		break;
1824 	default:
1825 		return -ERANGE;
1826 	}
1827 
1828 	switch (config.rx_filter) {
1829 	case HWTSTAMP_FILTER_NONE:
1830 		tstamp_rx_ctrl = 0;
1831 		break;
1832 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1833 		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
1834 		break;
1835 	default:
1836 		config.rx_filter = HWTSTAMP_FILTER_ALL;
1837 		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
1838 		break;
1839 	}
1840 
1841 	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1842 	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1843 
1844 	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1845 }
1846 
1847 static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1848 {
1849 	if (!netif_running(ndev))
1850 		return -EINVAL;
1851 
1852 	switch (cmd) {
1853 	case SIOCGHWTSTAMP:
1854 		return rswitch_hwstamp_get(ndev, req);
1855 	case SIOCSHWTSTAMP:
1856 		return rswitch_hwstamp_set(ndev, req);
1857 	default:
1858 		return phy_mii_ioctl(ndev->phydev, req, cmd);
1859 	}
1860 }
1861 
1862 static const struct net_device_ops rswitch_netdev_ops = {
1863 	.ndo_open = rswitch_open,
1864 	.ndo_stop = rswitch_stop,
1865 	.ndo_start_xmit = rswitch_start_xmit,
1866 	.ndo_get_stats = rswitch_get_stats,
1867 	.ndo_eth_ioctl = rswitch_eth_ioctl,
1868 	.ndo_validate_addr = eth_validate_addr,
1869 	.ndo_set_mac_address = eth_mac_addr,
1870 };
1871 
1872 static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
1873 {
1874 	struct rswitch_device *rdev = netdev_priv(ndev);
1875 
1876 	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
1877 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1878 				SOF_TIMESTAMPING_TX_HARDWARE |
1879 				SOF_TIMESTAMPING_RX_HARDWARE |
1880 				SOF_TIMESTAMPING_RAW_HARDWARE;
1881 	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
1882 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1883 
1884 	return 0;
1885 }
1886 
1887 static const struct ethtool_ops rswitch_ethtool_ops = {
1888 	.get_ts_info = rswitch_get_ts_info,
1889 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
1890 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
1891 };
1892 
1893 static const struct of_device_id renesas_eth_sw_of_table[] = {
1894 	{ .compatible = "renesas,r8a779f0-ether-switch", },
1895 	{ }
1896 };
1897 MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
1898 
1899 static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
1900 {
1901 	struct rswitch_etha *etha = &priv->etha[index];
1902 
1903 	memset(etha, 0, sizeof(*etha));
1904 	etha->index = index;
1905 	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
1906 	etha->coma_addr = priv->addr;
1907 
1908 	/* MPIC.PSMCS = clk [MHz] / (MDC frequency [MHz] * 2) - 1.
1909 	 * Calculate PSMCS for an MDC frequency of 2.5 MHz; multiply both the
1910 	 * numerator and the denominator by 10 to keep the arithmetic integral.
1911 	 */
1912 	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
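	/* Worked example (hypothetical 320 MHz peripheral clock):
	 * 320000000 / 100000 / (25 * 2) - 1 = 3200 / 50 - 1 = 63,
	 * which yields MDC = 320 MHz / ((63 + 1) * 2) = 2.5 MHz.
	 */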
1913 }
1914 
1915 static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
1916 {
1917 	struct platform_device *pdev = priv->pdev;
1918 	struct rswitch_device *rdev;
1919 	struct net_device *ndev;
1920 	int err;
1921 
1922 	if (index >= RSWITCH_NUM_PORTS)
1923 		return -EINVAL;
1924 
1925 	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
1926 	if (!ndev)
1927 		return -ENOMEM;
1928 
1929 	SET_NETDEV_DEV(ndev, &pdev->dev);
1930 	ether_setup(ndev);
1931 
1932 	rdev = netdev_priv(ndev);
1933 	rdev->ndev = ndev;
1934 	rdev->priv = priv;
1935 	priv->rdev[index] = rdev;
1936 	rdev->port = index;
1937 	rdev->etha = &priv->etha[index];
1938 	rdev->addr = priv->addr;
1939 
1940 	ndev->base_addr = (unsigned long)rdev->addr;
1941 	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
1942 	ndev->netdev_ops = &rswitch_netdev_ops;
1943 	ndev->ethtool_ops = &rswitch_ethtool_ops;
1944 	ndev->max_mtu = RSWITCH_MAX_MTU;
1945 	ndev->min_mtu = ETH_MIN_MTU;
1946 
1947 	netif_napi_add(ndev, &rdev->napi, rswitch_poll);
1948 
1949 	rdev->np_port = rswitch_get_port_node(rdev);
1950 	rdev->disabled = !rdev->np_port;
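	/* MAC address preference: the port's DT node, then the address already
	 * programmed into the ETHA (read back earlier in rswitch_init()), then
	 * a randomly generated one.
	 */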
1951 	err = of_get_ethdev_address(rdev->np_port, ndev);
1952 	if (err) {
1953 		if (is_valid_ether_addr(rdev->etha->mac_addr))
1954 			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
1955 		else
1956 			eth_hw_addr_random(ndev);
1957 	}
1958 
1959 	err = rswitch_etha_get_params(rdev);
1960 	if (err < 0)
1961 		goto out_get_params;
1962 
1963 	err = rswitch_rxdmac_alloc(ndev);
1964 	if (err < 0)
1965 		goto out_rxdmac;
1966 
1967 	err = rswitch_txdmac_alloc(ndev);
1968 	if (err < 0)
1969 		goto out_txdmac;
1970 
1971 	return 0;
1972 
1973 out_txdmac:
1974 	rswitch_rxdmac_free(ndev);
1975 
1976 out_rxdmac:
1977 out_get_params:
1978 	of_node_put(rdev->np_port);
1979 	netif_napi_del(&rdev->napi);
1980 	free_netdev(ndev);
1981 
1982 	return err;
1983 }
1984 
1985 static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
1986 {
1987 	struct rswitch_device *rdev = priv->rdev[index];
1988 	struct net_device *ndev = rdev->ndev;
1989 
1990 	rswitch_txdmac_free(ndev);
1991 	rswitch_rxdmac_free(ndev);
1992 	of_node_put(rdev->np_port);
1993 	netif_napi_del(&rdev->napi);
1994 	free_netdev(ndev);
1995 }
1996 
1997 static int rswitch_init(struct rswitch_private *priv)
1998 {
1999 	unsigned int i;
2000 	int err;
2001 
2002 	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
2003 		rswitch_etha_init(priv, i);
2004 
2005 	rswitch_clock_enable(priv);
2006 	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
2007 		rswitch_etha_read_mac_address(&priv->etha[i]);
2008 
2009 	rswitch_reset(priv);
2010 
2011 	rswitch_clock_enable(priv);
2012 	rswitch_top_init(priv);
2013 	err = rswitch_bpool_config(priv);
2014 	if (err < 0)
2015 		return err;
2016 
2017 	rswitch_coma_init(priv);
2018 
2019 	err = rswitch_gwca_linkfix_alloc(priv);
2020 	if (err < 0)
2021 		return err;
2022 
2023 	err = rswitch_gwca_ts_queue_alloc(priv);
2024 	if (err < 0)
2025 		goto err_ts_queue_alloc;
2026 
2027 	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
2028 		err = rswitch_device_alloc(priv, i);
2029 		if (err < 0) {
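			/* Unwind: free only the ports allocated so far, in
			 * reverse order.
			 */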
2030 			for (; i-- > 0; )
2031 				rswitch_device_free(priv, i);
2032 			goto err_device_alloc;
2033 		}
2034 	}
2035 
2036 	rswitch_fwd_init(priv);
2037 
2038 	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
2039 				     clk_get_rate(priv->clk));
2040 	if (err < 0)
2041 		goto err_ptp_register;
2042 
2043 	err = rswitch_gwca_request_irqs(priv);
2044 	if (err < 0)
2045 		goto err_gwca_request_irq;
2046 
2047 	err = rswitch_gwca_ts_request_irqs(priv);
2048 	if (err < 0)
2049 		goto err_gwca_ts_request_irq;
2050 
2051 	err = rswitch_gwca_hw_init(priv);
2052 	if (err < 0)
2053 		goto err_gwca_hw_init;
2054 
2055 	err = rswitch_ether_port_init_all(priv);
2056 	if (err)
2057 		goto err_ether_port_init_all;
2058 
2059 	rswitch_for_each_enabled_port(priv, i) {
2060 		err = register_netdev(priv->rdev[i]->ndev);
2061 		if (err) {
2062 			rswitch_for_each_enabled_port_continue_reverse(priv, i)
2063 				unregister_netdev(priv->rdev[i]->ndev);
2064 			goto err_register_netdev;
2065 		}
2066 	}
2067 
2068 	rswitch_for_each_enabled_port(priv, i)
2069 		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
2070 			    priv->rdev[i]->ndev->dev_addr);
2071 
2072 	return 0;
2073 
2074 err_register_netdev:
2075 	rswitch_ether_port_deinit_all(priv);
2076 
2077 err_ether_port_init_all:
2078 	rswitch_gwca_hw_deinit(priv);
2079 
2080 err_gwca_hw_init:
2081 err_gwca_ts_request_irq:
2082 err_gwca_request_irq:
2083 	rcar_gen4_ptp_unregister(priv->ptp_priv);
2084 
2085 err_ptp_register:
2086 	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
2087 		rswitch_device_free(priv, i);
2088 
2089 err_device_alloc:
2090 	rswitch_gwca_ts_queue_free(priv);
2091 
2092 err_ts_queue_alloc:
2093 	rswitch_gwca_linkfix_free(priv);
2094 
2095 	return err;
2096 }
2097 
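/* SoC revisions for which etha_no_runtime_change is set, i.e. (as the table
 * name indicates) the ETHA link speed is not to be changed at runtime after
 * initialization.
 */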
2098 static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
2099 	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
2100 	{ /* Sentinel */ }
2101 };
2102 
2103 static int renesas_eth_sw_probe(struct platform_device *pdev)
2104 {
2105 	const struct soc_device_attribute *attr;
2106 	struct rswitch_private *priv;
2107 	struct resource *res;
2108 	int ret;
2109 
2110 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
2111 	if (!res) {
2112 		dev_err(&pdev->dev, "invalid resource\n");
2113 		return -EINVAL;
2114 	}
2115 
2116 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
2117 	if (!priv)
2118 		return -ENOMEM;
2119 	spin_lock_init(&priv->lock);
2120 
2121 	priv->clk = devm_clk_get(&pdev->dev, NULL);
2122 	if (IS_ERR(priv->clk))
2123 		return PTR_ERR(priv->clk);
2124 
2125 	attr = soc_device_match(rswitch_soc_no_speed_change);
2126 	if (attr)
2127 		priv->etha_no_runtime_change = true;
2128 
2129 	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
2130 	if (!priv->ptp_priv)
2131 		return -ENOMEM;
2132 
2133 	platform_set_drvdata(pdev, priv);
2134 	priv->pdev = pdev;
2135 	priv->addr = devm_ioremap_resource(&pdev->dev, res);
2136 	if (IS_ERR(priv->addr))
2137 		return PTR_ERR(priv->addr);
2138 
2139 	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;
2140 
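	/* Prefer a 40-bit DMA mask to match the descriptor address width; fall
	 * back to 32-bit if the platform cannot provide it.
	 */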
2141 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
2142 	if (ret < 0) {
2143 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2144 		if (ret < 0)
2145 			return ret;
2146 	}
2147 
2148 	priv->gwca.index = AGENT_INDEX_GWCA;
2149 	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
2150 				    RSWITCH_MAX_NUM_QUEUES);
2151 	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
2152 					 sizeof(*priv->gwca.queues), GFP_KERNEL);
2153 	if (!priv->gwca.queues)
2154 		return -ENOMEM;
2155 
2156 	pm_runtime_enable(&pdev->dev);
2157 	pm_runtime_get_sync(&pdev->dev);
2158 
2159 	ret = rswitch_init(priv);
2160 	if (ret < 0) {
2161 		pm_runtime_put(&pdev->dev);
2162 		pm_runtime_disable(&pdev->dev);
2163 		return ret;
2164 	}
2165 
2166 	device_set_wakeup_capable(&pdev->dev, 1);
2167 
2168 	return ret;
2169 }
2170 
2171 static void rswitch_deinit(struct rswitch_private *priv)
2172 {
2173 	unsigned int i;
2174 
2175 	rswitch_gwca_hw_deinit(priv);
2176 	rcar_gen4_ptp_unregister(priv->ptp_priv);
2177 
2178 	rswitch_for_each_enabled_port(priv, i) {
2179 		struct rswitch_device *rdev = priv->rdev[i];
2180 
2181 		unregister_netdev(rdev->ndev);
2182 		rswitch_ether_port_deinit_one(rdev);
2183 		phy_exit(priv->rdev[i]->serdes);
2184 	}
2185 
2186 	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
2187 		rswitch_device_free(priv, i);
2188 
2189 	rswitch_gwca_ts_queue_free(priv);
2190 	rswitch_gwca_linkfix_free(priv);
2191 
2192 	rswitch_clock_disable(priv);
2193 }
2194 
2195 static void renesas_eth_sw_remove(struct platform_device *pdev)
2196 {
2197 	struct rswitch_private *priv = platform_get_drvdata(pdev);
2198 
2199 	rswitch_deinit(priv);
2200 
2201 	pm_runtime_put(&pdev->dev);
2202 	pm_runtime_disable(&pdev->dev);
2203 
2204 	platform_set_drvdata(pdev, NULL);
2205 }
2206 
2207 static int renesas_eth_sw_suspend(struct device *dev)
2208 {
2209 	struct rswitch_private *priv = dev_get_drvdata(dev);
2210 	struct net_device *ndev;
2211 	unsigned int i;
2212 
2213 	rswitch_for_each_enabled_port(priv, i) {
2214 		ndev = priv->rdev[i]->ndev;
2215 		if (netif_running(ndev)) {
2216 			netif_device_detach(ndev);
2217 			rswitch_stop(ndev);
2218 		}
2219 		if (priv->rdev[i]->serdes->init_count)
2220 			phy_exit(priv->rdev[i]->serdes);
2221 	}
2222 
2223 	return 0;
2224 }
2225 
2226 static int renesas_eth_sw_resume(struct device *dev)
2227 {
2228 	struct rswitch_private *priv = dev_get_drvdata(dev);
2229 	struct net_device *ndev;
2230 	unsigned int i;
2231 
2232 	rswitch_for_each_enabled_port(priv, i) {
2233 		phy_init(priv->rdev[i]->serdes);
2234 		ndev = priv->rdev[i]->ndev;
2235 		if (netif_running(ndev)) {
2236 			rswitch_open(ndev);
2237 			netif_device_attach(ndev);
2238 		}
2239 	}
2240 
2241 	return 0;
2242 }
2243 
2244 static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
2245 				renesas_eth_sw_resume);
2246 
2247 static struct platform_driver renesas_eth_sw_driver_platform = {
2248 	.probe = renesas_eth_sw_probe,
2249 	.remove = renesas_eth_sw_remove,
2250 	.driver = {
2251 		.name = "renesas_eth_sw",
2252 		.pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
2253 		.of_match_table = renesas_eth_sw_of_table,
2254 	}
2255 };
2256 module_platform_driver(renesas_eth_sw_driver_platform);
2257 MODULE_AUTHOR("Yoshihiro Shimoda");
2258 MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
2259 MODULE_LICENSE("GPL");
2260