xref: /linux/drivers/net/ethernet/renesas/rtsn.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Renesas Ethernet-TSN device driver
4  *
5  * Copyright (C) 2022 Renesas Electronics Corporation
6  * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
7  */
8 
9 #include <linux/clk.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22 #include <linux/spinlock.h>
23 
24 #include "rtsn.h"
25 #include "rcar_gen4_ptp.h"
26 
/* Per-device driver state, allocated as the netdev private area. */
struct rtsn_private {
	struct net_device *ndev;
	struct platform_device *pdev;
	void __iomem *base;			/* Mapped "tsnes" register block */
	struct rcar_gen4_ptp_private *ptp_priv;
	struct clk *clk;
	struct reset_control *reset;

	/* Descriptor base-address tables (BAT) and the per-chain rings.
	 * The rings have num_*_ring data entries plus one trailing DT_LINK
	 * descriptor that loops back to the start of the ring.
	 */
	u32 num_tx_ring;
	u32 num_rx_ring;
	u32 tx_desc_bat_size;
	dma_addr_t tx_desc_bat_dma;
	struct rtsn_desc *tx_desc_bat;
	u32 rx_desc_bat_size;
	dma_addr_t rx_desc_bat_dma;
	struct rtsn_desc *rx_desc_bat;
	dma_addr_t tx_desc_dma;
	dma_addr_t rx_desc_dma;
	struct rtsn_ext_desc *tx_ring;
	struct rtsn_ext_ts_desc *rx_ring;
	struct sk_buff **tx_skb;		/* skb owned by each TX entry */
	struct sk_buff **rx_skb;		/* skb owned by each RX entry */
	spinlock_t lock;	/* Register access lock */
	/* Free-running ring cursors; entry index is cursor % ring size. */
	u32 cur_tx;
	u32 dirty_tx;
	u32 cur_rx;
	u32 dirty_rx;
	u8 ts_tag;				/* Rolling TX timestamp tag */
	struct napi_struct napi;
	struct rtnl_link_stats64 stats;		/* SW-maintained counters */

	struct mii_bus *mii;
	phy_interface_t iface;
	int link;				/* Last PHY link state */
	int speed;				/* Current speed in Mbps */

	int tx_data_irq;
	int rx_data_irq;

	/* Cached hwtstamp configuration (see rtsn_hwtstamp_set()). */
	u32 tstamp_tx_ctrl;
	u32 tstamp_rx_ctrl;
};
69 
/* Read a 32-bit device register. */
static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
{
	return ioread32(priv->base + reg);
}
74 
/* Write a 32-bit device register. */
static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
{
	iowrite32(data, priv->base + reg);
}
79 
80 static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
81 			u32 clear, u32 set)
82 {
83 	rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
84 }
85 
/* Poll @reg until (value & @mask) == @expected, or time out.
 * Returns 0 on success, -ETIMEDOUT otherwise. May sleep.
 */
static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
			 u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout(priv->base + reg, val,
				  (val & mask) == expected,
				  RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
}
95 
96 static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
97 {
98 	if (enable) {
99 		rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
100 		rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
101 	} else {
102 		rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
103 		rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
104 	}
105 }
106 
/* Reclaim completed TX descriptors, unmapping and freeing their skbs.
 * @free_txed_only: when true, stop at the first descriptor still owned by
 * the hardware (not yet DT_FEMPTY); when false, reclaim everything.
 * Returns the number of skbs freed. Caller must hold priv->lock.
 */
static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct rtsn_ext_desc *desc;
	struct sk_buff *skb;
	int free_num = 0;
	int entry, size;

	for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
		entry = priv->dirty_tx % priv->num_tx_ring;
		desc = &priv->tx_ring[entry];
		if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
			break;

		/* Order the die_dt read before reading the rest of desc. */
		dma_rmb();
		size = le16_to_cpu(desc->info_ds) & TX_DS;
		skb = priv->tx_skb[entry];
		if (skb) {
			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct timespec64 ts;

				/* Report the current PHC time as the TX
				 * timestamp for this frame.
				 */
				rcar_gen4_ptp_gettime64(priv->ptp_priv, &ts);
				memset(&shhwtstamps, 0, sizeof(shhwtstamps));
				shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
				skb_tstamp_tx(skb, &shhwtstamps);
			}
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* NOTE(review): tx_skb[entry] is left pointing at the
			 * freed skb; it is only overwritten on the next xmit
			 * to this entry — confirm no path reads it before then.
			 */
			dev_kfree_skb_any(priv->tx_skb[entry]);
			free_num++;

			priv->stats.tx_packets++;
			priv->stats.tx_bytes += size;
		}

		desc->die_dt = DT_EEMPTY;
	}

	/* Re-terminate the ring with the trailing link descriptor. */
	desc = &priv->tx_ring[priv->num_tx_ring];
	desc->die_dt = DT_LINK;

	return free_num;
}
152 
/* NAPI RX: deliver up to @budget received frames, then refill the ring.
 * Returns the number of packets passed up the stack.
 */
static int rtsn_rx(struct net_device *ndev, int budget)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	unsigned int ndescriptors;
	unsigned int rx_packets;
	unsigned int i;
	bool get_ts;

	/* RX timestamping is on for any filter other than NONE. */
	get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;

	/* Number of entries the hardware could have filled since the
	 * last pass (dirty..cur wraps via the free-running counters).
	 */
	ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
	rx_packets = 0;
	for (i = 0; i < ndescriptors; i++) {
		const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 pkt_len;

		/* Stop processing descriptors if budget is consumed. */
		if (rx_packets >= budget)
			break;

		/* Stop processing descriptors on first empty. */
		if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
			break;

		/* Order the die_dt read before the rest of the descriptor. */
		dma_rmb();
		pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;

		/* NOTE(review): rx_skb[entry] can be NULL if a previous
		 * refill failed to allocate; that would oops in skb_put()
		 * below — confirm against the refill path.
		 */
		skb = priv->rx_skb[entry];
		priv->rx_skb[entry] = NULL;
		dma_addr = le32_to_cpu(desc->dptr);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

		/* Get timestamp if enabled. */
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));

			/* ts_nsec is masked in LE domain before conversion;
			 * the upper two bits are not part of the nanoseconds.
			 */
			ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));

			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}

		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);

		/* Update statistics. */
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += pkt_len;

		/* Update counters. */
		priv->cur_rx++;
		rx_packets++;
	}

	/* Refill the RX ring buffers */
	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		desc->info_ds = cpu_to_le16(PKT_BUF_SZ);

		if (!priv->rx_skb[entry]) {
			skb = napi_alloc_skb(&priv->napi,
					     PKT_BUF_SZ + RTSN_ALIGN - 1);
			if (!skb)
				break;
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->info_ds),
						  DMA_FROM_DEVICE);
			/* On mapping failure mark the entry zero-length so
			 * the hardware will not DMA into it.
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->info_ds = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			skb_checksum_none_assert(skb);
			priv->rx_skb[entry] = skb;
		}

		/* Publish the descriptor body before flipping ownership. */
		dma_wmb();
		desc->die_dt = DT_FEMPTY | D_DIE;
	}

	/* Re-terminate the ring with the trailing link descriptor. */
	priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;

	return rx_packets;
}
249 
/* NAPI poll: process RX up to @budget, reclaim TX, and re-enable the
 * data interrupts once all pending work is done.
 */
static int rtsn_poll(struct napi_struct *napi, int budget)
{
	struct rtsn_private *priv;
	struct net_device *ndev;
	unsigned long flags;
	int work_done;

	ndev = napi->dev;
	priv = netdev_priv(ndev);

	/* Processing RX Descriptor Ring */
	work_done = rtsn_rx(ndev, budget);

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
	rtsn_tx_free(ndev, true);
	/* TX entries were reclaimed, so the queue can accept frames again. */
	netif_wake_subqueue(ndev, 0);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Re-enable TX/RX interrupts */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		spin_lock_irqsave(&priv->lock, flags);
		rtsn_ctrl_data_irq(priv, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}
278 
279 static int rtsn_desc_alloc(struct rtsn_private *priv)
280 {
281 	struct device *dev = &priv->pdev->dev;
282 	unsigned int i;
283 
284 	priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
285 	priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
286 					       &priv->tx_desc_bat_dma,
287 					       GFP_KERNEL);
288 
289 	if (!priv->tx_desc_bat)
290 		return -ENOMEM;
291 
292 	for (i = 0; i < TX_NUM_CHAINS; i++)
293 		priv->tx_desc_bat[i].die_dt = DT_EOS;
294 
295 	priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
296 	priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
297 					       &priv->rx_desc_bat_dma,
298 					       GFP_KERNEL);
299 
300 	if (!priv->rx_desc_bat)
301 		return -ENOMEM;
302 
303 	for (i = 0; i < RX_NUM_CHAINS; i++)
304 		priv->rx_desc_bat[i].die_dt = DT_EOS;
305 
306 	return 0;
307 }
308 
309 static void rtsn_desc_free(struct rtsn_private *priv)
310 {
311 	if (priv->tx_desc_bat)
312 		dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
313 				  priv->tx_desc_bat, priv->tx_desc_bat_dma);
314 	priv->tx_desc_bat = NULL;
315 
316 	if (priv->rx_desc_bat)
317 		dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
318 				  priv->rx_desc_bat, priv->rx_desc_bat_dma);
319 	priv->rx_desc_bat = NULL;
320 }
321 
/* Free the TX/RX descriptor rings and the skb pointer arrays.
 * NOTE(review): only the pointer arrays are kfree'd — any skbs still
 * referenced by rx_skb[]/tx_skb[] (e.g. on the rtsn_chain_init error
 * path) appear to leak here; confirm whether callers free them first.
 */
static void rtsn_chain_free(struct rtsn_private *priv)
{
	struct device *dev = &priv->pdev->dev;

	dma_free_coherent(dev,
			  sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
			  priv->tx_ring, priv->tx_desc_dma);
	priv->tx_ring = NULL;

	dma_free_coherent(dev,
			  sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
			  priv->rx_ring, priv->rx_desc_dma);
	priv->rx_ring = NULL;

	kfree(priv->tx_skb);
	priv->tx_skb = NULL;

	kfree(priv->rx_skb);
	priv->rx_skb = NULL;
}
342 
/* Allocate the TX/RX descriptor rings (tx_size/rx_size data entries plus
 * one link entry each), the skb pointer arrays, and pre-allocate one RX
 * skb per ring entry. Returns 0 or -ENOMEM; partial allocations are
 * released via rtsn_chain_free().
 */
static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int i;

	priv->num_tx_ring = tx_size;
	priv->num_rx_ring = rx_size;

	priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
	priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);

	if (!priv->rx_skb || !priv->tx_skb)
		goto error;

	/* Over-allocate by RTSN_ALIGN - 1 so the data can be aligned. */
	for (i = 0; i < rx_size; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
		if (!skb)
			goto error;
		skb_reserve(skb, NET_IP_ALIGN);
		priv->rx_skb[i] = skb;
	}

	/* Allocate TX, RX descriptors */
	priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_desc) * (tx_size + 1),
					   &priv->tx_desc_dma, GFP_KERNEL);
	priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
					   &priv->rx_desc_dma, GFP_KERNEL);

	if (!priv->tx_ring || !priv->rx_ring)
		goto error;

	return 0;
error:
	rtsn_chain_free(priv);

	return -ENOMEM;
}
383 
/* Initialize the TX/RX rings: reset the cursors, mark TX entries empty,
 * map the pre-allocated RX skbs, terminate both rings with a DT_LINK
 * entry pointing back to the ring start, and hook the rings into the
 * descriptor base-address tables.
 */
static void rtsn_chain_format(struct rtsn_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct rtsn_ext_ts_desc *rx_desc;
	struct rtsn_ext_desc *tx_desc;
	struct rtsn_desc *bat_desc;
	dma_addr_t dma_addr;
	unsigned int i;

	priv->cur_tx = 0;
	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;

	/* TX */
	memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
	for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
		tx_desc->die_dt = DT_EEMPTY | D_DIE;

	/* Loop the final entry back to the start of the ring. */
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
	tx_desc->die_dt = DT_LINK;

	bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);

	/* RX */
	memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
	for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
		dma_addr = dma_map_single(ndev->dev.parent,
					  priv->rx_skb[i]->data, PKT_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* Leave info_ds zero on a failed mapping so the hardware
		 * will not DMA into the entry.
		 */
		if (!dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
		rx_desc->dptr = cpu_to_le32((u32)dma_addr);
		rx_desc->die_dt = DT_FEMPTY | D_DIE;
	}
	/* Loop the final entry back to the start of the ring. */
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
	rx_desc->die_dt = DT_LINK;

	bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
}
428 
429 static int rtsn_dmac_init(struct rtsn_private *priv)
430 {
431 	int ret;
432 
433 	ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
434 	if (ret)
435 		return ret;
436 
437 	rtsn_chain_format(priv);
438 
439 	return 0;
440 }
441 
442 static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
443 {
444 	return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
445 }
446 
447 static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
448 {
449 	unsigned int i;
450 
451 	/* Need to busy loop as mode changes can happen in atomic context. */
452 	for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
453 		if (rtsn_read_mode(priv) == mode)
454 			return 0;
455 
456 		udelay(RTSN_INTERVAL_US);
457 	}
458 
459 	return -ETIMEDOUT;
460 }
461 
462 static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
463 {
464 	int ret;
465 
466 	rtsn_write(priv, OCR, mode);
467 	ret = rtsn_wait_mode(priv, mode);
468 	if (ret)
469 		netdev_err(priv->ndev, "Failed to switch operation mode\n");
470 	return ret;
471 }
472 
/* Return a non-zero value when a TX or RX data interrupt is pending.
 * NOTE(review): both lines OR the chain mask into the read status, so
 * the result is always non-zero and rtsn_irq() always reports
 * IRQ_HANDLED — this looks like it was meant to be '&' (mask the
 * status to the used chains); confirm against the hardware manual.
 */
static int rtsn_get_data_irq_status(struct rtsn_private *priv)
{
	u32 val;

	val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
	val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);

	return val;
}
482 
/* Shared handler for the TX and RX data interrupts: acknowledge the
 * status bits, mask further data interrupts, and hand off to NAPI.
 */
static irqreturn_t rtsn_irq(int irq, void *dev_id)
{
	struct rtsn_private *priv = dev_id;
	int ret = IRQ_NONE;

	spin_lock(&priv->lock);

	if (rtsn_get_data_irq_status(priv)) {
		/* Clear TX/RX irq status */
		rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
		rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));

		if (napi_schedule_prep(&priv->napi)) {
			/* Disable TX/RX interrupts */
			rtsn_ctrl_data_irq(priv, false);

			__napi_schedule(&priv->napi);
		}

		ret = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

	return ret;
}
509 
510 static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
511 			    unsigned long flags, struct rtsn_private *priv,
512 			    const char *ch)
513 {
514 	char *name;
515 	int ret;
516 
517 	name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
518 			      priv->ndev->name, ch);
519 	if (!name)
520 		return -ENOMEM;
521 
522 	ret = request_irq(irq, handler, flags, name, priv);
523 	if (ret)
524 		netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
525 
526 	return ret;
527 }
528 
529 static void rtsn_free_irqs(struct rtsn_private *priv)
530 {
531 	free_irq(priv->tx_data_irq, priv);
532 	free_irq(priv->rx_data_irq, priv);
533 }
534 
535 static int rtsn_request_irqs(struct rtsn_private *priv)
536 {
537 	int ret;
538 
539 	priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
540 	if (priv->rx_data_irq < 0)
541 		return priv->rx_data_irq;
542 
543 	priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
544 	if (priv->tx_data_irq < 0)
545 		return priv->tx_data_irq;
546 
547 	ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
548 	if (ret)
549 		return ret;
550 
551 	ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
552 	if (ret) {
553 		free_irq(priv->tx_data_irq, priv);
554 		return ret;
555 	}
556 
557 	return 0;
558 }
559 
560 static int rtsn_reset(struct rtsn_private *priv)
561 {
562 	reset_control_reset(priv->reset);
563 	mdelay(1);
564 
565 	return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
566 }
567 
/* Initialize the AXI bus-master interface: wait for reset completion,
 * program the AXI read/write configuration, load the TX and RX
 * descriptor chain base addresses, and enable the data interrupts.
 */
static int rtsn_axibmi_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
	if (ret)
		return ret;

	/* Set AXIWC */
	rtsn_write(priv, AXIWC, AXIWC_DEFAULT);

	/* Set AXIRC */
	rtsn_write(priv, AXIRC, AXIRC_DEFAULT);

	/* TX Descriptor chain setting */
	rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
	rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, TATLR, TATLR_TATL);

	/* The load bit self-clears when the table has been taken. */
	ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
	if (ret)
		return ret;

	/* RX Descriptor chain setting */
	rtsn_write(priv, RATLS0,
		   RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
	rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, RATLR, RATLR_RATL);

	ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
	if (ret)
		return ret;

	/* Enable TX/RX interrupts */
	rtsn_ctrl_data_irq(priv, true);

	return 0;
}
606 
/* Initialize the MAC hardware dispatcher: TX queue/frame-size defaults
 * and the RX filter routing to the used RX chain.
 */
static void rtsn_mhd_init(struct rtsn_private *priv)
{
	/* TX General setting */
	rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
	rtsn_write(priv, TMS0, TMS_MFS_MAX);

	/* RX Filter IP */
	rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
	rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
}
617 
618 static int rtsn_get_phy_params(struct rtsn_private *priv)
619 {
620 	int ret;
621 
622 	ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
623 	if (ret)
624 		return ret;
625 
626 	switch (priv->iface) {
627 	case PHY_INTERFACE_MODE_MII:
628 		priv->speed = 100;
629 		break;
630 	case PHY_INTERFACE_MODE_RGMII:
631 	case PHY_INTERFACE_MODE_RGMII_ID:
632 	case PHY_INTERFACE_MODE_RGMII_RXID:
633 	case PHY_INTERFACE_MODE_RGMII_TXID:
634 		priv->speed = 1000;
635 		break;
636 	default:
637 		return -EOPNOTSUPP;
638 	}
639 
640 	return 0;
641 }
642 
643 static void rtsn_set_phy_interface(struct rtsn_private *priv)
644 {
645 	u32 val;
646 
647 	switch (priv->iface) {
648 	case PHY_INTERFACE_MODE_MII:
649 		val = MPIC_PIS_MII;
650 		break;
651 	case PHY_INTERFACE_MODE_RGMII:
652 	case PHY_INTERFACE_MODE_RGMII_ID:
653 	case PHY_INTERFACE_MODE_RGMII_RXID:
654 	case PHY_INTERFACE_MODE_RGMII_TXID:
655 		val = MPIC_PIS_GMII;
656 		break;
657 	default:
658 		return;
659 	}
660 
661 	rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
662 }
663 
664 static void rtsn_set_rate(struct rtsn_private *priv)
665 {
666 	u32 val;
667 
668 	switch (priv->speed) {
669 	case 10:
670 		val = MPIC_LSC_10M;
671 		break;
672 	case 100:
673 		val = MPIC_LSC_100M;
674 		break;
675 	case 1000:
676 		val = MPIC_LSC_1G;
677 		break;
678 	default:
679 		return;
680 	}
681 
682 	rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
683 }
684 
685 static int rtsn_rmac_init(struct rtsn_private *priv)
686 {
687 	const u8 *mac_addr = priv->ndev->dev_addr;
688 	int ret;
689 
690 	/* Set MAC address */
691 	rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
692 	rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
693 		   (mac_addr[4] << 8) | mac_addr[5]);
694 
695 	/* Set xMII type */
696 	rtsn_set_phy_interface(priv);
697 	rtsn_set_rate(priv);
698 
699 	/* Enable MII */
700 	rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
701 		    MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
702 
703 	/* Link verification */
704 	rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
705 	ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
706 	if (ret)
707 		return ret;
708 
709 	return ret;
710 }
711 
/* Full hardware bring-up: reset, configure in CONFIG mode (AXI bus
 * interface, MAC dispatcher, reduced MAC), then transition to
 * OPERATION mode via DISABLE (state changes between CONFIG and
 * OPERATION must pass through DISABLE).
 */
static int rtsn_hw_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_reset(priv);
	if (ret)
		return ret;

	/* Change to CONFIG mode */
	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
	if (ret)
		return ret;

	ret = rtsn_axibmi_init(priv);
	if (ret)
		return ret;

	rtsn_mhd_init(priv);

	ret = rtsn_rmac_init(priv);
	if (ret)
		return ret;

	ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
	if (ret)
		return ret;

	/* Change to OPERATION mode */
	ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);

	return ret;
}
744 
/* Perform one Clause-22 MDIO transaction via the MPSM register.
 * @read selects read vs write; @data is only used for writes.
 * Returns the read value, 0 for a completed write, or a negative errno
 * if the transaction did not complete in time.
 */
static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
			   int regad, u16 data)
{
	struct rtsn_private *priv = bus->priv;
	u32 val;
	int ret;

	val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;

	if (!read)
		val |= MPSM_PSMAD | MPSM_PRD_SET(data);

	rtsn_write(priv, MPSM, val);

	/* PSME self-clears when the transaction completes. */
	ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
	if (ret)
		return ret;

	if (read)
		ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));

	return ret;
}
768 
/* mii_bus .read callback: MDIO read, no data word to send. */
static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
{
	return rtsn_mii_access(bus, true, addr, regnum, 0);
}
773 
/* mii_bus .write callback: MDIO write of @val. */
static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return rtsn_mii_access(bus, false, addr, regnum, val);
}
778 
/* Allocate and register the MDIO bus described by the "mdio" DT child
 * node. The device must be put into CONFIG mode (with MDIO timing
 * programmed) before PHYs can be probed. Returns 0 or a negative errno;
 * the bus is freed on failure.
 */
static int rtsn_mdio_alloc(struct rtsn_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *mdio_node;
	struct mii_bus *mii;
	int ret;

	mii = mdiobus_alloc();
	if (!mii)
		return -ENOMEM;

	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
	if (!mdio_node) {
		ret = -ENODEV;
		goto out_free_bus;
	}

	/* Enter config mode before registering the MDIO bus */
	ret = rtsn_reset(priv);
	if (ret)
		goto out_free_bus;

	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
	if (ret)
		goto out_free_bus;

	/* Program the MDIO clock-select and hold-time defaults. */
	rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		    MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);

	/* Register the MDIO bus */
	mii->name = "rtsn_mii";
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);
	mii->priv = priv;
	mii->read = rtsn_mii_read;
	mii->write = rtsn_mii_write;
	mii->parent = dev;

	ret = of_mdiobus_register(mii, mdio_node);
	of_node_put(mdio_node);
	if (ret)
		goto out_free_bus;

	priv->mii = mii;

	return 0;

out_free_bus:
	mdiobus_free(mii);
	return ret;
}
831 
832 static void rtsn_mdio_free(struct rtsn_private *priv)
833 {
834 	mdiobus_unregister(priv->mii);
835 	mdiobus_free(priv->mii);
836 	priv->mii = NULL;
837 }
838 
/* phylib adjust_link callback: track link/speed changes reported by the
 * PHY and reprogram the MAC rate, transitioning through the required
 * operation modes. Called with RTNL held; register access is guarded
 * by priv->lock.
 */
static void rtsn_adjust_link(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
		}

		if (!priv->link) {
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		/* Link went down: forget the cached speed as well. */
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
	}

	if (new_state) {
		/* Need to transition to CONFIG mode before reconfiguring and
		 * then back to the original mode. Any state change to/from
		 * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx.
		 */
		enum rtsn_mode orgmode = rtsn_read_mode(priv);

		/* Transit to CONFIG */
		if (orgmode != OCR_OPC_CONFIG) {
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
				goto out;
		}

		rtsn_set_rate(priv);

		/* Transition to original mode */
		if (orgmode != OCR_OPC_CONFIG) {
			if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, orgmode))
				goto out;
		}
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Log outside the spinlock; phy_print_status() may sleep-print. */
	if (new_state)
		phy_print_status(phydev);
}
897 
/* Connect to the PHY referenced by the "phy-handle" DT property and
 * restrict it to full-duplex modes. Returns 0 or -ENOENT.
 */
static int rtsn_phy_init(struct rtsn_private *priv)
{
	struct device_node *np = priv->ndev->dev.parent->of_node;
	struct phy_device *phydev;
	struct device_node *phy;

	priv->link = 0;

	phy = of_parse_phandle(np, "phy-handle", 0);
	if (!phy)
		return -ENOENT;

	phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
				priv->iface);
	of_node_put(phy);
	if (!phydev)
		return -ENOENT;

	/* Only support full-duplex mode */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	phy_attached_info(phydev);

	return 0;
}
925 
926 static void rtsn_phy_deinit(struct rtsn_private *priv)
927 {
928 	phy_disconnect(priv->ndev->phydev);
929 	priv->ndev->phydev = NULL;
930 }
931 
/* Bring the interface up: descriptors, DMA chains, hardware, PHY and
 * IRQs, unwinding in reverse order on failure. Returns 0 or a negative
 * errno.
 */
static int rtsn_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_desc_alloc(priv);
	if (ret)
		return ret;

	ret = rtsn_dmac_init(priv);
	if (ret)
		goto error_free_desc;

	ret = rtsn_hw_init(priv);
	if (ret)
		goto error_free_chain;

	ret = rtsn_phy_init(priv);
	if (ret)
		goto error_free_chain;

	ret = rtsn_request_irqs(priv);
	if (ret)
		goto error_free_phy;

	return 0;
error_free_phy:
	rtsn_phy_deinit(priv);
error_free_chain:
	rtsn_chain_free(priv);
error_free_desc:
	rtsn_desc_free(priv);
	return ret;
}
965 
/* Tear down everything set up by rtsn_init(), in reverse order. */
static void rtsn_deinit(struct rtsn_private *priv)
{
	rtsn_free_irqs(priv);
	rtsn_phy_deinit(priv);
	rtsn_chain_free(priv);
	rtsn_desc_free(priv);
}
973 
974 static void rtsn_parse_mac_address(struct device_node *np,
975 				   struct net_device *ndev)
976 {
977 	struct rtsn_private *priv = netdev_priv(ndev);
978 	u8 addr[ETH_ALEN];
979 	u32 mrmac0;
980 	u32 mrmac1;
981 
982 	/* Try to read address from Device Tree. */
983 	if (!of_get_mac_address(np, addr)) {
984 		eth_hw_addr_set(ndev, addr);
985 		return;
986 	}
987 
988 	/* Try to read address from device. */
989 	mrmac0 = rtsn_read(priv, MRMAC0);
990 	mrmac1 = rtsn_read(priv, MRMAC1);
991 
992 	addr[0] = (mrmac0 >>  8) & 0xff;
993 	addr[1] = (mrmac0 >>  0) & 0xff;
994 	addr[2] = (mrmac1 >> 24) & 0xff;
995 	addr[3] = (mrmac1 >> 16) & 0xff;
996 	addr[4] = (mrmac1 >>  8) & 0xff;
997 	addr[5] = (mrmac1 >>  0) & 0xff;
998 
999 	if (is_valid_ether_addr(addr)) {
1000 		eth_hw_addr_set(ndev, addr);
1001 		return;
1002 	}
1003 
1004 	/* Fallback to a random address */
1005 	eth_hw_addr_random(ndev);
1006 }
1007 
/* ndo_open: enable NAPI, initialize the hardware, start the PHY and
 * the TX queue. Returns 0 or a negative errno.
 */
static int rtsn_open(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	int ret;

	/* NAPI must be enabled before rtsn_init() unmasks interrupts. */
	napi_enable(&priv->napi);

	ret = rtsn_init(priv);
	if (ret) {
		napi_disable(&priv->napi);
		return ret;
	}

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}
1027 
/* ndo_stop: stop the PHY and NAPI, put the hardware into DISABLE mode
 * (halting Rx/Tx) and release all resources.
 */
static int rtsn_stop(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);

	phy_stop(priv->ndev->phydev);
	napi_disable(&priv->napi);
	rtsn_change_mode(priv, OCR_OPC_DISABLE);
	rtsn_deinit(priv);

	return 0;
}
1039 
1040 static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1041 {
1042 	struct rtsn_private *priv = netdev_priv(ndev);
1043 	struct rtsn_ext_desc *desc;
1044 	int ret = NETDEV_TX_OK;
1045 	unsigned long flags;
1046 	dma_addr_t dma_addr;
1047 	int entry;
1048 
1049 	spin_lock_irqsave(&priv->lock, flags);
1050 
1051 	/* Drop packet if it won't fit in a single descriptor. */
1052 	if (skb->len >= TX_DS) {
1053 		priv->stats.tx_dropped++;
1054 		priv->stats.tx_errors++;
1055 		dev_kfree_skb_any(skb);
1056 		goto out;
1057 	}
1058 
1059 	if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
1060 		netif_stop_subqueue(ndev, 0);
1061 		ret = NETDEV_TX_BUSY;
1062 		goto out;
1063 	}
1064 
1065 	if (skb_put_padto(skb, ETH_ZLEN))
1066 		goto out;
1067 
1068 	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1069 				  DMA_TO_DEVICE);
1070 	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
1071 		dev_kfree_skb_any(skb);
1072 		goto out;
1073 	}
1074 
1075 	entry = priv->cur_tx % priv->num_tx_ring;
1076 	priv->tx_skb[entry] = skb;
1077 	desc = &priv->tx_ring[entry];
1078 	desc->dptr = cpu_to_le32(dma_addr);
1079 	desc->info_ds = cpu_to_le16(skb->len);
1080 	desc->info1 = cpu_to_le64(skb->len);
1081 
1082 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1083 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1084 		priv->ts_tag++;
1085 		desc->info_ds |= cpu_to_le16(TXC);
1086 		desc->info = priv->ts_tag;
1087 	}
1088 
1089 	skb_tx_timestamp(skb);
1090 	dma_wmb();
1091 
1092 	desc->die_dt = DT_FSINGLE | D_DIE;
1093 	priv->cur_tx++;
1094 
1095 	/* Start xmit */
1096 	rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
1097 out:
1098 	spin_unlock_irqrestore(&priv->lock, flags);
1099 	return ret;
1100 }
1101 
1102 static void rtsn_get_stats64(struct net_device *ndev,
1103 			     struct rtnl_link_stats64 *storage)
1104 {
1105 	struct rtsn_private *priv = netdev_priv(ndev);
1106 	*storage = priv->stats;
1107 }
1108 
1109 static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1110 {
1111 	if (!netif_running(ndev))
1112 		return -ENODEV;
1113 
1114 	return phy_do_ioctl_running(ndev, ifr, cmd);
1115 }
1116 
1117 static int rtsn_hwtstamp_get(struct net_device *ndev,
1118 			     struct kernel_hwtstamp_config *config)
1119 {
1120 	struct rtsn_private *priv;
1121 
1122 	if (!netif_running(ndev))
1123 		return -ENODEV;
1124 
1125 	priv = netdev_priv(ndev);
1126 
1127 	config->flags = 0;
1128 	config->tx_type = priv->tstamp_tx_ctrl;
1129 	config->rx_filter = priv->tstamp_rx_ctrl;
1130 
1131 	return 0;
1132 }
1133 
/* ndo_hwtstamp_set: validate and cache the requested timestamping
 * configuration. TX supports OFF/ON only; any RX filter other than
 * NONE or PTP_V2_L2_EVENT is widened to FILTER_ALL (reported back to
 * user space via config->rx_filter). Only valid while the device is up.
 */
static int rtsn_hwtstamp_set(struct net_device *ndev,
			     struct kernel_hwtstamp_config *config,
			     struct netlink_ext_ack *extack)
{
	enum hwtstamp_rx_filters tstamp_rx_ctrl;
	enum hwtstamp_tx_types tstamp_tx_ctrl;
	struct rtsn_private *priv;

	if (!netif_running(ndev))
		return -ENODEV;

	priv = netdev_priv(ndev);

	/* No flags are supported. */
	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = config->tx_type;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl = config->rx_filter;
		break;
	default:
		/* Timestamp everything and tell user space we did so. */
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
		break;
	}

	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return 0;
}
1175 
/* Netdev callbacks; MAC address changes use the generic eth helpers. */
static const struct net_device_ops rtsn_netdev_ops = {
	.ndo_open		= rtsn_open,
	.ndo_stop		= rtsn_stop,
	.ndo_start_xmit		= rtsn_start_xmit,
	.ndo_get_stats64	= rtsn_get_stats64,
	.ndo_eth_ioctl		= rtsn_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_hwtstamp_set	= rtsn_hwtstamp_set,
	.ndo_hwtstamp_get	= rtsn_hwtstamp_get,
};
1187 
/* ethtool get_ts_info: advertise the gen4 PTP clock and the supported
 * timestamping modes.
 * NOTE(review): TX software timestamping is advertised without
 * SOF_TIMESTAMPING_RX_SOFTWARE/SOF_TIMESTAMPING_SOFTWARE, which the
 * timestamping ABI normally pairs together — confirm intent.
 */
static int rtsn_get_ts_info(struct net_device *ndev,
			    struct kernel_ethtool_ts_info *info)
{
	struct rtsn_private *priv = netdev_priv(ndev);

	info->phc_index = rcar_gen4_ptp_clock_index(priv->ptp_priv);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}
1203 
/* ethtool operations: link state and settings are delegated to the
 * generic phylib helpers; only the timestamp query is driver-specific.
 */
static const struct ethtool_ops rtsn_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= rtsn_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1211 
/* Devicetree match table: Ethernet-TSN on R-Car V4H (r8a779g0). */
static const struct of_device_id rtsn_match_table[] = {
	{ .compatible = "renesas,r8a779g0-ethertsn", },
	{ /* Sentinel */ }
};

MODULE_DEVICE_TABLE(of, rtsn_match_table);
1218 
1219 static int rtsn_probe(struct platform_device *pdev)
1220 {
1221 	struct rtsn_private *priv;
1222 	struct net_device *ndev;
1223 	void __iomem *ptpaddr;
1224 	struct resource *res;
1225 	int ret;
1226 
1227 	ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
1228 				  RX_NUM_CHAINS);
1229 	if (!ndev)
1230 		return -ENOMEM;
1231 
1232 	priv = netdev_priv(ndev);
1233 	priv->pdev = pdev;
1234 	priv->ndev = ndev;
1235 
1236 	spin_lock_init(&priv->lock);
1237 	platform_set_drvdata(pdev, priv);
1238 
1239 	priv->clk = devm_clk_get(&pdev->dev, NULL);
1240 	if (IS_ERR(priv->clk)) {
1241 		ret = PTR_ERR(priv->clk);
1242 		goto error_free;
1243 	}
1244 
1245 	priv->reset = devm_reset_control_get(&pdev->dev, NULL);
1246 	if (IS_ERR(priv->reset)) {
1247 		ret = PTR_ERR(priv->reset);
1248 		goto error_free;
1249 	}
1250 
1251 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
1252 	if (!res) {
1253 		dev_err(&pdev->dev, "Can't find tsnes resource\n");
1254 		ret = -EINVAL;
1255 		goto error_free;
1256 	}
1257 
1258 	priv->base = devm_ioremap_resource(&pdev->dev, res);
1259 	if (IS_ERR(priv->base)) {
1260 		ret = PTR_ERR(priv->base);
1261 		goto error_free;
1262 	}
1263 
1264 	SET_NETDEV_DEV(ndev, &pdev->dev);
1265 
1266 	ndev->features = NETIF_F_RXCSUM;
1267 	ndev->hw_features = NETIF_F_RXCSUM;
1268 	ndev->base_addr = res->start;
1269 	ndev->netdev_ops = &rtsn_netdev_ops;
1270 	ndev->ethtool_ops = &rtsn_ethtool_ops;
1271 
1272 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
1273 	if (!res) {
1274 		dev_err(&pdev->dev, "Can't find gptp resource\n");
1275 		ret = -EINVAL;
1276 		goto error_free;
1277 	}
1278 
1279 	ptpaddr = devm_ioremap_resource(&pdev->dev, res);
1280 	if (IS_ERR(ptpaddr)) {
1281 		ret = PTR_ERR(ptpaddr);
1282 		goto error_free;
1283 	}
1284 
1285 	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev, ptpaddr);
1286 	if (!priv->ptp_priv) {
1287 		ret = -ENOMEM;
1288 		goto error_free;
1289 	}
1290 
1291 	ret = rtsn_get_phy_params(priv);
1292 	if (ret)
1293 		goto error_free;
1294 
1295 	pm_runtime_enable(&pdev->dev);
1296 	pm_runtime_get_sync(&pdev->dev);
1297 
1298 	netif_napi_add(ndev, &priv->napi, rtsn_poll);
1299 
1300 	rtsn_parse_mac_address(pdev->dev.of_node, ndev);
1301 
1302 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1303 
1304 	device_set_wakeup_capable(&pdev->dev, 1);
1305 
1306 	ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
1307 	if (ret)
1308 		goto error_pm;
1309 
1310 	ret = rtsn_mdio_alloc(priv);
1311 	if (ret)
1312 		goto error_ptp;
1313 
1314 	ret = register_netdev(ndev);
1315 	if (ret)
1316 		goto error_mdio;
1317 
1318 	netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
1319 
1320 	return 0;
1321 
1322 error_mdio:
1323 	rtsn_mdio_free(priv);
1324 error_ptp:
1325 	rcar_gen4_ptp_unregister(priv->ptp_priv);
1326 error_pm:
1327 	netif_napi_del(&priv->napi);
1328 	rtsn_change_mode(priv, OCR_OPC_DISABLE);
1329 	pm_runtime_put_sync(&pdev->dev);
1330 	pm_runtime_disable(&pdev->dev);
1331 error_free:
1332 	free_netdev(ndev);
1333 
1334 	return ret;
1335 }
1336 
/* Tear down one instance in reverse order of rtsn_probe():
 * unregister the netdev first so userspace can no longer reach the
 * device, then release MDIO and PTP, force the hardware into DISABLE
 * mode, and finally drop the runtime PM reference and free the netdev.
 */
static void rtsn_remove(struct platform_device *pdev)
{
	struct rtsn_private *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);
	rtsn_mdio_free(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);
	rtsn_change_mode(priv, OCR_OPC_DISABLE);
	netif_napi_del(&priv->napi);

	/* Register access is done; let the device power down. */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	free_netdev(priv->ndev);
}
1352 
/* Platform driver glue; instances are matched via rtsn_match_table. */
static struct platform_driver rtsn_driver = {
	.probe		= rtsn_probe,
	.remove		= rtsn_remove,
	.driver	= {
		.name	= "rtsn",
		.of_match_table	= rtsn_match_table,
	}
};
module_platform_driver(rtsn_driver);

MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
MODULE_LICENSE("GPL");
1366