xref: /linux/drivers/net/ethernet/renesas/rtsn.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Renesas Ethernet-TSN device driver
4  *
5  * Copyright (C) 2022 Renesas Electronics Corporation
6  * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
7  */
8 
9 #include <linux/clk.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22 #include <linux/spinlock.h>
23 
24 #include "rtsn.h"
25 #include "rcar_gen4_ptp.h"
26 
/* Per-device driver state, allocated as netdev_priv() of @ndev. */
struct rtsn_private {
	struct net_device *ndev;
	struct platform_device *pdev;
	void __iomem *base;	/* Mapped device register window */
	struct rcar_gen4_ptp_private *ptp_priv;
	struct clk *clk;
	struct reset_control *reset;

	/* Descriptor memory: per-chain base address tables (BAT) plus one
	 * TX and one RX data ring, each followed by a trailing DT_LINK
	 * descriptor that chains back to the ring start.
	 */
	u32 num_tx_ring;
	u32 num_rx_ring;
	u32 tx_desc_bat_size;
	dma_addr_t tx_desc_bat_dma;
	struct rtsn_desc *tx_desc_bat;
	u32 rx_desc_bat_size;
	dma_addr_t rx_desc_bat_dma;
	struct rtsn_desc *rx_desc_bat;
	dma_addr_t tx_desc_dma;
	dma_addr_t rx_desc_dma;
	struct rtsn_ext_desc *tx_ring;
	struct rtsn_ext_ts_desc *rx_ring;
	struct sk_buff **tx_skb;	/* skbs attached to tx_ring entries */
	struct sk_buff **rx_skb;	/* skbs attached to rx_ring entries */
	spinlock_t lock;	/* Register access lock */
	/* Free-running ring counters; ring entry = counter % ring size. */
	u32 cur_tx;
	u32 dirty_tx;
	u32 cur_rx;
	u32 dirty_rx;
	u8 ts_tag;	/* Tag stamped into TX descriptors requesting a timestamp */
	struct napi_struct napi;
	struct rtnl_link_stats64 stats;	/* Software-maintained counters */

	struct mii_bus *mii;
	phy_interface_t iface;
	int link;	/* Last link state seen by rtsn_adjust_link() */
	int speed;	/* Current link speed in Mbps */

	int tx_data_irq;
	int rx_data_irq;

	/* Cached hardware timestamping configuration (enum hwtstamp_*). */
	u32 tstamp_tx_ctrl;
	u32 tstamp_rx_ctrl;
};
69 
70 static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
71 {
72 	return ioread32(priv->base + reg);
73 }
74 
75 static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
76 {
77 	iowrite32(data, priv->base + reg);
78 }
79 
80 static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
81 			u32 clear, u32 set)
82 {
83 	rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
84 }
85 
/* Poll @reg until (value & @mask) == @expected.
 *
 * Returns 0 on success or -ETIMEDOUT after RTSN_TIMEOUT_US, polling
 * every RTSN_INTERVAL_US.  May sleep; not usable from atomic context.
 */
static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
			 u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout(priv->base + reg, val,
				  (val & mask) == expected,
				  RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
}
95 
96 static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
97 {
98 	if (enable) {
99 		rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
100 		rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
101 	} else {
102 		rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
103 		rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
104 	}
105 }
106 
107 static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
108 {
109 	struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
110 
111 	ptp_priv->info.gettime64(&ptp_priv->info, ts);
112 }
113 
/* Reclaim TX descriptors and return the number of skbs freed.
 *
 * When @free_txed_only is true, only descriptors the hardware has marked
 * back as DT_FEMPTY are reclaimed; when false the whole outstanding range
 * is drained regardless of state (teardown path).  Called with priv->lock
 * held (see rtsn_poll()).
 */
static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct rtsn_ext_desc *desc;
	struct sk_buff *skb;
	int free_num = 0;
	int entry, size;

	for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
		entry = priv->dirty_tx % priv->num_tx_ring;
		desc = &priv->tx_ring[entry];
		/* Stop at the first descriptor still owned by hardware. */
		if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
			break;

		/* Read descriptor payload fields only after the type check. */
		dma_rmb();
		size = le16_to_cpu(desc->info_ds) & TX_DS;
		skb = priv->tx_skb[entry];
		if (skb) {
			/* Deliver the hardware TX timestamp if one was
			 * requested for this skb.
			 */
			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct timespec64 ts;

				rtsn_get_timestamp(priv, &ts);
				memset(&shhwtstamps, 0, sizeof(shhwtstamps));
				shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
				skb_tstamp_tx(skb, &shhwtstamps);
			}
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skb[entry]);
			free_num++;

			priv->stats.tx_packets++;
			priv->stats.tx_bytes += size;
		}

		desc->die_dt = DT_EEMPTY;
	}

	/* Keep the trailing link descriptor chaining back to ring start. */
	desc = &priv->tx_ring[priv->num_tx_ring];
	desc->die_dt = DT_LINK;

	return free_num;
}
159 
/* NAPI RX handler: pass up to @budget received frames to the stack and
 * refill the RX ring.  Returns the number of frames received.
 */
static int rtsn_rx(struct net_device *ndev, int budget)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	unsigned int ndescriptors;
	unsigned int rx_packets;
	unsigned int i;
	bool get_ts;

	get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;

	/* Number of ring entries not yet consumed by the CPU. */
	ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
	rx_packets = 0;
	for (i = 0; i < ndescriptors; i++) {
		const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 pkt_len;

		/* Stop processing descriptors if budget is consumed. */
		if (rx_packets >= budget)
			break;

		/* Stop processing descriptors on first empty. */
		if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
			break;

		/* Read descriptor payload fields only after the type check. */
		dma_rmb();
		pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;

		skb = priv->rx_skb[entry];
		priv->rx_skb[entry] = NULL;
		dma_addr = le32_to_cpu(desc->dptr);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

		/* Get timestamp if enabled. */
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));

			/* Low 30 bits of ts_nsec carry the nanoseconds. */
			ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));

			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}

		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);

		/* Update statistics. */
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += pkt_len;

		/* Update counters. */
		priv->cur_rx++;
		rx_packets++;
	}

	/* Refill the RX ring buffers */
	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		desc->info_ds = cpu_to_le16(PKT_BUF_SZ);

		if (!priv->rx_skb[entry]) {
			skb = napi_alloc_skb(&priv->napi,
					     PKT_BUF_SZ + RTSN_ALIGN - 1);
			if (!skb)
				break;
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->info_ds),
						  DMA_FROM_DEVICE);
			/* On mapping failure hand the descriptor back with a
			 * zero length so the hardware cannot DMA into it.
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->info_ds = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			skb_checksum_none_assert(skb);
			priv->rx_skb[entry] = skb;
		}

		/* Publish descriptor fields before granting ownership. */
		dma_wmb();
		desc->die_dt = DT_FEMPTY | D_DIE;
	}

	priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;

	return rx_packets;
}
256 
/* NAPI poll callback: receive up to @budget frames, reclaim completed TX
 * descriptors, and re-enable the data interrupts once all work is done.
 */
static int rtsn_poll(struct napi_struct *napi, int budget)
{
	struct rtsn_private *priv;
	struct net_device *ndev;
	unsigned long flags;
	int work_done;

	ndev = napi->dev;
	priv = netdev_priv(ndev);

	/* Processing RX Descriptor Ring */
	work_done = rtsn_rx(ndev, budget);

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
	rtsn_tx_free(ndev, true);
	netif_wake_subqueue(ndev, 0);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Re-enable TX/RX interrupts, under the lock so the unmask does not
	 * race with the interrupt handler's mask in rtsn_irq().
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		spin_lock_irqsave(&priv->lock, flags);
		rtsn_ctrl_data_irq(priv, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}
285 
/* Allocate the TX and RX descriptor base address tables (BAT) and mark
 * every entry end-of-set.  On failure the caller is expected to clean up
 * with rtsn_desc_free(), which copes with a partially allocated state.
 */
static int rtsn_desc_alloc(struct rtsn_private *priv)
{
	struct device *dev = &priv->pdev->dev;
	unsigned int i;

	priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
	priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
					       &priv->tx_desc_bat_dma,
					       GFP_KERNEL);

	if (!priv->tx_desc_bat)
		return -ENOMEM;

	for (i = 0; i < TX_NUM_CHAINS; i++)
		priv->tx_desc_bat[i].die_dt = DT_EOS;

	priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
	priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
					       &priv->rx_desc_bat_dma,
					       GFP_KERNEL);

	if (!priv->rx_desc_bat)
		return -ENOMEM;

	for (i = 0; i < RX_NUM_CHAINS; i++)
		priv->rx_desc_bat[i].die_dt = DT_EOS;

	return 0;
}
315 
316 static void rtsn_desc_free(struct rtsn_private *priv)
317 {
318 	if (priv->tx_desc_bat)
319 		dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
320 				  priv->tx_desc_bat, priv->tx_desc_bat_dma);
321 	priv->tx_desc_bat = NULL;
322 
323 	if (priv->rx_desc_bat)
324 		dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
325 				  priv->rx_desc_bat, priv->rx_desc_bat_dma);
326 	priv->rx_desc_bat = NULL;
327 }
328 
/* Free the TX/RX descriptor rings and their skb lookup arrays.
 * Used both on teardown and on the rtsn_chain_init() error path.
 */
static void rtsn_chain_free(struct rtsn_private *priv)
{
	struct device *dev = &priv->pdev->dev;

	/* Ring sizes include the trailing DT_LINK descriptor (+1). */
	dma_free_coherent(dev,
			  sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
			  priv->tx_ring, priv->tx_desc_dma);
	priv->tx_ring = NULL;

	dma_free_coherent(dev,
			  sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
			  priv->rx_ring, priv->rx_desc_dma);
	priv->rx_ring = NULL;

	kfree(priv->tx_skb);
	priv->tx_skb = NULL;

	kfree(priv->rx_skb);
	priv->rx_skb = NULL;
}
349 
/* Allocate the TX/RX descriptor rings, skb lookup arrays and initial RX
 * buffers for rings of @tx_size and @rx_size data entries.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so far
 * is released via rtsn_chain_free().
 */
static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int i;

	priv->num_tx_ring = tx_size;
	priv->num_rx_ring = rx_size;

	priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
	priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);

	if (!priv->rx_skb || !priv->tx_skb)
		goto error;

	/* Pre-populate the RX ring with receive buffers. */
	for (i = 0; i < rx_size; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
		if (!skb)
			goto error;
		skb_reserve(skb, NET_IP_ALIGN);
		priv->rx_skb[i] = skb;
	}

	/* Allocate TX, RX descriptors (+1 for the trailing link entry). */
	priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_desc) * (tx_size + 1),
					   &priv->tx_desc_dma, GFP_KERNEL);
	priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
					   &priv->rx_desc_dma, GFP_KERNEL);

	if (!priv->tx_ring || !priv->rx_ring)
		goto error;

	return 0;
error:
	rtsn_chain_free(priv);

	return -ENOMEM;
}
390 
/* Format the descriptor rings for hardware use.
 *
 * Each ring is laid out as num_*_ring data descriptors followed by one
 * DT_LINK descriptor that points back to the ring start, and the per-chain
 * base address table entry is pointed at the ring.  RX buffers are DMA
 * mapped here; resets all ring counters.
 */
static void rtsn_chain_format(struct rtsn_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct rtsn_ext_ts_desc *rx_desc;
	struct rtsn_ext_desc *tx_desc;
	struct rtsn_desc *bat_desc;
	dma_addr_t dma_addr;
	unsigned int i;

	priv->cur_tx = 0;
	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;

	/* TX */
	memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
	for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
		tx_desc->die_dt = DT_EEMPTY | D_DIE;

	/* Trailing link descriptor chains back to the ring start. */
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
	tx_desc->die_dt = DT_LINK;

	bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);

	/* RX */
	memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
	for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
		dma_addr = dma_map_single(ndev->dev.parent,
					  priv->rx_skb[i]->data, PKT_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* On mapping failure leave info_ds zero so the hardware
		 * cannot DMA into the buffer.
		 */
		if (!dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
		rx_desc->dptr = cpu_to_le32((u32)dma_addr);
		rx_desc->die_dt = DT_FEMPTY | D_DIE;
	}
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
	rx_desc->die_dt = DT_LINK;

	bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
}
435 
436 static int rtsn_dmac_init(struct rtsn_private *priv)
437 {
438 	int ret;
439 
440 	ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
441 	if (ret)
442 		return ret;
443 
444 	rtsn_chain_format(priv);
445 
446 	return 0;
447 }
448 
/* Return the current operation mode from the OSR_OPS field (bits start
 * at position 1, hence the shift).
 */
static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
{
	return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
}
453 
454 static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
455 {
456 	unsigned int i;
457 
458 	/* Need to busy loop as mode changes can happen in atomic context. */
459 	for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
460 		if (rtsn_read_mode(priv) == mode)
461 			return 0;
462 
463 		udelay(RTSN_INTERVAL_US);
464 	}
465 
466 	return -ETIMEDOUT;
467 }
468 
469 static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
470 {
471 	int ret;
472 
473 	rtsn_write(priv, OCR, mode);
474 	ret = rtsn_wait_mode(priv, mode);
475 	if (ret)
476 		netdev_err(priv->ndev, "Failed to switch operation mode\n");
477 	return ret;
478 }
479 
480 static int rtsn_get_data_irq_status(struct rtsn_private *priv)
481 {
482 	u32 val;
483 
484 	val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
485 	val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
486 
487 	return val;
488 }
489 
/* Interrupt handler shared by the TX and RX data IRQ lines: acknowledge
 * the data interrupts, mask them and hand processing over to NAPI.
 */
static irqreturn_t rtsn_irq(int irq, void *dev_id)
{
	struct rtsn_private *priv = dev_id;
	int ret = IRQ_NONE;

	spin_lock(&priv->lock);

	if (rtsn_get_data_irq_status(priv)) {
		/* Clear TX/RX irq status */
		rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
		rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));

		if (napi_schedule_prep(&priv->napi)) {
			/* Disable TX/RX interrupts until rtsn_poll() is done */
			rtsn_ctrl_data_irq(priv, false);

			__napi_schedule(&priv->napi);
		}

		ret = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

	return ret;
}
516 
517 static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
518 			    unsigned long flags, struct rtsn_private *priv,
519 			    const char *ch)
520 {
521 	char *name;
522 	int ret;
523 
524 	name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
525 			      priv->ndev->name, ch);
526 	if (!name)
527 		return -ENOMEM;
528 
529 	ret = request_irq(irq, handler, flags, name, priv);
530 	if (ret)
531 		netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
532 
533 	return ret;
534 }
535 
536 static void rtsn_free_irqs(struct rtsn_private *priv)
537 {
538 	free_irq(priv->tx_data_irq, priv);
539 	free_irq(priv->rx_data_irq, priv);
540 }
541 
542 static int rtsn_request_irqs(struct rtsn_private *priv)
543 {
544 	int ret;
545 
546 	priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
547 	if (priv->rx_data_irq < 0)
548 		return priv->rx_data_irq;
549 
550 	priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
551 	if (priv->tx_data_irq < 0)
552 		return priv->tx_data_irq;
553 
554 	ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
555 	if (ret)
556 		return ret;
557 
558 	ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
559 	if (ret) {
560 		free_irq(priv->tx_data_irq, priv);
561 		return ret;
562 	}
563 
564 	return 0;
565 }
566 
/* Pulse the device reset line and wait for the hardware to come back up
 * in the DISABLE operation mode.  The 1 ms delay gives the device time to
 * settle after reset before polling the mode.
 */
static int rtsn_reset(struct rtsn_private *priv)
{
	reset_control_reset(priv->reset);
	mdelay(1);

	return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
}
574 
/* Initialize the AXI bus master interface: wait for software reset
 * completion, program the AXI read/write configuration, load the TX and
 * RX descriptor chain tables into the hardware and enable the data
 * interrupts.  Register write order follows the hardware bring-up
 * sequence and must not be changed.
 */
static int rtsn_axibmi_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
	if (ret)
		return ret;

	/* Set AXIWC */
	rtsn_write(priv, AXIWC, AXIWC_DEFAULT);

	/* Set AXIRC */
	rtsn_write(priv, AXIRC, AXIRC_DEFAULT);

	/* TX Descriptor chain setting: point the hardware at the TX BAT and
	 * trigger the table load, then wait for the load to complete.
	 */
	rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
	rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, TATLR, TATLR_TATL);

	ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
	if (ret)
		return ret;

	/* RX Descriptor chain setting */
	rtsn_write(priv, RATLS0,
		   RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
	rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, RATLR, RATLR_RATL);

	ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
	if (ret)
		return ret;

	/* Enable TX/RX interrupts */
	rtsn_ctrl_data_irq(priv, true);

	return 0;
}
613 
/* Initialize the MAC handling block: TX queue/frame-size settings and the
 * RX filter that steers matching frames to this driver's RX chain.
 */
static void rtsn_mhd_init(struct rtsn_private *priv)
{
	/* TX General setting */
	rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
	rtsn_write(priv, TMS0, TMS_MFS_MAX);

	/* RX Filter IP */
	rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
	rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
}
624 
625 static int rtsn_get_phy_params(struct rtsn_private *priv)
626 {
627 	int ret;
628 
629 	ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
630 	if (ret)
631 		return ret;
632 
633 	switch (priv->iface) {
634 	case PHY_INTERFACE_MODE_MII:
635 		priv->speed = 100;
636 		break;
637 	case PHY_INTERFACE_MODE_RGMII:
638 	case PHY_INTERFACE_MODE_RGMII_ID:
639 	case PHY_INTERFACE_MODE_RGMII_RXID:
640 	case PHY_INTERFACE_MODE_RGMII_TXID:
641 		priv->speed = 1000;
642 		break;
643 	default:
644 		return -EOPNOTSUPP;
645 	}
646 
647 	return 0;
648 }
649 
650 static void rtsn_set_phy_interface(struct rtsn_private *priv)
651 {
652 	u32 val;
653 
654 	switch (priv->iface) {
655 	case PHY_INTERFACE_MODE_MII:
656 		val = MPIC_PIS_MII;
657 		break;
658 	case PHY_INTERFACE_MODE_RGMII:
659 	case PHY_INTERFACE_MODE_RGMII_ID:
660 	case PHY_INTERFACE_MODE_RGMII_RXID:
661 	case PHY_INTERFACE_MODE_RGMII_TXID:
662 		val = MPIC_PIS_GMII;
663 		break;
664 	default:
665 		return;
666 	}
667 
668 	rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
669 }
670 
671 static void rtsn_set_rate(struct rtsn_private *priv)
672 {
673 	u32 val;
674 
675 	switch (priv->speed) {
676 	case 10:
677 		val = MPIC_LSC_10M;
678 		break;
679 	case 100:
680 		val = MPIC_LSC_100M;
681 		break;
682 	case 1000:
683 		val = MPIC_LSC_1G;
684 		break;
685 	default:
686 		return;
687 	}
688 
689 	rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
690 }
691 
692 static int rtsn_rmac_init(struct rtsn_private *priv)
693 {
694 	const u8 *mac_addr = priv->ndev->dev_addr;
695 	int ret;
696 
697 	/* Set MAC address */
698 	rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
699 	rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
700 		   (mac_addr[4] << 8) | mac_addr[5]);
701 
702 	/* Set xMII type */
703 	rtsn_set_phy_interface(priv);
704 	rtsn_set_rate(priv);
705 
706 	/* Enable MII */
707 	rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
708 		    MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
709 
710 	/* Link verification */
711 	rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
712 	ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
713 	if (ret)
714 		return ret;
715 
716 	return ret;
717 }
718 
/* Full hardware bring-up: reset, configure in CONFIG mode and transition
 * to OPERATION.  Mode changes to/from CONFIG and OPERATION must pass
 * through DISABLE (see rtsn_adjust_link()), hence the intermediate
 * DISABLE step before the final switch.
 */
static int rtsn_hw_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_reset(priv);
	if (ret)
		return ret;

	/* Change to CONFIG mode */
	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
	if (ret)
		return ret;

	ret = rtsn_axibmi_init(priv);
	if (ret)
		return ret;

	rtsn_mhd_init(priv);

	ret = rtsn_rmac_init(priv);
	if (ret)
		return ret;

	ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
	if (ret)
		return ret;

	/* Change to OPERATION mode */
	ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);

	return ret;
}
751 
/* Perform one MDIO transaction on the device's station management
 * interface.
 *
 * For a read (@read true) returns the register value; for a write returns
 * 0.  Returns a negative errno if the transaction does not complete
 * within the poll timeout.
 */
static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
			   int regad, u16 data)
{
	struct rtsn_private *priv = bus->priv;
	u32 val;
	int ret;

	val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;

	if (!read)
		val |= MPSM_PSMAD | MPSM_PRD_SET(data);

	/* Start the transaction; hardware clears MPSM_PSME when done. */
	rtsn_write(priv, MPSM, val);

	ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
	if (ret)
		return ret;

	if (read)
		ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));

	return ret;
}
775 
/* mii_bus read accessor; see rtsn_mii_access(). */
static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
{
	return rtsn_mii_access(bus, true, addr, regnum, 0);
}

/* mii_bus write accessor; see rtsn_mii_access(). */
static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return rtsn_mii_access(bus, false, addr, regnum, val);
}
785 
/* Allocate and register the MDIO bus described by the "mdio" DT child
 * node.  The device is reset and placed in CONFIG mode with the MII
 * management clock configured before registration so that PHY probing
 * over MDIO works.  On success priv->mii is set; on failure the bus is
 * freed and a negative errno returned.
 */
static int rtsn_mdio_alloc(struct rtsn_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *mdio_node;
	struct mii_bus *mii;
	int ret;

	mii = mdiobus_alloc();
	if (!mii)
		return -ENOMEM;

	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
	if (!mdio_node) {
		ret = -ENODEV;
		goto out_free_bus;
	}

	/* Enter config mode before registering the MDIO bus */
	ret = rtsn_reset(priv);
	if (ret)
		goto out_free_bus;

	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
	if (ret)
		goto out_free_bus;

	/* Configure the MII management clock/hold time. */
	rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		    MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);

	/* Register the MDIO bus */
	mii->name = "rtsn_mii";
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);
	mii->priv = priv;
	mii->read = rtsn_mii_read;
	mii->write = rtsn_mii_write;
	mii->parent = dev;

	ret = of_mdiobus_register(mii, mdio_node);
	of_node_put(mdio_node);
	if (ret)
		goto out_free_bus;

	priv->mii = mii;

	return 0;

out_free_bus:
	mdiobus_free(mii);
	return ret;
}
838 
839 static void rtsn_mdio_free(struct rtsn_private *priv)
840 {
841 	mdiobus_unregister(priv->mii);
842 	mdiobus_free(priv->mii);
843 	priv->mii = NULL;
844 }
845 
/* phylib link-change callback: track link/speed changes and reprogram the
 * MAC rate.  Runs with priv->lock held while touching device state.
 */
static void rtsn_adjust_link(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
		}

		if (!priv->link) {
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		/* Link went down; forget the cached speed. */
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
	}

	if (new_state) {
		/* Need to transition to CONFIG mode before reconfiguring and
		 * then back to the original mode. Any state change to/from
		 * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx.
		 */
		enum rtsn_mode orgmode = rtsn_read_mode(priv);

		/* Transit to CONFIG */
		if (orgmode != OCR_OPC_CONFIG) {
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
				goto out;
		}

		rtsn_set_rate(priv);

		/* Transition to original mode */
		if (orgmode != OCR_OPC_CONFIG) {
			if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, orgmode))
				goto out;
		}
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Log outside the lock; phy_print_status() may sleep-print. */
	if (new_state)
		phy_print_status(phydev);
}
904 
905 static int rtsn_phy_init(struct rtsn_private *priv)
906 {
907 	struct device_node *np = priv->ndev->dev.parent->of_node;
908 	struct phy_device *phydev;
909 	struct device_node *phy;
910 
911 	priv->link = 0;
912 
913 	phy = of_parse_phandle(np, "phy-handle", 0);
914 	if (!phy)
915 		return -ENOENT;
916 
917 	phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
918 				priv->iface);
919 	of_node_put(phy);
920 	if (!phydev)
921 		return -ENOENT;
922 
923 	/* Only support full-duplex mode */
924 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
925 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
926 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
927 
928 	phy_attached_info(phydev);
929 
930 	return 0;
931 }
932 
933 static void rtsn_phy_deinit(struct rtsn_private *priv)
934 {
935 	phy_disconnect(priv->ndev->phydev);
936 	priv->ndev->phydev = NULL;
937 }
938 
/* Bring the device up: descriptors, DMA rings, hardware, PHY and IRQs,
 * in that order.  Uses a goto ladder so each failure releases exactly
 * what was acquired before it (the reverse of rtsn_deinit()).
 */
static int rtsn_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_desc_alloc(priv);
	if (ret)
		return ret;

	ret = rtsn_dmac_init(priv);
	if (ret)
		goto error_free_desc;

	ret = rtsn_hw_init(priv);
	if (ret)
		goto error_free_chain;

	ret = rtsn_phy_init(priv);
	if (ret)
		goto error_free_chain;

	ret = rtsn_request_irqs(priv);
	if (ret)
		goto error_free_phy;

	return 0;
error_free_phy:
	rtsn_phy_deinit(priv);
error_free_chain:
	rtsn_chain_free(priv);
error_free_desc:
	rtsn_desc_free(priv);
	return ret;
}
972 
/* Tear down everything acquired by rtsn_init(), in reverse order. */
static void rtsn_deinit(struct rtsn_private *priv)
{
	rtsn_free_irqs(priv);
	rtsn_phy_deinit(priv);
	rtsn_chain_free(priv);
	rtsn_desc_free(priv);
}
980 
981 static void rtsn_parse_mac_address(struct device_node *np,
982 				   struct net_device *ndev)
983 {
984 	struct rtsn_private *priv = netdev_priv(ndev);
985 	u8 addr[ETH_ALEN];
986 	u32 mrmac0;
987 	u32 mrmac1;
988 
989 	/* Try to read address from Device Tree. */
990 	if (!of_get_mac_address(np, addr)) {
991 		eth_hw_addr_set(ndev, addr);
992 		return;
993 	}
994 
995 	/* Try to read address from device. */
996 	mrmac0 = rtsn_read(priv, MRMAC0);
997 	mrmac1 = rtsn_read(priv, MRMAC1);
998 
999 	addr[0] = (mrmac0 >>  8) & 0xff;
1000 	addr[1] = (mrmac0 >>  0) & 0xff;
1001 	addr[2] = (mrmac1 >> 24) & 0xff;
1002 	addr[3] = (mrmac1 >> 16) & 0xff;
1003 	addr[4] = (mrmac1 >>  8) & 0xff;
1004 	addr[5] = (mrmac1 >>  0) & 0xff;
1005 
1006 	if (is_valid_ether_addr(addr)) {
1007 		eth_hw_addr_set(ndev, addr);
1008 		return;
1009 	}
1010 
1011 	/* Fallback to a random address */
1012 	eth_hw_addr_random(ndev);
1013 }
1014 
/* ndo_open: bring the interface up.  NAPI is enabled before rtsn_init()
 * because init requests the IRQs, which may fire immediately.
 */
static int rtsn_open(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	int ret;

	napi_enable(&priv->napi);

	ret = rtsn_init(priv);
	if (ret) {
		napi_disable(&priv->napi);
		return ret;
	}

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}
1034 
/* ndo_stop: stop the PHY and NAPI, put the hardware in DISABLE mode to
 * halt Rx/Tx, then release all resources.
 */
static int rtsn_stop(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);

	phy_stop(priv->ndev->phydev);
	napi_disable(&priv->napi);
	rtsn_change_mode(priv, OCR_OPC_DISABLE);
	rtsn_deinit(priv);

	return 0;
}
1046 
1047 static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1048 {
1049 	struct rtsn_private *priv = netdev_priv(ndev);
1050 	struct rtsn_ext_desc *desc;
1051 	int ret = NETDEV_TX_OK;
1052 	unsigned long flags;
1053 	dma_addr_t dma_addr;
1054 	int entry;
1055 
1056 	spin_lock_irqsave(&priv->lock, flags);
1057 
1058 	/* Drop packet if it won't fit in a single descriptor. */
1059 	if (skb->len >= TX_DS) {
1060 		priv->stats.tx_dropped++;
1061 		priv->stats.tx_errors++;
1062 		dev_kfree_skb_any(skb);
1063 		goto out;
1064 	}
1065 
1066 	if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
1067 		netif_stop_subqueue(ndev, 0);
1068 		ret = NETDEV_TX_BUSY;
1069 		goto out;
1070 	}
1071 
1072 	if (skb_put_padto(skb, ETH_ZLEN))
1073 		goto out;
1074 
1075 	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1076 				  DMA_TO_DEVICE);
1077 	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
1078 		dev_kfree_skb_any(skb);
1079 		goto out;
1080 	}
1081 
1082 	entry = priv->cur_tx % priv->num_tx_ring;
1083 	priv->tx_skb[entry] = skb;
1084 	desc = &priv->tx_ring[entry];
1085 	desc->dptr = cpu_to_le32(dma_addr);
1086 	desc->info_ds = cpu_to_le16(skb->len);
1087 	desc->info1 = cpu_to_le64(skb->len);
1088 
1089 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1090 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1091 		priv->ts_tag++;
1092 		desc->info_ds |= cpu_to_le16(TXC);
1093 		desc->info = priv->ts_tag;
1094 	}
1095 
1096 	skb_tx_timestamp(skb);
1097 	dma_wmb();
1098 
1099 	desc->die_dt = DT_FSINGLE | D_DIE;
1100 	priv->cur_tx++;
1101 
1102 	/* Start xmit */
1103 	rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
1104 out:
1105 	spin_unlock_irqrestore(&priv->lock, flags);
1106 	return ret;
1107 }
1108 
1109 static void rtsn_get_stats64(struct net_device *ndev,
1110 			     struct rtnl_link_stats64 *storage)
1111 {
1112 	struct rtsn_private *priv = netdev_priv(ndev);
1113 	*storage = priv->stats;
1114 }
1115 
1116 static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1117 {
1118 	if (!netif_running(ndev))
1119 		return -ENODEV;
1120 
1121 	return phy_do_ioctl_running(ndev, ifr, cmd);
1122 }
1123 
1124 static int rtsn_hwtstamp_get(struct net_device *ndev,
1125 			     struct kernel_hwtstamp_config *config)
1126 {
1127 	struct rtsn_private *priv;
1128 
1129 	if (!netif_running(ndev))
1130 		return -ENODEV;
1131 
1132 	priv = netdev_priv(ndev);
1133 
1134 	config->flags = 0;
1135 	config->tx_type = priv->tstamp_tx_ctrl;
1136 	config->rx_filter = priv->tstamp_rx_ctrl;
1137 
1138 	return 0;
1139 }
1140 
1141 static int rtsn_hwtstamp_set(struct net_device *ndev,
1142 			     struct kernel_hwtstamp_config *config,
1143 			     struct netlink_ext_ack *extack)
1144 {
1145 	enum hwtstamp_rx_filters tstamp_rx_ctrl;
1146 	enum hwtstamp_tx_types tstamp_tx_ctrl;
1147 	struct rtsn_private *priv;
1148 
1149 	if (!netif_running(ndev))
1150 		return -ENODEV;
1151 
1152 	priv = netdev_priv(ndev);
1153 
1154 	if (config->flags)
1155 		return -EINVAL;
1156 
1157 	switch (config->tx_type) {
1158 	case HWTSTAMP_TX_OFF:
1159 	case HWTSTAMP_TX_ON:
1160 		tstamp_tx_ctrl = config->tx_type;
1161 		break;
1162 	default:
1163 		return -ERANGE;
1164 	}
1165 
1166 	switch (config->rx_filter) {
1167 	case HWTSTAMP_FILTER_NONE:
1168 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1169 		tstamp_rx_ctrl = config->rx_filter;
1170 		break;
1171 	default:
1172 		config->rx_filter = HWTSTAMP_FILTER_ALL;
1173 		tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
1174 		break;
1175 	}
1176 
1177 	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1178 	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1179 
1180 	return 0;
1181 }
1182 
/* Network device callbacks for the Ethernet-TSN interface. */
static const struct net_device_ops rtsn_netdev_ops = {
	.ndo_open		= rtsn_open,
	.ndo_stop		= rtsn_stop,
	.ndo_start_xmit		= rtsn_start_xmit,
	.ndo_get_stats64	= rtsn_get_stats64,
	.ndo_eth_ioctl		= rtsn_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_hwtstamp_set	= rtsn_hwtstamp_set,
	.ndo_hwtstamp_get	= rtsn_hwtstamp_get,
};
1194 
1195 static int rtsn_get_ts_info(struct net_device *ndev,
1196 			    struct kernel_ethtool_ts_info *info)
1197 {
1198 	struct rtsn_private *priv = netdev_priv(ndev);
1199 
1200 	info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
1201 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1202 		SOF_TIMESTAMPING_TX_HARDWARE |
1203 		SOF_TIMESTAMPING_RX_HARDWARE |
1204 		SOF_TIMESTAMPING_RAW_HARDWARE;
1205 	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
1206 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1207 
1208 	return 0;
1209 }
1210 
/* Ethtool operations; link settings are delegated to the PHY layer. */
static const struct ethtool_ops rtsn_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= rtsn_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1218 
/* Devicetree match table; currently only the R-Car V4H Ethernet-TSN. */
static const struct of_device_id rtsn_match_table[] = {
	{ .compatible = "renesas,r8a779g0-ethertsn", },
	{ /* Sentinel */ }
};

MODULE_DEVICE_TABLE(of, rtsn_match_table);
1225 
1226 static int rtsn_probe(struct platform_device *pdev)
1227 {
1228 	struct rtsn_private *priv;
1229 	struct net_device *ndev;
1230 	struct resource *res;
1231 	int ret;
1232 
1233 	ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
1234 				  RX_NUM_CHAINS);
1235 	if (!ndev)
1236 		return -ENOMEM;
1237 
1238 	priv = netdev_priv(ndev);
1239 	priv->pdev = pdev;
1240 	priv->ndev = ndev;
1241 
1242 	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
1243 	if (!priv->ptp_priv) {
1244 		ret = -ENOMEM;
1245 		goto error_free;
1246 	}
1247 
1248 	spin_lock_init(&priv->lock);
1249 	platform_set_drvdata(pdev, priv);
1250 
1251 	priv->clk = devm_clk_get(&pdev->dev, NULL);
1252 	if (IS_ERR(priv->clk)) {
1253 		ret = PTR_ERR(priv->clk);
1254 		goto error_free;
1255 	}
1256 
1257 	priv->reset = devm_reset_control_get(&pdev->dev, NULL);
1258 	if (IS_ERR(priv->reset)) {
1259 		ret = PTR_ERR(priv->reset);
1260 		goto error_free;
1261 	}
1262 
1263 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
1264 	if (!res) {
1265 		dev_err(&pdev->dev, "Can't find tsnes resource\n");
1266 		ret = -EINVAL;
1267 		goto error_free;
1268 	}
1269 
1270 	priv->base = devm_ioremap_resource(&pdev->dev, res);
1271 	if (IS_ERR(priv->base)) {
1272 		ret = PTR_ERR(priv->base);
1273 		goto error_free;
1274 	}
1275 
1276 	SET_NETDEV_DEV(ndev, &pdev->dev);
1277 
1278 	ndev->features = NETIF_F_RXCSUM;
1279 	ndev->hw_features = NETIF_F_RXCSUM;
1280 	ndev->base_addr = res->start;
1281 	ndev->netdev_ops = &rtsn_netdev_ops;
1282 	ndev->ethtool_ops = &rtsn_ethtool_ops;
1283 
1284 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
1285 	if (!res) {
1286 		dev_err(&pdev->dev, "Can't find gptp resource\n");
1287 		ret = -EINVAL;
1288 		goto error_free;
1289 	}
1290 
1291 	priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
1292 	if (IS_ERR(priv->ptp_priv->addr)) {
1293 		ret = PTR_ERR(priv->ptp_priv->addr);
1294 		goto error_free;
1295 	}
1296 
1297 	ret = rtsn_get_phy_params(priv);
1298 	if (ret)
1299 		goto error_free;
1300 
1301 	pm_runtime_enable(&pdev->dev);
1302 	pm_runtime_get_sync(&pdev->dev);
1303 
1304 	netif_napi_add(ndev, &priv->napi, rtsn_poll);
1305 
1306 	rtsn_parse_mac_address(pdev->dev.of_node, ndev);
1307 
1308 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1309 
1310 	device_set_wakeup_capable(&pdev->dev, 1);
1311 
1312 	ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
1313 	if (ret)
1314 		goto error_pm;
1315 
1316 	ret = rtsn_mdio_alloc(priv);
1317 	if (ret)
1318 		goto error_ptp;
1319 
1320 	ret = register_netdev(ndev);
1321 	if (ret)
1322 		goto error_mdio;
1323 
1324 	netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
1325 
1326 	return 0;
1327 
1328 error_mdio:
1329 	rtsn_mdio_free(priv);
1330 error_ptp:
1331 	rcar_gen4_ptp_unregister(priv->ptp_priv);
1332 error_pm:
1333 	netif_napi_del(&priv->napi);
1334 	rtsn_change_mode(priv, OCR_OPC_DISABLE);
1335 	pm_runtime_put_sync(&pdev->dev);
1336 	pm_runtime_disable(&pdev->dev);
1337 error_free:
1338 	free_netdev(ndev);
1339 
1340 	return ret;
1341 }
1342 
/* Tear down the device in the reverse order of rtsn_probe(): netdev,
 * MDIO bus, gPTP clock, hardware disable, NAPI, then runtime PM and
 * finally the net_device itself.  The ordering is significant — the
 * netdev must be unregistered before its backing resources go away.
 */
static void rtsn_remove(struct platform_device *pdev)
{
	struct rtsn_private *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);
	rtsn_mdio_free(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);
	rtsn_change_mode(priv, OCR_OPC_DISABLE);
	netif_napi_del(&priv->napi);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	free_netdev(priv->ndev);
}
1358 
/* Platform driver glue; devices are bound via the OF match table. */
static struct platform_driver rtsn_driver = {
	.probe		= rtsn_probe,
	.remove		= rtsn_remove,
	.driver	= {
		.name	= "rtsn",
		.of_match_table	= rtsn_match_table,
	}
};
module_platform_driver(rtsn_driver);

MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
MODULE_LICENSE("GPL");
1372