xref: /linux/drivers/net/ethernet/renesas/rtsn.c (revision 07d6bf634bc8f93caf8920c9d61df761645336e2)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Renesas Ethernet-TSN device driver
4  *
5  * Copyright (C) 2022 Renesas Electronics Corporation
6  * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
7  */
8 
9 #include <linux/clk.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22 #include <linux/spinlock.h>
23 
24 #include "rtsn.h"
25 #include "rcar_gen4_ptp.h"
26 
/* Per-device driver state, stored in netdev_priv() of the net_device. */
struct rtsn_private {
	struct net_device *ndev;
	struct platform_device *pdev;
	void __iomem *base;	/* Mapped register window */
	struct rcar_gen4_ptp_private *ptp_priv;	/* gPTP (PTP clock) helper state */
	struct clk *clk;
	struct reset_control *reset;

	/* Descriptor base-address tables (one entry per chain) and rings. */
	u32 num_tx_ring;
	u32 num_rx_ring;
	u32 tx_desc_bat_size;
	dma_addr_t tx_desc_bat_dma;
	struct rtsn_desc *tx_desc_bat;
	u32 rx_desc_bat_size;
	dma_addr_t rx_desc_bat_dma;
	struct rtsn_desc *rx_desc_bat;
	dma_addr_t tx_desc_dma;
	dma_addr_t rx_desc_dma;
	struct rtsn_ext_desc *tx_ring;		/* num_tx_ring entries + link desc */
	struct rtsn_ext_ts_desc *rx_ring;	/* num_rx_ring entries + link desc */
	struct sk_buff **tx_skb;	/* skb per TX ring slot (NULL if free) */
	struct sk_buff **rx_skb;	/* skb per RX ring slot */
	spinlock_t lock;	/* Register access lock */
	/* Ring cursors; entry index is (cur_* % num_*_ring). */
	u32 cur_tx;
	u32 dirty_tx;
	u32 cur_rx;
	u32 dirty_rx;
	u8 ts_tag;	/* Rolling tag matching TX timestamps to frames */
	struct napi_struct napi;
	struct rtnl_link_stats64 stats;

	struct mii_bus *mii;
	phy_interface_t iface;
	int link;
	int speed;

	int tx_data_irq;
	int rx_data_irq;
};
66 
rtsn_read(struct rtsn_private * priv,enum rtsn_reg reg)67 static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
68 {
69 	return ioread32(priv->base + reg);
70 }
71 
/* Write a 32-bit device register. */
static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
{
	void __iomem *addr = priv->base + reg;

	iowrite32(data, addr);
}
76 
/* Read-modify-write a register: clear the @clear bits, then set @set. */
static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
			u32 clear, u32 set)
{
	u32 val;

	val = rtsn_read(priv, reg);
	val &= ~clear;
	val |= set;
	rtsn_write(priv, reg, val);
}
82 
/* Poll @reg until (value & @mask) == @expected, or time out.
 *
 * Returns 0 on match, -ETIMEDOUT after RTSN_TIMEOUT_US. Polls every
 * RTSN_INTERVAL_US; may sleep, so only call from process context.
 */
static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
			 u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout(priv->base + reg, val,
				  (val & mask) == expected,
				  RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
}
92 
/* Enable or disable the TX/RX data interrupts for the driver's chains.
 *
 * The hardware uses separate enable (xDIE0) and disable (xDID0) registers,
 * so the same chain bit is written to one or the other depending on @enable.
 */
static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
{
	enum rtsn_reg tx_reg = enable ? TDIE0 : TDID0;
	enum rtsn_reg rx_reg = enable ? RDIE0 : RDID0;

	rtsn_write(priv, tx_reg, TDIE_TDID_TDX(TX_CHAIN_IDX));
	rtsn_write(priv, rx_reg, RDIE_RDID_RDX(RX_CHAIN_IDX));
}
103 
rtsn_get_timestamp(struct rtsn_private * priv,struct timespec64 * ts)104 static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
105 {
106 	struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
107 
108 	ptp_priv->info.gettime64(&ptp_priv->info, ts);
109 }
110 
/* Reclaim TX descriptors between dirty_tx and cur_tx.
 *
 * @free_txed_only: when true, stop at the first descriptor the hardware
 * has not completed yet (not DT_FEMPTY); when false, reclaim everything
 * (teardown path).
 *
 * For each completed slot: deliver a HW TX timestamp if one was requested,
 * unmap the DMA buffer, free the skb and update TX statistics. Returns the
 * number of skbs freed. Caller must hold priv->lock (register/ring lock).
 */
static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct rtsn_ext_desc *desc;
	struct sk_buff *skb;
	int free_num = 0;
	int entry, size;

	for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
		entry = priv->dirty_tx % priv->num_tx_ring;
		desc = &priv->tx_ring[entry];
		if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
			break;

		/* Order descriptor status read before reading its payload. */
		dma_rmb();
		size = le16_to_cpu(desc->info_ds) & TX_DS;
		skb = priv->tx_skb[entry];
		if (skb) {
			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct timespec64 ts;

				rtsn_get_timestamp(priv, &ts);
				memset(&shhwtstamps, 0, sizeof(shhwtstamps));
				shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
				skb_tstamp_tx(skb, &shhwtstamps);
			}
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			/* Clear the slot so a later teardown pass cannot
			 * free the same skb twice.
			 */
			priv->tx_skb[entry] = NULL;
			free_num++;

			priv->stats.tx_packets++;
			priv->stats.tx_bytes += size;
		}

		desc->die_dt = DT_EEMPTY;
	}

	/* Keep the trailing link descriptor terminating the chain. */
	desc = &priv->tx_ring[priv->num_tx_ring];
	desc->die_dt = DT_LINK;

	return free_num;
}
156 
/* NAPI RX handler: deliver up to @budget frames, then refill the ring.
 *
 * Pass 1 walks completed descriptors from cur_rx: unmaps the buffer,
 * optionally attaches the RX hardware timestamp carried in the extended
 * descriptor, and hands the skb to the stack via GRO. Pass 2 walks
 * dirty_rx..cur_rx and re-arms each slot with a fresh skb/DMA mapping.
 * Returns the number of packets delivered.
 */
static int rtsn_rx(struct net_device *ndev, int budget)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	unsigned int ndescriptors;
	unsigned int rx_packets;
	unsigned int i;
	bool get_ts;

	/* Timestamping of PTP event packets enabled by the user? */
	get_ts = priv->ptp_priv->tstamp_rx_ctrl &
		RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;

	/* Upper bound on slots that can hold a completed frame. */
	ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
	rx_packets = 0;
	for (i = 0; i < ndescriptors; i++) {
		const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 pkt_len;

		/* Stop processing descriptors if budget is consumed. */
		if (rx_packets >= budget)
			break;

		/* Stop processing descriptors on first empty. */
		if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
			break;

		/* Order status read before reading descriptor payload. */
		dma_rmb();
		pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;

		skb = priv->rx_skb[entry];
		priv->rx_skb[entry] = NULL;
		dma_addr = le32_to_cpu(desc->dptr);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

		/* Get timestamp if enabled. */
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));

			/* Nanoseconds live in the low 30 bits of ts_nsec. */
			ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));

			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}

		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);

		/* Update statistics. */
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += pkt_len;

		/* Update counters. */
		priv->cur_rx++;
		rx_packets++;
	}

	/* Refill the RX ring buffers */
	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		desc->info_ds = cpu_to_le16(PKT_BUF_SZ);

		if (!priv->rx_skb[entry]) {
			skb = napi_alloc_skb(&priv->napi,
					     PKT_BUF_SZ + RTSN_ALIGN - 1);
			if (!skb)
				break;
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->info_ds),
						  DMA_FROM_DEVICE);
			/* On mapping failure mark the slot zero-length so the
			 * hardware cannot DMA into an unmapped buffer.
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->info_ds = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			skb_checksum_none_assert(skb);
			priv->rx_skb[entry] = skb;
		}

		/* Publish the descriptor body before flipping ownership. */
		dma_wmb();
		desc->die_dt = DT_FEMPTY | D_DIE;
	}

	priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;

	return rx_packets;
}
254 
rtsn_poll(struct napi_struct * napi,int budget)255 static int rtsn_poll(struct napi_struct *napi, int budget)
256 {
257 	struct rtsn_private *priv;
258 	struct net_device *ndev;
259 	unsigned long flags;
260 	int work_done;
261 
262 	ndev = napi->dev;
263 	priv = netdev_priv(ndev);
264 
265 	/* Processing RX Descriptor Ring */
266 	work_done = rtsn_rx(ndev, budget);
267 
268 	/* Processing TX Descriptor Ring */
269 	spin_lock_irqsave(&priv->lock, flags);
270 	rtsn_tx_free(ndev, true);
271 	netif_wake_subqueue(ndev, 0);
272 	spin_unlock_irqrestore(&priv->lock, flags);
273 
274 	/* Re-enable TX/RX interrupts */
275 	if (work_done < budget && napi_complete_done(napi, work_done)) {
276 		spin_lock_irqsave(&priv->lock, flags);
277 		rtsn_ctrl_data_irq(priv, true);
278 		spin_unlock_irqrestore(&priv->lock, flags);
279 	}
280 
281 	return work_done;
282 }
283 
rtsn_desc_alloc(struct rtsn_private * priv)284 static int rtsn_desc_alloc(struct rtsn_private *priv)
285 {
286 	struct device *dev = &priv->pdev->dev;
287 	unsigned int i;
288 
289 	priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
290 	priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
291 					       &priv->tx_desc_bat_dma,
292 					       GFP_KERNEL);
293 
294 	if (!priv->tx_desc_bat)
295 		return -ENOMEM;
296 
297 	for (i = 0; i < TX_NUM_CHAINS; i++)
298 		priv->tx_desc_bat[i].die_dt = DT_EOS;
299 
300 	priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
301 	priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
302 					       &priv->rx_desc_bat_dma,
303 					       GFP_KERNEL);
304 
305 	if (!priv->rx_desc_bat)
306 		return -ENOMEM;
307 
308 	for (i = 0; i < RX_NUM_CHAINS; i++)
309 		priv->rx_desc_bat[i].die_dt = DT_EOS;
310 
311 	return 0;
312 }
313 
rtsn_desc_free(struct rtsn_private * priv)314 static void rtsn_desc_free(struct rtsn_private *priv)
315 {
316 	if (priv->tx_desc_bat)
317 		dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
318 				  priv->tx_desc_bat, priv->tx_desc_bat_dma);
319 	priv->tx_desc_bat = NULL;
320 
321 	if (priv->rx_desc_bat)
322 		dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
323 				  priv->rx_desc_bat, priv->rx_desc_bat_dma);
324 	priv->rx_desc_bat = NULL;
325 }
326 
/* Free the TX/RX descriptor rings and the per-slot skb pointer arrays.
 *
 * NOTE(review): any skbs still referenced by rx_skb[]/tx_skb[] (and their
 * DMA mappings) are not released here — presumably they are reclaimed
 * elsewhere before this runs; verify against the teardown paths.
 */
static void rtsn_chain_free(struct rtsn_private *priv)
{
	struct device *dev = &priv->pdev->dev;

	/* Rings are sized num_*_ring + 1 for the trailing link descriptor. */
	dma_free_coherent(dev,
			  sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
			  priv->tx_ring, priv->tx_desc_dma);
	priv->tx_ring = NULL;

	dma_free_coherent(dev,
			  sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
			  priv->rx_ring, priv->rx_desc_dma);
	priv->rx_ring = NULL;

	kfree(priv->tx_skb);
	priv->tx_skb = NULL;

	kfree(priv->rx_skb);
	priv->rx_skb = NULL;
}
347 
/* Allocate the TX/RX descriptor rings, skb pointer arrays and initial RX
 * buffers for rings of @tx_size/@rx_size entries.
 *
 * Each ring gets one extra descriptor for the trailing link entry.
 * Returns 0 or -ENOMEM; on failure everything allocated so far is
 * released via rtsn_chain_free().
 */
static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int i;

	priv->num_tx_ring = tx_size;
	priv->num_rx_ring = rx_size;

	priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
	priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);

	if (!priv->rx_skb || !priv->tx_skb)
		goto error;

	/* Pre-fill every RX slot with a receive buffer. */
	for (i = 0; i < rx_size; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
		if (!skb)
			goto error;
		skb_reserve(skb, NET_IP_ALIGN);
		priv->rx_skb[i] = skb;
	}

	/* Allocate TX, RX descriptors */
	priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_desc) * (tx_size + 1),
					   &priv->tx_desc_dma, GFP_KERNEL);
	priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
					   &priv->rx_desc_dma, GFP_KERNEL);

	if (!priv->tx_ring || !priv->rx_ring)
		goto error;

	return 0;
error:
	rtsn_chain_free(priv);

	return -ENOMEM;
}
388 
/* Format the TX/RX rings into circular descriptor chains.
 *
 * Resets the ring cursors, marks TX entries empty and arms RX entries
 * with their DMA-mapped buffers, terminates each ring with a DT_LINK
 * descriptor pointing back to the ring start, and points the per-chain
 * base-address-table entries at the rings.
 */
static void rtsn_chain_format(struct rtsn_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct rtsn_ext_ts_desc *rx_desc;
	struct rtsn_ext_desc *tx_desc;
	struct rtsn_desc *bat_desc;
	dma_addr_t dma_addr;
	unsigned int i;

	priv->cur_tx = 0;
	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;

	/* TX */
	memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
	for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
		tx_desc->die_dt = DT_EEMPTY | D_DIE;

	/* Loop terminates with tx_desc at the extra (num_tx_ring) slot:
	 * link back to the start of the ring.
	 */
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
	tx_desc->die_dt = DT_LINK;

	bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);

	/* RX */
	memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
	for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
		dma_addr = dma_map_single(ndev->dev.parent,
					  priv->rx_skb[i]->data, PKT_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* On mapping failure info_ds stays 0 so the slot receives
		 * nothing.
		 */
		if (!dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
		rx_desc->dptr = cpu_to_le32((u32)dma_addr);
		rx_desc->die_dt = DT_FEMPTY | D_DIE;
	}
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
	rx_desc->die_dt = DT_LINK;

	bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
}
433 
rtsn_dmac_init(struct rtsn_private * priv)434 static int rtsn_dmac_init(struct rtsn_private *priv)
435 {
436 	int ret;
437 
438 	ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
439 	if (ret)
440 		return ret;
441 
442 	rtsn_chain_format(priv);
443 
444 	return 0;
445 }
446 
rtsn_read_mode(struct rtsn_private * priv)447 static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
448 {
449 	return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
450 }
451 
rtsn_wait_mode(struct rtsn_private * priv,enum rtsn_mode mode)452 static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
453 {
454 	unsigned int i;
455 
456 	/* Need to busy loop as mode changes can happen in atomic context. */
457 	for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
458 		if (rtsn_read_mode(priv) == mode)
459 			return 0;
460 
461 		udelay(RTSN_INTERVAL_US);
462 	}
463 
464 	return -ETIMEDOUT;
465 }
466 
rtsn_change_mode(struct rtsn_private * priv,enum rtsn_mode mode)467 static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
468 {
469 	int ret;
470 
471 	rtsn_write(priv, OCR, mode);
472 	ret = rtsn_wait_mode(priv, mode);
473 	if (ret)
474 		netdev_err(priv->ndev, "Failed to switch operation mode\n");
475 	return ret;
476 }
477 
rtsn_get_data_irq_status(struct rtsn_private * priv)478 static int rtsn_get_data_irq_status(struct rtsn_private *priv)
479 {
480 	u32 val;
481 
482 	val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
483 	val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
484 
485 	return val;
486 }
487 
/* Interrupt handler shared by the TX and RX data IRQ lines.
 *
 * Acks the data interrupt status, masks further data interrupts and
 * defers the actual work to NAPI (rtsn_poll()), which re-enables them.
 */
static irqreturn_t rtsn_irq(int irq, void *dev_id)
{
	struct rtsn_private *priv = dev_id;
	int ret = IRQ_NONE;

	spin_lock(&priv->lock);

	if (rtsn_get_data_irq_status(priv)) {
		/* Clear TX/RX irq status */
		rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
		rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));

		if (napi_schedule_prep(&priv->napi)) {
			/* Disable TX/RX interrupts */
			rtsn_ctrl_data_irq(priv, false);

			__napi_schedule(&priv->napi);
		}

		ret = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

	return ret;
}
514 
/* Request @irq with a devm-allocated name of the form "<ifname>:<ch>". */
static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, struct rtsn_private *priv,
			    const char *ch)
{
	struct device *dev = &priv->pdev->dev;
	char *name;
	int ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", priv->ndev->name, ch);
	if (!name)
		return -ENOMEM;

	ret = request_irq(irq, handler, flags, name, priv);
	if (ret)
		netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);

	return ret;
}
533 
rtsn_free_irqs(struct rtsn_private * priv)534 static void rtsn_free_irqs(struct rtsn_private *priv)
535 {
536 	free_irq(priv->tx_data_irq, priv);
537 	free_irq(priv->rx_data_irq, priv);
538 }
539 
rtsn_request_irqs(struct rtsn_private * priv)540 static int rtsn_request_irqs(struct rtsn_private *priv)
541 {
542 	int ret;
543 
544 	priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
545 	if (priv->rx_data_irq < 0)
546 		return priv->rx_data_irq;
547 
548 	priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
549 	if (priv->tx_data_irq < 0)
550 		return priv->tx_data_irq;
551 
552 	ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
553 	if (ret)
554 		return ret;
555 
556 	ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
557 	if (ret) {
558 		free_irq(priv->tx_data_irq, priv);
559 		return ret;
560 	}
561 
562 	return 0;
563 }
564 
/* Pulse the reset line and wait until the device settles in DISABLE
 * mode. The 1 ms delay gives the hardware time to come out of reset
 * before polling.
 */
static int rtsn_reset(struct rtsn_private *priv)
{
	reset_control_reset(priv->reset);
	mdelay(1);

	return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
}
572 
/* Initialize the AXI bus-master interface: wait for software reset
 * completion, program the AXI read/write configuration, load the TX and
 * RX descriptor chain base addresses into the hardware and enable the
 * data interrupts. Returns 0 or a -ETIMEDOUT from register polling.
 */
static int rtsn_axibmi_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
	if (ret)
		return ret;

	/* Set AXIWC */
	rtsn_write(priv, AXIWC, AXIWC_DEFAULT);

	/* Set AXIRC */
	rtsn_write(priv, AXIRC, AXIRC_DEFAULT);

	/* TX Descriptor chain setting */
	rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
	rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, TATLR, TATLR_TATL);

	/* Hardware clears TATL when it has latched the table address. */
	ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
	if (ret)
		return ret;

	/* RX Descriptor chain setting */
	rtsn_write(priv, RATLS0,
		   RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
	rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, RATLR, RATLR_RATL);

	ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
	if (ret)
		return ret;

	/* Enable TX/RX interrupts */
	rtsn_ctrl_data_irq(priv, true);

	return 0;
}
611 
/* Initialize the MAC hardware dispatcher: TX general settings (queue
 * mode, max frame size) and the RX filter so matched frames land on the
 * driver's RX chain.
 */
static void rtsn_mhd_init(struct rtsn_private *priv)
{
	/* TX General setting */
	rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
	rtsn_write(priv, TMS0, TMS_MFS_MAX);

	/* RX Filter IP */
	rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
	rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
}
622 
rtsn_get_phy_params(struct rtsn_private * priv)623 static int rtsn_get_phy_params(struct rtsn_private *priv)
624 {
625 	int ret;
626 
627 	ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
628 	if (ret)
629 		return ret;
630 
631 	switch (priv->iface) {
632 	case PHY_INTERFACE_MODE_MII:
633 		priv->speed = 100;
634 		break;
635 	case PHY_INTERFACE_MODE_RGMII:
636 	case PHY_INTERFACE_MODE_RGMII_ID:
637 	case PHY_INTERFACE_MODE_RGMII_RXID:
638 	case PHY_INTERFACE_MODE_RGMII_TXID:
639 		priv->speed = 1000;
640 		break;
641 	default:
642 		return -EOPNOTSUPP;
643 	}
644 
645 	return 0;
646 }
647 
rtsn_set_phy_interface(struct rtsn_private * priv)648 static void rtsn_set_phy_interface(struct rtsn_private *priv)
649 {
650 	u32 val;
651 
652 	switch (priv->iface) {
653 	case PHY_INTERFACE_MODE_MII:
654 		val = MPIC_PIS_MII;
655 		break;
656 	case PHY_INTERFACE_MODE_RGMII:
657 	case PHY_INTERFACE_MODE_RGMII_ID:
658 	case PHY_INTERFACE_MODE_RGMII_RXID:
659 	case PHY_INTERFACE_MODE_RGMII_TXID:
660 		val = MPIC_PIS_GMII;
661 		break;
662 	default:
663 		return;
664 	}
665 
666 	rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
667 }
668 
rtsn_set_rate(struct rtsn_private * priv)669 static void rtsn_set_rate(struct rtsn_private *priv)
670 {
671 	u32 val;
672 
673 	switch (priv->speed) {
674 	case 10:
675 		val = MPIC_LSC_10M;
676 		break;
677 	case 100:
678 		val = MPIC_LSC_100M;
679 		break;
680 	case 1000:
681 		val = MPIC_LSC_1G;
682 		break;
683 	default:
684 		return;
685 	}
686 
687 	rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
688 }
689 
rtsn_rmac_init(struct rtsn_private * priv)690 static int rtsn_rmac_init(struct rtsn_private *priv)
691 {
692 	const u8 *mac_addr = priv->ndev->dev_addr;
693 	int ret;
694 
695 	/* Set MAC address */
696 	rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
697 	rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
698 		   (mac_addr[4] << 8) | mac_addr[5]);
699 
700 	/* Set xMII type */
701 	rtsn_set_phy_interface(priv);
702 	rtsn_set_rate(priv);
703 
704 	/* Enable MII */
705 	rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
706 		    MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
707 
708 	/* Link verification */
709 	rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
710 	ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
711 	if (ret)
712 		return ret;
713 
714 	return ret;
715 }
716 
rtsn_hw_init(struct rtsn_private * priv)717 static int rtsn_hw_init(struct rtsn_private *priv)
718 {
719 	int ret;
720 
721 	ret = rtsn_reset(priv);
722 	if (ret)
723 		return ret;
724 
725 	/* Change to CONFIG mode */
726 	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
727 	if (ret)
728 		return ret;
729 
730 	ret = rtsn_axibmi_init(priv);
731 	if (ret)
732 		return ret;
733 
734 	rtsn_mhd_init(priv);
735 
736 	ret = rtsn_rmac_init(priv);
737 	if (ret)
738 		return ret;
739 
740 	ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
741 	if (ret)
742 		return ret;
743 
744 	/* Change to OPERATION mode */
745 	ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);
746 
747 	return ret;
748 }
749 
/* Perform one MII management (MDIO) transaction via the MPSM register.
 *
 * @read selects a C22 read or write of @regad on PHY @phyad. PSME starts
 * the transaction and is cleared by hardware on completion. Returns the
 * read value, 0 on a successful write, or a negative error on timeout.
 */
static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
			   int regad, u16 data)
{
	struct rtsn_private *priv = bus->priv;
	u32 val;
	int ret;

	val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;

	if (!read)
		val |= MPSM_PSMAD | MPSM_PRD_SET(data);

	rtsn_write(priv, MPSM, val);

	ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
	if (ret)
		return ret;

	if (read)
		ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));

	return ret;
}
773 
/* mii_bus .read callback: C22 register read. */
static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
{
	return rtsn_mii_access(bus, true, addr, regnum, 0);
}
778 
/* mii_bus .write callback: C22 register write. */
static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return rtsn_mii_access(bus, false, addr, regnum, val);
}
783 
/* Allocate and register the MDIO bus described by the "mdio" DT child
 * node.
 *
 * The device must be reset and put into CONFIG mode with the MII
 * management clock configured before the bus is registered, since
 * registration probes the PHYs over MDIO. On success priv->mii is set;
 * on failure the bus is freed and an errno returned.
 */
static int rtsn_mdio_alloc(struct rtsn_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *mdio_node;
	struct mii_bus *mii;
	int ret;

	mii = mdiobus_alloc();
	if (!mii)
		return -ENOMEM;

	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
	if (!mdio_node) {
		ret = -ENODEV;
		goto out_free_bus;
	}

	/* Enter config mode before registering the MDIO bus */
	ret = rtsn_reset(priv);
	if (ret)
		goto out_free_bus;

	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
	if (ret)
		goto out_free_bus;

	/* Set the MII management clock divider and hold time. */
	rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		    MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);

	/* Register the MDIO bus */
	mii->name = "rtsn_mii";
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);
	mii->priv = priv;
	mii->read = rtsn_mii_read;
	mii->write = rtsn_mii_write;
	mii->parent = dev;

	ret = of_mdiobus_register(mii, mdio_node);
	of_node_put(mdio_node);
	if (ret)
		goto out_free_bus;

	priv->mii = mii;

	return 0;

out_free_bus:
	mdiobus_free(mii);
	return ret;
}
836 
rtsn_mdio_free(struct rtsn_private * priv)837 static void rtsn_mdio_free(struct rtsn_private *priv)
838 {
839 	mdiobus_unregister(priv->mii);
840 	mdiobus_free(priv->mii);
841 	priv->mii = NULL;
842 }
843 
/* phylib adjust_link callback: track link/speed changes reported by the
 * PHY and reprogram the MAC rate when they change.
 *
 * Reprogramming requires CONFIG mode, and any CONFIG<->OPERATION
 * transition must pass through DISABLE, so the current mode is saved,
 * the device walked down to CONFIG, the rate applied, and the original
 * mode restored. Runs with priv->lock held for the register sequence.
 */
static void rtsn_adjust_link(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
		}

		if (!priv->link) {
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		/* Link went down: forget the cached speed. */
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
	}

	if (new_state) {
		/* Need to transition to CONFIG mode before reconfiguring and
		 * then back to the original mode. Any state change to/from
		 * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx.
		 */
		enum rtsn_mode orgmode = rtsn_read_mode(priv);

		/* Transit to CONFIG */
		if (orgmode != OCR_OPC_CONFIG) {
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
				goto out;
		}

		rtsn_set_rate(priv);

		/* Transition to original mode */
		if (orgmode != OCR_OPC_CONFIG) {
			if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, orgmode))
				goto out;
		}
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);

	if (new_state)
		phy_print_status(phydev);
}
902 
rtsn_phy_init(struct rtsn_private * priv)903 static int rtsn_phy_init(struct rtsn_private *priv)
904 {
905 	struct device_node *np = priv->ndev->dev.parent->of_node;
906 	struct phy_device *phydev;
907 	struct device_node *phy;
908 
909 	priv->link = 0;
910 
911 	phy = of_parse_phandle(np, "phy-handle", 0);
912 	if (!phy)
913 		return -ENOENT;
914 
915 	phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
916 				priv->iface);
917 	of_node_put(phy);
918 	if (!phydev)
919 		return -ENOENT;
920 
921 	/* Only support full-duplex mode */
922 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
923 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
924 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
925 
926 	phy_attached_info(phydev);
927 
928 	return 0;
929 }
930 
rtsn_phy_deinit(struct rtsn_private * priv)931 static void rtsn_phy_deinit(struct rtsn_private *priv)
932 {
933 	phy_disconnect(priv->ndev->phydev);
934 	priv->ndev->phydev = NULL;
935 }
936 
/* Bring the interface up: descriptor tables, DMA chains, hardware, PHY
 * and IRQs, in that order. On failure everything initialized so far is
 * torn down via the goto cleanup chain. Returns 0 or an errno.
 */
static int rtsn_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_desc_alloc(priv);
	if (ret)
		return ret;

	ret = rtsn_dmac_init(priv);
	if (ret)
		goto error_free_desc;

	ret = rtsn_hw_init(priv);
	if (ret)
		goto error_free_chain;

	ret = rtsn_phy_init(priv);
	if (ret)
		goto error_free_chain;

	ret = rtsn_request_irqs(priv);
	if (ret)
		goto error_free_phy;

	return 0;
error_free_phy:
	rtsn_phy_deinit(priv);
error_free_chain:
	rtsn_chain_free(priv);
error_free_desc:
	rtsn_desc_free(priv);
	return ret;
}
970 
/* Tear down everything rtsn_init() set up, in reverse order. */
static void rtsn_deinit(struct rtsn_private *priv)
{
	rtsn_free_irqs(priv);
	rtsn_phy_deinit(priv);
	rtsn_chain_free(priv);
	rtsn_desc_free(priv);
}
978 
rtsn_parse_mac_address(struct device_node * np,struct net_device * ndev)979 static void rtsn_parse_mac_address(struct device_node *np,
980 				   struct net_device *ndev)
981 {
982 	struct rtsn_private *priv = netdev_priv(ndev);
983 	u8 addr[ETH_ALEN];
984 	u32 mrmac0;
985 	u32 mrmac1;
986 
987 	/* Try to read address from Device Tree. */
988 	if (!of_get_mac_address(np, addr)) {
989 		eth_hw_addr_set(ndev, addr);
990 		return;
991 	}
992 
993 	/* Try to read address from device. */
994 	mrmac0 = rtsn_read(priv, MRMAC0);
995 	mrmac1 = rtsn_read(priv, MRMAC1);
996 
997 	addr[0] = (mrmac0 >>  8) & 0xff;
998 	addr[1] = (mrmac0 >>  0) & 0xff;
999 	addr[2] = (mrmac1 >> 24) & 0xff;
1000 	addr[3] = (mrmac1 >> 16) & 0xff;
1001 	addr[4] = (mrmac1 >>  8) & 0xff;
1002 	addr[5] = (mrmac1 >>  0) & 0xff;
1003 
1004 	if (is_valid_ether_addr(addr)) {
1005 		eth_hw_addr_set(ndev, addr);
1006 		return;
1007 	}
1008 
1009 	/* Fallback to a random address */
1010 	eth_hw_addr_random(ndev);
1011 }
1012 
rtsn_open(struct net_device * ndev)1013 static int rtsn_open(struct net_device *ndev)
1014 {
1015 	struct rtsn_private *priv = netdev_priv(ndev);
1016 	int ret;
1017 
1018 	napi_enable(&priv->napi);
1019 
1020 	ret = rtsn_init(priv);
1021 	if (ret) {
1022 		napi_disable(&priv->napi);
1023 		return ret;
1024 	}
1025 
1026 	phy_start(ndev->phydev);
1027 
1028 	netif_start_queue(ndev);
1029 
1030 	return 0;
1031 }
1032 
rtsn_stop(struct net_device * ndev)1033 static int rtsn_stop(struct net_device *ndev)
1034 {
1035 	struct rtsn_private *priv = netdev_priv(ndev);
1036 
1037 	phy_stop(priv->ndev->phydev);
1038 	napi_disable(&priv->napi);
1039 	rtsn_change_mode(priv, OCR_OPC_DISABLE);
1040 	rtsn_deinit(priv);
1041 
1042 	return 0;
1043 }
1044 
rtsn_start_xmit(struct sk_buff * skb,struct net_device * ndev)1045 static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1046 {
1047 	struct rtsn_private *priv = netdev_priv(ndev);
1048 	struct rtsn_ext_desc *desc;
1049 	int ret = NETDEV_TX_OK;
1050 	unsigned long flags;
1051 	dma_addr_t dma_addr;
1052 	int entry;
1053 
1054 	spin_lock_irqsave(&priv->lock, flags);
1055 
1056 	/* Drop packet if it won't fit in a single descriptor. */
1057 	if (skb->len >= TX_DS) {
1058 		priv->stats.tx_dropped++;
1059 		priv->stats.tx_errors++;
1060 		dev_kfree_skb_any(skb);
1061 		goto out;
1062 	}
1063 
1064 	if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
1065 		netif_stop_subqueue(ndev, 0);
1066 		ret = NETDEV_TX_BUSY;
1067 		goto out;
1068 	}
1069 
1070 	if (skb_put_padto(skb, ETH_ZLEN))
1071 		goto out;
1072 
1073 	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1074 				  DMA_TO_DEVICE);
1075 	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
1076 		dev_kfree_skb_any(skb);
1077 		goto out;
1078 	}
1079 
1080 	entry = priv->cur_tx % priv->num_tx_ring;
1081 	priv->tx_skb[entry] = skb;
1082 	desc = &priv->tx_ring[entry];
1083 	desc->dptr = cpu_to_le32(dma_addr);
1084 	desc->info_ds = cpu_to_le16(skb->len);
1085 	desc->info1 = cpu_to_le64(skb->len);
1086 
1087 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1088 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1089 		priv->ts_tag++;
1090 		desc->info_ds |= cpu_to_le16(TXC);
1091 		desc->info = priv->ts_tag;
1092 	}
1093 
1094 	skb_tx_timestamp(skb);
1095 	dma_wmb();
1096 
1097 	desc->die_dt = DT_FSINGLE | D_DIE;
1098 	priv->cur_tx++;
1099 
1100 	/* Start xmit */
1101 	rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
1102 out:
1103 	spin_unlock_irqrestore(&priv->lock, flags);
1104 	return ret;
1105 }
1106 
rtsn_get_stats64(struct net_device * ndev,struct rtnl_link_stats64 * storage)1107 static void rtsn_get_stats64(struct net_device *ndev,
1108 			     struct rtnl_link_stats64 *storage)
1109 {
1110 	struct rtsn_private *priv = netdev_priv(ndev);
1111 	*storage = priv->stats;
1112 }
1113 
rtsn_do_ioctl(struct net_device * ndev,struct ifreq * ifr,int cmd)1114 static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1115 {
1116 	if (!netif_running(ndev))
1117 		return -ENODEV;
1118 
1119 	return phy_do_ioctl_running(ndev, ifr, cmd);
1120 }
1121 
rtsn_hwtstamp_get(struct net_device * ndev,struct kernel_hwtstamp_config * config)1122 static int rtsn_hwtstamp_get(struct net_device *ndev,
1123 			     struct kernel_hwtstamp_config *config)
1124 {
1125 	struct rcar_gen4_ptp_private *ptp_priv;
1126 	struct rtsn_private *priv;
1127 
1128 	if (!netif_running(ndev))
1129 		return -ENODEV;
1130 
1131 	priv = netdev_priv(ndev);
1132 	ptp_priv = priv->ptp_priv;
1133 
1134 	config->flags = 0;
1135 
1136 	config->tx_type =
1137 		ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1138 
1139 	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
1140 	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
1141 		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1142 		break;
1143 	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
1144 		config->rx_filter = HWTSTAMP_FILTER_ALL;
1145 		break;
1146 	default:
1147 		config->rx_filter = HWTSTAMP_FILTER_NONE;
1148 		break;
1149 	}
1150 
1151 	return 0;
1152 }
1153 
rtsn_hwtstamp_set(struct net_device * ndev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)1154 static int rtsn_hwtstamp_set(struct net_device *ndev,
1155 			     struct kernel_hwtstamp_config *config,
1156 			     struct netlink_ext_ack *extack)
1157 {
1158 	struct rcar_gen4_ptp_private *ptp_priv;
1159 	struct rtsn_private *priv;
1160 	u32 tstamp_rx_ctrl;
1161 	u32 tstamp_tx_ctrl;
1162 
1163 	if (!netif_running(ndev))
1164 		return -ENODEV;
1165 
1166 	priv = netdev_priv(ndev);
1167 	ptp_priv = priv->ptp_priv;
1168 
1169 	if (config->flags)
1170 		return -EINVAL;
1171 
1172 	switch (config->tx_type) {
1173 	case HWTSTAMP_TX_OFF:
1174 		tstamp_tx_ctrl = 0;
1175 		break;
1176 	case HWTSTAMP_TX_ON:
1177 		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
1178 		break;
1179 	default:
1180 		return -ERANGE;
1181 	}
1182 
1183 	switch (config->rx_filter) {
1184 	case HWTSTAMP_FILTER_NONE:
1185 		tstamp_rx_ctrl = 0;
1186 		break;
1187 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1188 		tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
1189 			RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
1190 		break;
1191 	default:
1192 		config->rx_filter = HWTSTAMP_FILTER_ALL;
1193 		tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
1194 			RCAR_GEN4_RXTSTAMP_TYPE_ALL;
1195 		break;
1196 	}
1197 
1198 	ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1199 	ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1200 
1201 	return 0;
1202 }
1203 
/* net_device callbacks. Hardware timestamping is configured through the
 * ndo_hwtstamp_set/get interface (kernel_hwtstamp_config) rather than
 * via SIOCSHWTSTAMP in ndo_eth_ioctl.
 */
static const struct net_device_ops rtsn_netdev_ops = {
	.ndo_open		= rtsn_open,
	.ndo_stop		= rtsn_stop,
	.ndo_start_xmit		= rtsn_start_xmit,
	.ndo_get_stats64	= rtsn_get_stats64,
	.ndo_eth_ioctl		= rtsn_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_hwtstamp_set	= rtsn_hwtstamp_set,
	.ndo_hwtstamp_get	= rtsn_hwtstamp_get,
};
1215 
rtsn_get_ts_info(struct net_device * ndev,struct kernel_ethtool_ts_info * info)1216 static int rtsn_get_ts_info(struct net_device *ndev,
1217 			    struct kernel_ethtool_ts_info *info)
1218 {
1219 	struct rtsn_private *priv = netdev_priv(ndev);
1220 
1221 	info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
1222 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1223 		SOF_TIMESTAMPING_TX_HARDWARE |
1224 		SOF_TIMESTAMPING_RX_HARDWARE |
1225 		SOF_TIMESTAMPING_RAW_HARDWARE;
1226 	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
1227 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1228 
1229 	return 0;
1230 }
1231 
/* ethtool callbacks; link settings are delegated to phylib. */
static const struct ethtool_ops rtsn_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= rtsn_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1239 
/* OF match table; currently only the R-Car V4H (r8a779g0) Ethernet-TSN
 * block is supported.
 */
static const struct of_device_id rtsn_match_table[] = {
	{ .compatible = "renesas,r8a779g0-ethertsn", },
	{ /* Sentinel */ }
};

MODULE_DEVICE_TABLE(of, rtsn_match_table);
1246 
rtsn_probe(struct platform_device * pdev)1247 static int rtsn_probe(struct platform_device *pdev)
1248 {
1249 	struct rtsn_private *priv;
1250 	struct net_device *ndev;
1251 	struct resource *res;
1252 	int ret;
1253 
1254 	ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
1255 				  RX_NUM_CHAINS);
1256 	if (!ndev)
1257 		return -ENOMEM;
1258 
1259 	priv = netdev_priv(ndev);
1260 	priv->pdev = pdev;
1261 	priv->ndev = ndev;
1262 	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
1263 
1264 	spin_lock_init(&priv->lock);
1265 	platform_set_drvdata(pdev, priv);
1266 
1267 	priv->clk = devm_clk_get(&pdev->dev, NULL);
1268 	if (IS_ERR(priv->clk)) {
1269 		ret = PTR_ERR(priv->clk);
1270 		goto error_free;
1271 	}
1272 
1273 	priv->reset = devm_reset_control_get(&pdev->dev, NULL);
1274 	if (IS_ERR(priv->reset)) {
1275 		ret = PTR_ERR(priv->reset);
1276 		goto error_free;
1277 	}
1278 
1279 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
1280 	if (!res) {
1281 		dev_err(&pdev->dev, "Can't find tsnes resource\n");
1282 		ret = -EINVAL;
1283 		goto error_free;
1284 	}
1285 
1286 	priv->base = devm_ioremap_resource(&pdev->dev, res);
1287 	if (IS_ERR(priv->base)) {
1288 		ret = PTR_ERR(priv->base);
1289 		goto error_free;
1290 	}
1291 
1292 	SET_NETDEV_DEV(ndev, &pdev->dev);
1293 
1294 	ndev->features = NETIF_F_RXCSUM;
1295 	ndev->hw_features = NETIF_F_RXCSUM;
1296 	ndev->base_addr = res->start;
1297 	ndev->netdev_ops = &rtsn_netdev_ops;
1298 	ndev->ethtool_ops = &rtsn_ethtool_ops;
1299 
1300 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
1301 	if (!res) {
1302 		dev_err(&pdev->dev, "Can't find gptp resource\n");
1303 		ret = -EINVAL;
1304 		goto error_free;
1305 	}
1306 
1307 	priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
1308 	if (IS_ERR(priv->ptp_priv->addr)) {
1309 		ret = PTR_ERR(priv->ptp_priv->addr);
1310 		goto error_free;
1311 	}
1312 
1313 	ret = rtsn_get_phy_params(priv);
1314 	if (ret)
1315 		goto error_free;
1316 
1317 	pm_runtime_enable(&pdev->dev);
1318 	pm_runtime_get_sync(&pdev->dev);
1319 
1320 	netif_napi_add(ndev, &priv->napi, rtsn_poll);
1321 
1322 	rtsn_parse_mac_address(pdev->dev.of_node, ndev);
1323 
1324 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1325 
1326 	device_set_wakeup_capable(&pdev->dev, 1);
1327 
1328 	ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
1329 				     clk_get_rate(priv->clk));
1330 	if (ret)
1331 		goto error_pm;
1332 
1333 	ret = rtsn_mdio_alloc(priv);
1334 	if (ret)
1335 		goto error_ptp;
1336 
1337 	ret = register_netdev(ndev);
1338 	if (ret)
1339 		goto error_mdio;
1340 
1341 	netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
1342 
1343 	return 0;
1344 
1345 error_mdio:
1346 	rtsn_mdio_free(priv);
1347 error_ptp:
1348 	rcar_gen4_ptp_unregister(priv->ptp_priv);
1349 error_pm:
1350 	netif_napi_del(&priv->napi);
1351 	rtsn_change_mode(priv, OCR_OPC_DISABLE);
1352 	pm_runtime_put_sync(&pdev->dev);
1353 	pm_runtime_disable(&pdev->dev);
1354 error_free:
1355 	free_netdev(ndev);
1356 
1357 	return ret;
1358 }
1359 
/* Teardown mirrors rtsn_probe() in reverse order: the netdev is
 * unregistered first so no new open/xmit can race the rest of the
 * teardown, then MDIO and PTP are released, the hardware is disabled
 * and runtime PM references are dropped before the netdev is freed.
 */
static void rtsn_remove(struct platform_device *pdev)
{
	struct rtsn_private *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);
	rtsn_mdio_free(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);
	rtsn_change_mode(priv, OCR_OPC_DISABLE);
	netif_napi_del(&priv->napi);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	free_netdev(priv->ndev);
}
1375 
/* Platform driver glue; devices are matched via rtsn_match_table. */
static struct platform_driver rtsn_driver = {
	.probe		= rtsn_probe,
	.remove		= rtsn_remove,
	.driver	= {
		.name	= "rtsn",
		.of_match_table	= rtsn_match_table,
	}
};
module_platform_driver(rtsn_driver);

MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
MODULE_LICENSE("GPL");
1389