1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Renesas Ethernet-TSN device driver
4 *
5 * Copyright (C) 2022 Renesas Electronics Corporation
6 * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
7 */
8
9 #include <linux/clk.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22 #include <linux/spinlock.h>
23
24 #include "rtsn.h"
25 #include "rcar_gen4_ptp.h"
26
/* Per-device driver state for the Renesas Ethernet-TSN controller. */
struct rtsn_private {
	struct net_device *ndev;
	struct platform_device *pdev;
	void __iomem *base;		/* Mapped register window */
	struct rcar_gen4_ptp_private *ptp_priv;	/* gPTP block used for HW timestamps */
	struct clk *clk;
	struct reset_control *reset;

	u32 num_tx_ring;		/* Number of TX data descriptors */
	u32 num_rx_ring;		/* Number of RX data descriptors */
	u32 tx_desc_bat_size;		/* TX base-address-table size in bytes */
	dma_addr_t tx_desc_bat_dma;
	struct rtsn_desc *tx_desc_bat;	/* TX descriptor base-address table */
	u32 rx_desc_bat_size;		/* RX base-address-table size in bytes */
	dma_addr_t rx_desc_bat_dma;
	struct rtsn_desc *rx_desc_bat;	/* RX descriptor base-address table */
	dma_addr_t tx_desc_dma;		/* DMA address of tx_ring */
	dma_addr_t rx_desc_dma;		/* DMA address of rx_ring */
	struct rtsn_ext_desc *tx_ring;
	struct rtsn_ext_ts_desc *rx_ring;
	struct sk_buff **tx_skb;	/* One skb slot per TX descriptor */
	struct sk_buff **rx_skb;	/* One skb slot per RX descriptor */
	spinlock_t lock; /* Register access lock */
	/* Free-running ring counters; entry = counter % num_*_ring. */
	u32 cur_tx;			/* Next TX slot to fill */
	u32 dirty_tx;			/* Oldest TX slot not yet reclaimed */
	u32 cur_rx;			/* Next RX slot to process */
	u32 dirty_rx;			/* Oldest RX slot not yet refilled */
	u8 ts_tag;			/* Tag written into TX timestamp descriptors */
	struct napi_struct napi;
	struct rtnl_link_stats64 stats;

	struct mii_bus *mii;
	phy_interface_t iface;		/* xMII mode from the device tree */
	int link;			/* Last PHY link state seen */
	int speed;			/* Current link speed in Mbps */

	int tx_data_irq;
	int rx_data_irq;

	u32 tstamp_tx_ctrl;		/* HWTSTAMP_TX_* currently configured */
	u32 tstamp_rx_ctrl;		/* HWTSTAMP_FILTER_* currently configured */
};
69
rtsn_read(struct rtsn_private * priv,enum rtsn_reg reg)70 static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
71 {
72 return ioread32(priv->base + reg);
73 }
74
/* Write a 32-bit device register. */
static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
{
	void __iomem *addr = priv->base + reg;

	iowrite32(data, addr);
}
79
/* Read-modify-write: clear the @clear bits, then set the @set bits. */
static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
			u32 clear, u32 set)
{
	u32 val;

	val = rtsn_read(priv, reg);
	val &= ~clear;
	val |= set;
	rtsn_write(priv, reg, val);
}
85
/* Poll @reg until (value & @mask) == @expected or the poll times out.
 * Returns 0 on success, negative errno on timeout. Uses the sleeping
 * poll helper, so callers must not be in atomic context (see
 * rtsn_wait_mode() for the busy-wait variant).
 */
static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
			 u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout(priv->base + reg, val,
				  (val & mask) == expected,
				  RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
}
95
/* Enable or disable the TX/RX data interrupts for the chains in use.
 * Enable and disable are done through separate registers (TDIE0/TDID0,
 * RDIE0/RDID0) carrying the same per-chain bit layout.
 */
static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
{
	enum rtsn_reg tx_reg = enable ? TDIE0 : TDID0;
	enum rtsn_reg rx_reg = enable ? RDIE0 : RDID0;

	rtsn_write(priv, tx_reg, TDIE_TDID_TDX(TX_CHAIN_IDX));
	rtsn_write(priv, rx_reg, RDIE_RDID_RDX(RX_CHAIN_IDX));
}
106
/* Reclaim completed TX descriptors: deliver pending TX timestamps, unmap
 * and free the transmitted skbs and return descriptors to the empty state.
 * With @free_txed_only the walk stops at the first descriptor the hardware
 * has not finished yet. Returns the number of skbs freed.
 *
 * Fix: clear priv->tx_skb[entry] after freeing the skb so the slot never
 * holds a stale pointer that a later teardown path could free again.
 */
static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct rtsn_ext_desc *desc;
	struct sk_buff *skb;
	int free_num = 0;
	int entry, size;

	for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
		entry = priv->dirty_tx % priv->num_tx_ring;
		desc = &priv->tx_ring[entry];
		if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
			break;

		/* Read descriptor fields only after the type check above. */
		dma_rmb();
		size = le16_to_cpu(desc->info_ds) & TX_DS;
		skb = priv->tx_skb[entry];
		if (skb) {
			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct timespec64 ts;

				rcar_gen4_ptp_gettime64(priv->ptp_priv, &ts);
				memset(&shhwtstamps, 0, sizeof(shhwtstamps));
				shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
				skb_tstamp_tx(skb, &shhwtstamps);
			}
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			/* Drop the stale pointer; guards against double free. */
			priv->tx_skb[entry] = NULL;
			free_num++;

			priv->stats.tx_packets++;
			priv->stats.tx_bytes += size;
		}

		desc->die_dt = DT_EEMPTY;
	}

	/* Keep the ring closed by the trailing link descriptor. */
	desc = &priv->tx_ring[priv->num_tx_ring];
	desc->die_dt = DT_LINK;

	return free_num;
}
152
/* NAPI RX worker: consume up to @budget completed RX descriptors, hand the
 * packets to the stack, then refill the consumed slots with fresh skbs.
 * Returns the number of packets received.
 */
static int rtsn_rx(struct net_device *ndev, int budget)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	unsigned int ndescriptors;
	unsigned int rx_packets;
	unsigned int i;
	bool get_ts;

	/* Any filter other than NONE means RX timestamping is on. */
	get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;

	/* Number of slots currently owned by the hardware. */
	ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
	rx_packets = 0;
	for (i = 0; i < ndescriptors; i++) {
		const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 pkt_len;

		/* Stop processing descriptors if budget is consumed. */
		if (rx_packets >= budget)
			break;

		/* Stop processing descriptors on first empty. */
		if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
			break;

		/* Read descriptor fields only after the type check above. */
		dma_rmb();
		pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;

		skb = priv->rx_skb[entry];
		priv->rx_skb[entry] = NULL;
		dma_addr = le32_to_cpu(desc->dptr);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

		/* Get timestamp if enabled. */
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));

			/* Nanoseconds live in the low 30 bits of ts_nsec. */
			ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));

			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}

		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);

		/* Update statistics. */
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += pkt_len;

		/* Update counters. */
		priv->cur_rx++;
		rx_packets++;
	}

	/* Refill the RX ring buffers */
	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
		struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		desc->info_ds = cpu_to_le16(PKT_BUF_SZ);

		if (!priv->rx_skb[entry]) {
			skb = napi_alloc_skb(&priv->napi,
					     PKT_BUF_SZ + RTSN_ALIGN - 1);
			if (!skb)
				break;
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->info_ds),
						  DMA_FROM_DEVICE);
			/* A zero length neuters the slot if mapping failed. */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->info_ds = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			skb_checksum_none_assert(skb);
			priv->rx_skb[entry] = skb;
		}

		/* Publish the descriptor body before flipping its type. */
		dma_wmb();
		desc->die_dt = DT_FEMPTY | D_DIE;
	}

	/* Keep the ring closed by the trailing link descriptor. */
	priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;

	return rx_packets;
}
249
rtsn_poll(struct napi_struct * napi,int budget)250 static int rtsn_poll(struct napi_struct *napi, int budget)
251 {
252 struct rtsn_private *priv;
253 struct net_device *ndev;
254 unsigned long flags;
255 int work_done;
256
257 ndev = napi->dev;
258 priv = netdev_priv(ndev);
259
260 /* Processing RX Descriptor Ring */
261 work_done = rtsn_rx(ndev, budget);
262
263 /* Processing TX Descriptor Ring */
264 spin_lock_irqsave(&priv->lock, flags);
265 rtsn_tx_free(ndev, true);
266 netif_wake_subqueue(ndev, 0);
267 spin_unlock_irqrestore(&priv->lock, flags);
268
269 /* Re-enable TX/RX interrupts */
270 if (work_done < budget && napi_complete_done(napi, work_done)) {
271 spin_lock_irqsave(&priv->lock, flags);
272 rtsn_ctrl_data_irq(priv, true);
273 spin_unlock_irqrestore(&priv->lock, flags);
274 }
275
276 return work_done;
277 }
278
rtsn_desc_alloc(struct rtsn_private * priv)279 static int rtsn_desc_alloc(struct rtsn_private *priv)
280 {
281 struct device *dev = &priv->pdev->dev;
282 unsigned int i;
283
284 priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
285 priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
286 &priv->tx_desc_bat_dma,
287 GFP_KERNEL);
288
289 if (!priv->tx_desc_bat)
290 return -ENOMEM;
291
292 for (i = 0; i < TX_NUM_CHAINS; i++)
293 priv->tx_desc_bat[i].die_dt = DT_EOS;
294
295 priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
296 priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
297 &priv->rx_desc_bat_dma,
298 GFP_KERNEL);
299
300 if (!priv->rx_desc_bat)
301 return -ENOMEM;
302
303 for (i = 0; i < RX_NUM_CHAINS; i++)
304 priv->rx_desc_bat[i].die_dt = DT_EOS;
305
306 return 0;
307 }
308
rtsn_desc_free(struct rtsn_private * priv)309 static void rtsn_desc_free(struct rtsn_private *priv)
310 {
311 if (priv->tx_desc_bat)
312 dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
313 priv->tx_desc_bat, priv->tx_desc_bat_dma);
314 priv->tx_desc_bat = NULL;
315
316 if (priv->rx_desc_bat)
317 dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
318 priv->rx_desc_bat, priv->rx_desc_bat_dma);
319 priv->rx_desc_bat = NULL;
320 }
321
rtsn_chain_free(struct rtsn_private * priv)322 static void rtsn_chain_free(struct rtsn_private *priv)
323 {
324 struct device *dev = &priv->pdev->dev;
325
326 dma_free_coherent(dev,
327 sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
328 priv->tx_ring, priv->tx_desc_dma);
329 priv->tx_ring = NULL;
330
331 dma_free_coherent(dev,
332 sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
333 priv->rx_ring, priv->rx_desc_dma);
334 priv->rx_ring = NULL;
335
336 kfree(priv->tx_skb);
337 priv->tx_skb = NULL;
338
339 kfree(priv->rx_skb);
340 priv->rx_skb = NULL;
341 }
342
/* Allocate the TX/RX descriptor rings, the per-slot skb arrays, and one
 * receive skb per RX slot. On any failure everything allocated so far is
 * released via rtsn_chain_free(). Returns 0 or -ENOMEM.
 */
static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int i;

	priv->num_tx_ring = tx_size;
	priv->num_rx_ring = rx_size;

	/* Zeroed arrays of skb pointers, one slot per data descriptor. */
	priv->tx_skb = kzalloc_objs(*priv->tx_skb, tx_size);
	priv->rx_skb = kzalloc_objs(*priv->rx_skb, rx_size);

	if (!priv->rx_skb || !priv->tx_skb)
		goto error;

	/* Pre-populate every RX slot with a buffer the hardware can fill. */
	for (i = 0; i < rx_size; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
		if (!skb)
			goto error;
		skb_reserve(skb, NET_IP_ALIGN);
		priv->rx_skb[i] = skb;
	}

	/* Allocate TX, RX descriptors; the extra entry (+1) holds the link
	 * descriptor that closes each ring.
	 */
	priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_desc) * (tx_size + 1),
					   &priv->tx_desc_dma, GFP_KERNEL);
	priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
					   &priv->rx_desc_dma, GFP_KERNEL);

	if (!priv->tx_ring || !priv->rx_ring)
		goto error;

	return 0;
error:
	rtsn_chain_free(priv);

	return -ENOMEM;
}
383
/* Format both descriptor rings for hardware use: every data descriptor is
 * put in the empty state, the trailing entry of each ring becomes a link
 * descriptor back to the ring start, and the base-address tables are
 * pointed at the rings. RX data buffers are DMA-mapped here.
 */
static void rtsn_chain_format(struct rtsn_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct rtsn_ext_ts_desc *rx_desc;
	struct rtsn_ext_desc *tx_desc;
	struct rtsn_desc *bat_desc;
	dma_addr_t dma_addr;
	unsigned int i;

	/* Reset the ring counters; entry = counter % ring size. */
	priv->cur_tx = 0;
	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;

	/* TX */
	memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
	for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
		tx_desc->die_dt = DT_EEMPTY | D_DIE;

	/* tx_desc now points at the extra entry: make it the ring link. */
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
	tx_desc->die_dt = DT_LINK;

	bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);

	/* RX */
	memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
	for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
		dma_addr = dma_map_single(ndev->dev.parent,
					  priv->rx_skb[i]->data, PKT_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* A slot whose mapping failed keeps info_ds == 0 (unusable). */
		if (!dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
		rx_desc->dptr = cpu_to_le32((u32)dma_addr);
		rx_desc->die_dt = DT_FEMPTY | D_DIE;
	}
	/* rx_desc now points at the extra entry: make it the ring link. */
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
	rx_desc->die_dt = DT_LINK;

	bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
	bat_desc->die_dt = DT_LINK;
	bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
}
428
rtsn_dmac_init(struct rtsn_private * priv)429 static int rtsn_dmac_init(struct rtsn_private *priv)
430 {
431 int ret;
432
433 ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
434 if (ret)
435 return ret;
436
437 rtsn_chain_format(priv);
438
439 return 0;
440 }
441
/* Return the current operating mode from the operation status register;
 * the OSR_OPS field is shifted down to line up with enum rtsn_mode.
 */
static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
{
	return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
}
446
rtsn_wait_mode(struct rtsn_private * priv,enum rtsn_mode mode)447 static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
448 {
449 unsigned int i;
450
451 /* Need to busy loop as mode changes can happen in atomic context. */
452 for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
453 if (rtsn_read_mode(priv) == mode)
454 return 0;
455
456 udelay(RTSN_INTERVAL_US);
457 }
458
459 return -ETIMEDOUT;
460 }
461
rtsn_change_mode(struct rtsn_private * priv,enum rtsn_mode mode)462 static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
463 {
464 int ret;
465
466 rtsn_write(priv, OCR, mode);
467 ret = rtsn_wait_mode(priv, mode);
468 if (ret)
469 netdev_err(priv->ndev, "Failed to switch operation mode\n");
470 return ret;
471 }
472
rtsn_get_data_irq_status(struct rtsn_private * priv)473 static int rtsn_get_data_irq_status(struct rtsn_private *priv)
474 {
475 u32 val;
476
477 val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
478 val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
479
480 return val;
481 }
482
/* Shared handler for the TX and RX data interrupts: acknowledge the
 * per-chain status bits, mask further data interrupts, and defer the real
 * work to NAPI. Returns IRQ_HANDLED only when one of our chains was
 * pending.
 */
static irqreturn_t rtsn_irq(int irq, void *dev_id)
{
	struct rtsn_private *priv = dev_id;
	int ret = IRQ_NONE;

	spin_lock(&priv->lock);

	if (rtsn_get_data_irq_status(priv)) {
		/* Clear TX/RX irq status */
		rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
		rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));

		if (napi_schedule_prep(&priv->napi)) {
			/* Disable TX/RX interrupts until rtsn_poll() re-arms */
			rtsn_ctrl_data_irq(priv, false);

			__napi_schedule(&priv->napi);
		}

		ret = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

	return ret;
}
509
/* Request @irq with a devm-allocated "<netdev>:<ch>" name so the IRQ is
 * identifiable in /proc/interrupts. Returns 0 or a negative errno.
 */
static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, struct rtsn_private *priv,
			    const char *ch)
{
	struct device *dev = &priv->pdev->dev;
	char *name;
	int ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", priv->ndev->name, ch);
	if (!name)
		return -ENOMEM;

	ret = request_irq(irq, handler, flags, name, priv);
	if (ret)
		netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);

	return ret;
}
528
rtsn_free_irqs(struct rtsn_private * priv)529 static void rtsn_free_irqs(struct rtsn_private *priv)
530 {
531 free_irq(priv->tx_data_irq, priv);
532 free_irq(priv->rx_data_irq, priv);
533 }
534
rtsn_request_irqs(struct rtsn_private * priv)535 static int rtsn_request_irqs(struct rtsn_private *priv)
536 {
537 int ret;
538
539 priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
540 if (priv->rx_data_irq < 0)
541 return priv->rx_data_irq;
542
543 priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
544 if (priv->tx_data_irq < 0)
545 return priv->tx_data_irq;
546
547 ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
548 if (ret)
549 return ret;
550
551 ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
552 if (ret) {
553 free_irq(priv->tx_data_irq, priv);
554 return ret;
555 }
556
557 return 0;
558 }
559
/* Pulse the reset line, give the IP a moment to settle, then wait until
 * the controller reports DISABLE mode. Returns 0 or -ETIMEDOUT.
 */
static int rtsn_reset(struct rtsn_private *priv)
{
	reset_control_reset(priv->reset);
	mdelay(1);

	return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
}
567
/* Initialise the AXI bus-master interface: wait for reset completion,
 * program the AXI read/write configuration, hand the TX/RX descriptor
 * base-address tables to the hardware and enable the data interrupts.
 * Must run while the controller is in CONFIG mode (see rtsn_hw_init()).
 */
static int rtsn_axibmi_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
	if (ret)
		return ret;

	/* Set AXIWC */
	rtsn_write(priv, AXIWC, AXIWC_DEFAULT);

	/* Set AXIRC */
	rtsn_write(priv, AXIRC, AXIRC_DEFAULT);

	/* TX Descriptor chain setting */
	rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
	rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, TATLR, TATLR_TATL);

	/* Hardware clears TATLR_TATL once the table load finished. */
	ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
	if (ret)
		return ret;

	/* RX Descriptor chain setting */
	rtsn_write(priv, RATLS0,
		   RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
	rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
	rtsn_write(priv, RATLR, RATLR_RATL);

	/* Hardware clears RATLR_RATL once the table load finished. */
	ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
	if (ret)
		return ret;

	/* Enable TX/RX interrupts */
	rtsn_ctrl_data_irq(priv, true);

	return 0;
}
606
/* Initialise the MAC hardware dispatcher: general TX settings (max frame
 * size, queue mode) and the RX filter routing towards our RX chain.
 */
static void rtsn_mhd_init(struct rtsn_private *priv)
{
	/* TX General setting */
	rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
	rtsn_write(priv, TMS0, TMS_MFS_MAX);

	/* RX Filter IP */
	rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
	rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
}
617
rtsn_get_phy_params(struct rtsn_private * priv)618 static int rtsn_get_phy_params(struct rtsn_private *priv)
619 {
620 int ret;
621
622 ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
623 if (ret)
624 return ret;
625
626 switch (priv->iface) {
627 case PHY_INTERFACE_MODE_MII:
628 priv->speed = 100;
629 break;
630 case PHY_INTERFACE_MODE_RGMII:
631 case PHY_INTERFACE_MODE_RGMII_ID:
632 case PHY_INTERFACE_MODE_RGMII_RXID:
633 case PHY_INTERFACE_MODE_RGMII_TXID:
634 priv->speed = 1000;
635 break;
636 default:
637 return -EOPNOTSUPP;
638 }
639
640 return 0;
641 }
642
rtsn_set_phy_interface(struct rtsn_private * priv)643 static void rtsn_set_phy_interface(struct rtsn_private *priv)
644 {
645 u32 val;
646
647 switch (priv->iface) {
648 case PHY_INTERFACE_MODE_MII:
649 val = MPIC_PIS_MII;
650 break;
651 case PHY_INTERFACE_MODE_RGMII:
652 case PHY_INTERFACE_MODE_RGMII_ID:
653 case PHY_INTERFACE_MODE_RGMII_RXID:
654 case PHY_INTERFACE_MODE_RGMII_TXID:
655 val = MPIC_PIS_GMII;
656 break;
657 default:
658 return;
659 }
660
661 rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
662 }
663
rtsn_set_rate(struct rtsn_private * priv)664 static void rtsn_set_rate(struct rtsn_private *priv)
665 {
666 u32 val;
667
668 switch (priv->speed) {
669 case 10:
670 val = MPIC_LSC_10M;
671 break;
672 case 100:
673 val = MPIC_LSC_100M;
674 break;
675 case 1000:
676 val = MPIC_LSC_1G;
677 break;
678 default:
679 return;
680 }
681
682 rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
683 }
684
rtsn_rmac_init(struct rtsn_private * priv)685 static int rtsn_rmac_init(struct rtsn_private *priv)
686 {
687 const u8 *mac_addr = priv->ndev->dev_addr;
688 int ret;
689
690 /* Set MAC address */
691 rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
692 rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
693 (mac_addr[4] << 8) | mac_addr[5]);
694
695 /* Set xMII type */
696 rtsn_set_phy_interface(priv);
697 rtsn_set_rate(priv);
698
699 /* Enable MII */
700 rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
701 MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
702
703 /* Link verification */
704 rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
705 ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
706 if (ret)
707 return ret;
708
709 return ret;
710 }
711
/* Full hardware bring-up: reset, configure in CONFIG mode (AXI interface,
 * MAC dispatcher, RMAC), then move to OPERATION mode. Transitions between
 * CONFIG and OPERATION must pass through DISABLE (see rtsn_adjust_link()).
 */
static int rtsn_hw_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_reset(priv);
	if (ret)
		return ret;

	/* Change to CONFIG mode */
	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
	if (ret)
		return ret;

	ret = rtsn_axibmi_init(priv);
	if (ret)
		return ret;

	rtsn_mhd_init(priv);

	ret = rtsn_rmac_init(priv);
	if (ret)
		return ret;

	/* Pass through DISABLE before leaving CONFIG. */
	ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
	if (ret)
		return ret;

	/* Change to OPERATION mode */
	ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);

	return ret;
}
744
/* Perform one MDIO transaction through the MPSM register. For writes the
 * data and the write opcode are loaded up front; the hardware clears
 * MPSM_PSME when the transaction completes. Returns the read value for
 * reads, 0 for writes, or a negative errno on timeout.
 */
static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
			   int regad, u16 data)
{
	struct rtsn_private *priv = bus->priv;
	u32 val;
	int ret;

	val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;

	if (!read)
		val |= MPSM_PSMAD | MPSM_PRD_SET(data);

	rtsn_write(priv, MPSM, val);

	ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
	if (ret)
		return ret;

	if (read)
		ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));

	return ret;
}
768
/* MDIO bus read callback: returns the register value or a negative errno. */
static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
{
	return rtsn_mii_access(bus, true, addr, regnum, 0);
}
773
/* MDIO bus write callback: returns 0 or a negative errno. */
static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return rtsn_mii_access(bus, false, addr, regnum, val);
}
778
/* Allocate and register the MDIO bus described by the "mdio" child node.
 * The controller must be put into CONFIG mode (and the MII management
 * clock configured) before the bus is registered, because registration
 * probes the PHYs over MDIO. Returns 0 or a negative errno.
 */
static int rtsn_mdio_alloc(struct rtsn_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *mdio_node;
	struct mii_bus *mii;
	int ret;

	mii = mdiobus_alloc();
	if (!mii)
		return -ENOMEM;

	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
	if (!mdio_node) {
		ret = -ENODEV;
		goto out_free_bus;
	}

	/* Enter config mode before registering the MDIO bus */
	ret = rtsn_reset(priv);
	if (ret)
		goto out_put_node;

	ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
	if (ret)
		goto out_put_node;

	/* Configure the MII management clock/hold time. */
	rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		    MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);

	/* Register the MDIO bus */
	mii->name = "rtsn_mii";
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);
	mii->priv = priv;
	mii->read = rtsn_mii_read;
	mii->write = rtsn_mii_write;
	mii->parent = dev;

	/* of_mdiobus_register() consumes our use of mdio_node here. */
	ret = of_mdiobus_register(mii, mdio_node);
	of_node_put(mdio_node);
	if (ret)
		goto out_free_bus;

	priv->mii = mii;

	return 0;

out_put_node:
	of_node_put(mdio_node);
out_free_bus:
	mdiobus_free(mii);
	return ret;
}
833
rtsn_mdio_free(struct rtsn_private * priv)834 static void rtsn_mdio_free(struct rtsn_private *priv)
835 {
836 mdiobus_unregister(priv->mii);
837 mdiobus_free(priv->mii);
838 priv->mii = NULL;
839 }
840
/* phylib adjust_link callback: track link/speed changes reported by the
 * PHY and reprogram the MAC rate when something changed. Called with the
 * PHY state machine's context; register access is serialized by
 * priv->lock.
 */
static void rtsn_adjust_link(struct net_device *ndev)
{
	struct rtsn_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
		}

		if (!priv->link) {
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
	}

	if (new_state) {
		/* Need to transition to CONFIG mode before reconfiguring and
		 * then back to the original mode. Any state change to/from
		 * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx.
		 */
		enum rtsn_mode orgmode = rtsn_read_mode(priv);

		/* Transit to CONFIG */
		if (orgmode != OCR_OPC_CONFIG) {
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
				goto out;
		}

		rtsn_set_rate(priv);

		/* Transition to original mode */
		if (orgmode != OCR_OPC_CONFIG) {
			if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
				goto out;
			if (orgmode != OCR_OPC_DISABLE &&
			    rtsn_change_mode(priv, orgmode))
				goto out;
		}
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Log outside the spinlock; phy_print_status() may sleep-print. */
	if (new_state)
		phy_print_status(phydev);
}
899
rtsn_phy_init(struct rtsn_private * priv)900 static int rtsn_phy_init(struct rtsn_private *priv)
901 {
902 struct device_node *np = priv->ndev->dev.parent->of_node;
903 struct phy_device *phydev;
904 struct device_node *phy;
905
906 priv->link = 0;
907
908 phy = of_parse_phandle(np, "phy-handle", 0);
909 if (!phy)
910 return -ENOENT;
911
912 phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
913 priv->iface);
914 of_node_put(phy);
915 if (!phydev)
916 return -ENOENT;
917
918 /* Only support full-duplex mode */
919 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
920 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
921 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
922
923 phy_attached_info(phydev);
924
925 return 0;
926 }
927
rtsn_phy_deinit(struct rtsn_private * priv)928 static void rtsn_phy_deinit(struct rtsn_private *priv)
929 {
930 phy_disconnect(priv->ndev->phydev);
931 priv->ndev->phydev = NULL;
932 }
933
/* Bring the device fully up: descriptor tables, DMA chains, hardware,
 * PHY, and finally the interrupts. On failure everything acquired so far
 * is unwound in reverse order via the goto chain.
 */
static int rtsn_init(struct rtsn_private *priv)
{
	int ret;

	ret = rtsn_desc_alloc(priv);
	if (ret)
		return ret;

	ret = rtsn_dmac_init(priv);
	if (ret)
		goto error_free_desc;

	ret = rtsn_hw_init(priv);
	if (ret)
		goto error_free_chain;

	ret = rtsn_phy_init(priv);
	if (ret)
		goto error_free_chain;

	/* IRQs last: the handler may fire as soon as they are requested. */
	ret = rtsn_request_irqs(priv);
	if (ret)
		goto error_free_phy;

	return 0;
error_free_phy:
	rtsn_phy_deinit(priv);
error_free_chain:
	rtsn_chain_free(priv);
error_free_desc:
	rtsn_desc_free(priv);
	return ret;
}
967
/* Tear down everything rtsn_init() set up, in reverse order. */
static void rtsn_deinit(struct rtsn_private *priv)
{
	rtsn_free_irqs(priv);
	rtsn_phy_deinit(priv);
	rtsn_chain_free(priv);
	rtsn_desc_free(priv);
}
975
rtsn_parse_mac_address(struct device_node * np,struct net_device * ndev)976 static void rtsn_parse_mac_address(struct device_node *np,
977 struct net_device *ndev)
978 {
979 struct rtsn_private *priv = netdev_priv(ndev);
980 u8 addr[ETH_ALEN];
981 u32 mrmac0;
982 u32 mrmac1;
983
984 /* Try to read address from Device Tree. */
985 if (!of_get_mac_address(np, addr)) {
986 eth_hw_addr_set(ndev, addr);
987 return;
988 }
989
990 /* Try to read address from device. */
991 mrmac0 = rtsn_read(priv, MRMAC0);
992 mrmac1 = rtsn_read(priv, MRMAC1);
993
994 addr[0] = (mrmac0 >> 8) & 0xff;
995 addr[1] = (mrmac0 >> 0) & 0xff;
996 addr[2] = (mrmac1 >> 24) & 0xff;
997 addr[3] = (mrmac1 >> 16) & 0xff;
998 addr[4] = (mrmac1 >> 8) & 0xff;
999 addr[5] = (mrmac1 >> 0) & 0xff;
1000
1001 if (is_valid_ether_addr(addr)) {
1002 eth_hw_addr_set(ndev, addr);
1003 return;
1004 }
1005
1006 /* Fallback to a random address */
1007 eth_hw_addr_random(ndev);
1008 }
1009
rtsn_open(struct net_device * ndev)1010 static int rtsn_open(struct net_device *ndev)
1011 {
1012 struct rtsn_private *priv = netdev_priv(ndev);
1013 int ret;
1014
1015 napi_enable(&priv->napi);
1016
1017 ret = rtsn_init(priv);
1018 if (ret) {
1019 napi_disable(&priv->napi);
1020 return ret;
1021 }
1022
1023 phy_start(ndev->phydev);
1024
1025 netif_start_queue(ndev);
1026
1027 return 0;
1028 }
1029
rtsn_stop(struct net_device * ndev)1030 static int rtsn_stop(struct net_device *ndev)
1031 {
1032 struct rtsn_private *priv = netdev_priv(ndev);
1033
1034 phy_stop(priv->ndev->phydev);
1035 napi_disable(&priv->napi);
1036 rtsn_change_mode(priv, OCR_OPC_DISABLE);
1037 rtsn_deinit(priv);
1038
1039 return 0;
1040 }
1041
rtsn_start_xmit(struct sk_buff * skb,struct net_device * ndev)1042 static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1043 {
1044 struct rtsn_private *priv = netdev_priv(ndev);
1045 struct rtsn_ext_desc *desc;
1046 int ret = NETDEV_TX_OK;
1047 unsigned long flags;
1048 dma_addr_t dma_addr;
1049 int entry;
1050
1051 spin_lock_irqsave(&priv->lock, flags);
1052
1053 /* Drop packet if it won't fit in a single descriptor. */
1054 if (skb->len >= TX_DS) {
1055 priv->stats.tx_dropped++;
1056 priv->stats.tx_errors++;
1057 dev_kfree_skb_any(skb);
1058 goto out;
1059 }
1060
1061 if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
1062 netif_stop_subqueue(ndev, 0);
1063 ret = NETDEV_TX_BUSY;
1064 goto out;
1065 }
1066
1067 if (skb_put_padto(skb, ETH_ZLEN))
1068 goto out;
1069
1070 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1071 DMA_TO_DEVICE);
1072 if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
1073 dev_kfree_skb_any(skb);
1074 goto out;
1075 }
1076
1077 entry = priv->cur_tx % priv->num_tx_ring;
1078 priv->tx_skb[entry] = skb;
1079 desc = &priv->tx_ring[entry];
1080 desc->dptr = cpu_to_le32(dma_addr);
1081 desc->info_ds = cpu_to_le16(skb->len);
1082 desc->info1 = cpu_to_le64(skb->len);
1083
1084 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1085 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1086 priv->ts_tag++;
1087 desc->info_ds |= cpu_to_le16(TXC);
1088 desc->info = priv->ts_tag;
1089 }
1090
1091 skb_tx_timestamp(skb);
1092 dma_wmb();
1093
1094 desc->die_dt = DT_FSINGLE | D_DIE;
1095 priv->cur_tx++;
1096
1097 /* Start xmit */
1098 rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
1099 out:
1100 spin_unlock_irqrestore(&priv->lock, flags);
1101 return ret;
1102 }
1103
rtsn_get_stats64(struct net_device * ndev,struct rtnl_link_stats64 * storage)1104 static void rtsn_get_stats64(struct net_device *ndev,
1105 struct rtnl_link_stats64 *storage)
1106 {
1107 struct rtsn_private *priv = netdev_priv(ndev);
1108 *storage = priv->stats;
1109 }
1110
rtsn_do_ioctl(struct net_device * ndev,struct ifreq * ifr,int cmd)1111 static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1112 {
1113 if (!netif_running(ndev))
1114 return -ENODEV;
1115
1116 return phy_do_ioctl_running(ndev, ifr, cmd);
1117 }
1118
rtsn_hwtstamp_get(struct net_device * ndev,struct kernel_hwtstamp_config * config)1119 static int rtsn_hwtstamp_get(struct net_device *ndev,
1120 struct kernel_hwtstamp_config *config)
1121 {
1122 struct rtsn_private *priv;
1123
1124 if (!netif_running(ndev))
1125 return -ENODEV;
1126
1127 priv = netdev_priv(ndev);
1128
1129 config->flags = 0;
1130 config->tx_type = priv->tstamp_tx_ctrl;
1131 config->rx_filter = priv->tstamp_rx_ctrl;
1132
1133 return 0;
1134 }
1135
rtsn_hwtstamp_set(struct net_device * ndev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)1136 static int rtsn_hwtstamp_set(struct net_device *ndev,
1137 struct kernel_hwtstamp_config *config,
1138 struct netlink_ext_ack *extack)
1139 {
1140 enum hwtstamp_rx_filters tstamp_rx_ctrl;
1141 enum hwtstamp_tx_types tstamp_tx_ctrl;
1142 struct rtsn_private *priv;
1143
1144 if (!netif_running(ndev))
1145 return -ENODEV;
1146
1147 priv = netdev_priv(ndev);
1148
1149 if (config->flags)
1150 return -EINVAL;
1151
1152 switch (config->tx_type) {
1153 case HWTSTAMP_TX_OFF:
1154 case HWTSTAMP_TX_ON:
1155 tstamp_tx_ctrl = config->tx_type;
1156 break;
1157 default:
1158 return -ERANGE;
1159 }
1160
1161 switch (config->rx_filter) {
1162 case HWTSTAMP_FILTER_NONE:
1163 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1164 tstamp_rx_ctrl = config->rx_filter;
1165 break;
1166 default:
1167 config->rx_filter = HWTSTAMP_FILTER_ALL;
1168 tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
1169 break;
1170 }
1171
1172 priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1173 priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1174
1175 return 0;
1176 }
1177
/* Network device callbacks wired into the net core. */
static const struct net_device_ops rtsn_netdev_ops = {
	.ndo_open		= rtsn_open,
	.ndo_stop		= rtsn_stop,
	.ndo_start_xmit		= rtsn_start_xmit,
	.ndo_get_stats64	= rtsn_get_stats64,
	.ndo_eth_ioctl		= rtsn_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_hwtstamp_set	= rtsn_hwtstamp_set,
	.ndo_hwtstamp_get	= rtsn_hwtstamp_get,
};
1189
rtsn_get_ts_info(struct net_device * ndev,struct kernel_ethtool_ts_info * info)1190 static int rtsn_get_ts_info(struct net_device *ndev,
1191 struct kernel_ethtool_ts_info *info)
1192 {
1193 struct rtsn_private *priv = netdev_priv(ndev);
1194
1195 info->phc_index = rcar_gen4_ptp_clock_index(priv->ptp_priv);
1196 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1197 SOF_TIMESTAMPING_TX_HARDWARE |
1198 SOF_TIMESTAMPING_RX_HARDWARE |
1199 SOF_TIMESTAMPING_RAW_HARDWARE;
1200 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
1201 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1202
1203 return 0;
1204 }
1205
/* ethtool callbacks; link management is delegated to phylib helpers. */
static const struct ethtool_ops rtsn_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= rtsn_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1213
/* Devicetree match table: R-Car V4H (R8A779G0) Ethernet-TSN endpoint. */
static const struct of_device_id rtsn_match_table[] = {
	{ .compatible = "renesas,r8a779g0-ethertsn", },
	{ /* Sentinel */ }
};

MODULE_DEVICE_TABLE(of, rtsn_match_table);
1220
rtsn_probe(struct platform_device * pdev)1221 static int rtsn_probe(struct platform_device *pdev)
1222 {
1223 struct rtsn_private *priv;
1224 struct net_device *ndev;
1225 void __iomem *ptpaddr;
1226 struct resource *res;
1227 int ret;
1228
1229 ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
1230 RX_NUM_CHAINS);
1231 if (!ndev)
1232 return -ENOMEM;
1233
1234 priv = netdev_priv(ndev);
1235 priv->pdev = pdev;
1236 priv->ndev = ndev;
1237
1238 spin_lock_init(&priv->lock);
1239 platform_set_drvdata(pdev, priv);
1240
1241 priv->clk = devm_clk_get(&pdev->dev, NULL);
1242 if (IS_ERR(priv->clk)) {
1243 ret = PTR_ERR(priv->clk);
1244 goto error_free;
1245 }
1246
1247 priv->reset = devm_reset_control_get(&pdev->dev, NULL);
1248 if (IS_ERR(priv->reset)) {
1249 ret = PTR_ERR(priv->reset);
1250 goto error_free;
1251 }
1252
1253 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
1254 if (!res) {
1255 dev_err(&pdev->dev, "Can't find tsnes resource\n");
1256 ret = -EINVAL;
1257 goto error_free;
1258 }
1259
1260 priv->base = devm_ioremap_resource(&pdev->dev, res);
1261 if (IS_ERR(priv->base)) {
1262 ret = PTR_ERR(priv->base);
1263 goto error_free;
1264 }
1265
1266 SET_NETDEV_DEV(ndev, &pdev->dev);
1267
1268 ndev->features = NETIF_F_RXCSUM;
1269 ndev->hw_features = NETIF_F_RXCSUM;
1270 ndev->base_addr = res->start;
1271 ndev->netdev_ops = &rtsn_netdev_ops;
1272 ndev->ethtool_ops = &rtsn_ethtool_ops;
1273
1274 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
1275 if (!res) {
1276 dev_err(&pdev->dev, "Can't find gptp resource\n");
1277 ret = -EINVAL;
1278 goto error_free;
1279 }
1280
1281 ptpaddr = devm_ioremap_resource(&pdev->dev, res);
1282 if (IS_ERR(ptpaddr)) {
1283 ret = PTR_ERR(ptpaddr);
1284 goto error_free;
1285 }
1286
1287 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev, ptpaddr);
1288 if (!priv->ptp_priv) {
1289 ret = -ENOMEM;
1290 goto error_free;
1291 }
1292
1293 ret = rtsn_get_phy_params(priv);
1294 if (ret)
1295 goto error_free;
1296
1297 pm_runtime_enable(&pdev->dev);
1298 pm_runtime_get_sync(&pdev->dev);
1299
1300 netif_napi_add(ndev, &priv->napi, rtsn_poll);
1301
1302 rtsn_parse_mac_address(pdev->dev.of_node, ndev);
1303
1304 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1305
1306 device_set_wakeup_capable(&pdev->dev, 1);
1307
1308 ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
1309 if (ret)
1310 goto error_pm;
1311
1312 ret = rtsn_mdio_alloc(priv);
1313 if (ret)
1314 goto error_ptp;
1315
1316 ret = register_netdev(ndev);
1317 if (ret)
1318 goto error_mdio;
1319
1320 netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
1321
1322 return 0;
1323
1324 error_mdio:
1325 rtsn_mdio_free(priv);
1326 error_ptp:
1327 rcar_gen4_ptp_unregister(priv->ptp_priv);
1328 error_pm:
1329 netif_napi_del(&priv->napi);
1330 rtsn_change_mode(priv, OCR_OPC_DISABLE);
1331 pm_runtime_put_sync(&pdev->dev);
1332 pm_runtime_disable(&pdev->dev);
1333 error_free:
1334 free_netdev(ndev);
1335
1336 return ret;
1337 }
1338
/* Remove callback: tear down in the reverse order of probe.
 * The netdev is unregistered first so no new traffic or ioctls can
 * arrive, then MDIO and PTP are released, the hardware is put into
 * the disabled operating mode, and finally runtime PM is dropped.
 */
static void rtsn_remove(struct platform_device *pdev)
{
	struct rtsn_private *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);
	rtsn_mdio_free(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);
	rtsn_change_mode(priv, OCR_OPC_DISABLE);
	netif_napi_del(&priv->napi);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	free_netdev(priv->ndev);
}
1354
/* Platform driver glue; bound via the OF match table above. */
static struct platform_driver rtsn_driver = {
	.probe		= rtsn_probe,
	.remove		= rtsn_remove,
	.driver	= {
		.name	= "rtsn",
		.of_match_table	= rtsn_match_table,
	}
};
module_platform_driver(rtsn_driver);

MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
MODULE_LICENSE("GPL");
1368