// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>
#include <net/page_pool/helpers.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		 NETIF_MSG_TIMER | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}

int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

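	/* Busy-poll the register: up to 10000 reads spaced 10 us apart,
	 * i.e. roughly a 100 ms timeout.
	 */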
	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
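	/* CSR.OPS reports the current operating mode as a one-hot bit, so
	 * the expected status value is 1 << the requested CCC.OPC code.
	 */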
	u32 csr_ops = 1U << (opmode & CCC_OPC);
	u32 ccc_mask = CCC_OPC;
	int error;

	/* If gPTP active in config mode is supported it needs to be configured
	 * along with CSEL and operating mode in the same access. This is a
	 * hardware limitation.
	 */
	if (opmode & CCC_GAC)
		ccc_mask |= CCC_GAC | CCC_CSEL;

	/* Set operating mode */
	ravb_modify(ndev, CCC, ccc_mask, opmode);
	/* Check if the operating mode is changed to the requested one */
	error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
	if (error) {
		netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
			   opmode & CCC_OPC);
	}

	return error;
}

static void ravb_set_rate_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 10:	/* 10BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
		break;
	case 100:	/* 100BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
		break;
	case 1000:	/* 1000BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
		break;
	}
}

static void ravb_set_rate_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:	/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:	/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}

/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct device_node *np,
				  struct net_device *ndev)
{
	int ret;

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		u32 mahr = ravb_read(ndev, MAHR);
		u32 malr = ravb_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >> 8) & 0xFF;
		addr[3] = (mahr >> 0) & 0xFF;
		addr[4] = (malr >> 8) & 0xFF;
		addr[5] = (malr >> 0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}

static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};

static struct ravb_rx_desc *
ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
		 unsigned int i)
{
	return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
}

/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_tx_desc *desc;
	unsigned int entry;
	int free_num = 0;
	u32 size;

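	/* cur_tx and dirty_tx are free-running counters; the ring index is
	 * derived below by taking them modulo the descriptor ring size.
	 */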
	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		bool txed;

		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     num_tx_desc);
		desc = &priv->tx_ring[q][entry];
		txed = desc->die_dt == DT_FEMPTY;
		if (free_txed_only && !txed)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / num_tx_desc]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % num_tx_desc == num_tx_desc - 1) {
				entry /= num_tx_desc;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				if (txed)
					stats->tx_packets++;
			}
			free_num++;
		}
		if (txed)
			stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}

static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	if (!priv->rx_ring[q].raw)
		return;

	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
			  priv->rx_desc_dma[q]);
	priv->rx_ring[q].raw = NULL;
}

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	unsigned int ring_size;
	unsigned int i;

	ravb_rx_ring_free(ndev, q);

	if (priv->tx_ring[q]) {
		ravb_tx_free(ndev, q, false);

		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * num_tx_desc + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}

	/* Free RX buffers */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		if (priv->rx_buffers[q][i].page)
			page_pool_put_page(priv->rx_pool[q],
					   priv->rx_buffers[q][i].page,
					   0, true);
	}
	kfree(priv->rx_buffers[q]);
	priv->rx_buffers[q] = NULL;
	page_pool_destroy(priv->rx_pool[q]);

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;
}

static int
ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
		     struct ravb_rx_desc *rx_desc)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct ravb_rx_buffer *rx_buff;
	dma_addr_t dma_addr;
	unsigned int size;

	rx_buff = &priv->rx_buffers[q][entry];
	size = info->rx_buffer_size;
	rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
					&size, gfp_mask);
	if (unlikely(!rx_buff->page)) {
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		rx_desc->ds_cc = cpu_to_le16(0);
		return -ENOMEM;
	}

	dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
	dma_sync_single_for_device(ndev->dev.parent, dma_addr,
				   info->rx_buffer_size, DMA_FROM_DEVICE);
	rx_desc->dptr = cpu_to_le32(dma_addr);

	/* The end of the RX buffer is used to store skb shared data, so we need
	 * to ensure that the hardware leaves enough space for this.
	 */
	rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
				     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
				     ETH_FCS_LEN + sizeof(__sum16));
	return 0;
}

static u32
ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_rx_desc *rx_desc;
	u32 i, entry;

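	/* Walk forward from dirty_rx, re-arming up to 'count' descriptors and
	 * allocating a fresh buffer for any slot whose page was consumed.
	 */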
	for (i = 0; i < count; i++) {
		entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
		rx_desc = ravb_rx_get_desc(priv, q, entry);

		if (!priv->rx_buffers[q][entry].page) {
			if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
							  gfp_mask, rx_desc)))
				break;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		rx_desc->die_dt = DT_FEMPTY;
	}

	return i;
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_rx_desc *rx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
				    num_tx_desc;
	unsigned int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	/* Regular RX descriptors have already been initialized by
	 * ravb_rx_ring_refill(), we just need to initialize the final link
	 * descriptor.
	 */
	rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		if (num_tx_desc > 1) {
			tx_desc++;
			tx_desc->die_dt = DT_EEMPTY;
		}
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);

	priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
						  &priv->rx_desc_dma[q],
						  GFP_KERNEL);

	return priv->rx_ring[q].raw;
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
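	/* The pool is sized to the RX ring; PP_FLAG_DMA_MAP tells the page
	 * pool to DMA-map pages as they are allocated.
	 */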
	struct page_pool_params params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP,
		.pool_size = priv->num_rx_ring[q],
		.nid = NUMA_NO_NODE,
		.dev = ndev->dev.parent,
		.dma_dir = DMA_FROM_DEVICE,
	};
	unsigned int ring_size;
	u32 num_filled;

	/* Allocate RX page pool and buffers */
	priv->rx_pool[q] = page_pool_create(&params);
	if (IS_ERR(priv->rx_pool[q]))
		goto error;

	/* Allocate RX buffers */
	priv->rx_buffers[q] = kzalloc_objs(*priv->rx_buffers[q],
					   priv->num_rx_ring[q]);
	if (!priv->rx_buffers[q])
		goto error;

	/* Allocate TX skb rings */
	priv->tx_skb[q] = kzalloc_objs(*priv->tx_skb[q], priv->num_tx_ring[q]);
	if (!priv->tx_skb[q])
		goto error;

	/* Allocate all RX descriptors. */
	if (!ravb_alloc_rx_desc(ndev, q))
		goto error;

	/* Populate RX ring buffer. */
	priv->dirty_rx[q] = 0;
	ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
	memset(priv->rx_ring[q].raw, 0, ring_size);
	num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
					 GFP_KERNEL);
	if (num_filled != priv->num_rx_ring[q])
		goto error;

	if (num_tx_desc > 1) {
		/* Allocate rings for the aligned buffers */
		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
					    DPTR_ALIGN - 1, GFP_KERNEL);
		if (!priv->tx_align[q])
			goto error;
	}

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * num_tx_desc + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}

static void ravb_csum_init_gbeth(struct net_device *ndev)
{
	bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
	bool rx_enable = ndev->features & NETIF_F_RXCSUM;

	if (!(tx_enable || rx_enable))
		goto done;

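	/* The checksum modules must be disabled (and idle) before they can
	 * be reconfigured via CSR1/CSR2.
	 */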
	ravb_write(ndev, 0, CSR0);
	if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
		netdev_err(ndev, "Timeout enabling hardware checksum\n");

		if (tx_enable)
			ndev->features &= ~NETIF_F_HW_CSUM;

		if (rx_enable)
			ndev->features &= ~NETIF_F_RXCSUM;
	} else {
		if (tx_enable)
			ravb_write(ndev, CSR1_CSUM_ENABLE, CSR1);

		if (rx_enable)
			ravb_write(ndev, CSR2_CSUM_ENABLE, CSR2);
	}

done:
	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
}

static void ravb_emac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
	} else {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
			    CXR31_SEL_LINK0);
	}

	/* Receive frame limit set register */
	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
			 ECMR_TE | ECMR_RE | ECMR_RCPT |
			 ECMR_TXF | ECMR_RXF, ECMR);

	ravb_set_rate_gbeth(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);

	ravb_csum_init_gbeth(ndev);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
}

static void ravb_emac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	/* Set receive frame length
	 *
	 * The length set here describes the frame from the destination address
	 * up to and including the CRC data. However only the frame data,
	 * excluding the CRC, are transferred to memory. To allow for the
	 * largest frames add the CRC length to the maximum Rx descriptor size.
	 */
	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		   ECMR_TE | ECMR_RE, ECMR);

	ravb_set_rate_rcar(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;

	ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);

	ravb_emac_init_rcar(ndev);
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	info->emac_init(ndev);
}

static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);

	/* Set DMAC RX */
	ravb_write(ndev, 0x60000000, RCR);

	/* Set Max Frame Length (RTC) */
	ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);

	/* Set FIFO size */
	ravb_write(ndev, 0x00222200, TGC);

	ravb_write(ndev, 0, TCCR);

	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0x0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);

	ravb_write(ndev, TIC_FTE0, TIC);

	return 0;
}

static int ravb_dmac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

	/* Set AVB RX */
	ravb_write(ndev,
		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt init: */
	if (info->multi_irqs) {
		/* Clear DIL.DPLx */
		ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
	}
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	return 0;
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Clear transmission suspension */
	ravb_modify(ndev, CCC, CCC_DTSR, 0);

	/* Set CONFIG mode */
	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	if (error)
		return error;

	error = info->dmac_init(ndev);
	if (error)
		return error;

	/* Setting the control will start the AVB-DMAC process. */
	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}

static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

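	/* TSR.TFFL is the fill level of the TX timestamp FIFO. */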
	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
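		/* Walk the pending list up to the matching tag; entries queued
		 * earlier had their timestamps lost, so just free those skbs.
		 */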
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				dev_consume_skb_any(skb);
				break;
			} else {
				dev_kfree_skb_any(skb);
			}
		}
		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
	}
}

static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	size_t csum_len;
	u16 *hw_csum;

	/* The hardware checksum status is contained in 4 bytes appended to
	 * packet data.
	 *
	 * For ipv4, the first 2 bytes are the ip header checksum status. We can
	 * ignore this as it will always be re-checked in inet_gro_receive().
	 *
	 * The last 2 bytes are the protocol checksum status which will be zero
	 * if the checksum has been validated.
	 */
	csum_len = sizeof(*hw_csum) * 2;
	if (unlikely(skb->len < csum_len))
		return;

	if (skb_is_nonlinear(skb)) {
		skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1];

		hw_csum = (u16 *)(skb_frag_address(last_frag) +
				  skb_frag_size(last_frag));
		skb_frag_size_sub(last_frag, csum_len);
	} else {
		hw_csum = (u16 *)skb_tail_pointer(skb);
		skb_trim(skb, skb->len - csum_len);
	}

	if (!get_unaligned(--hw_csum))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static void ravb_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
	 * appended to packet data
	 */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}

/* Packet receive function for Gigabit Ethernet */
static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats;
	struct ravb_rx_desc *desc;
	int rx_packets = 0;
	u8 desc_status;
	u16 desc_len;
	u8 die_dt;
	int entry;
	int limit;
	int i;

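	/* Limit processing to the descriptors currently armed in the ring,
	 * i.e. those between cur_rx and dirty_rx plus the ring size.
	 */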
	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	stats = &priv->stats[q];

	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
		struct sk_buff *skb = NULL;

		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q].desc[entry];
		if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
			break;

		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!desc_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			struct ravb_rx_buffer *rx_buff;
			void *rx_addr;

			rx_buff = &priv->rx_buffers[q][entry];
			rx_addr = page_address(rx_buff->page) + rx_buff->offset;
			die_dt = desc->die_dt & 0xF0;
			dma_sync_single_for_cpu(ndev->dev.parent,
						le32_to_cpu(desc->dptr),
						desc_len, DMA_FROM_DEVICE);

			switch (die_dt) {
			case DT_FSINGLE:
			case DT_FSTART:
				/* Start of packet: Set initial data length. */
				skb = napi_build_skb(rx_addr,
						     info->rx_buffer_size);
				if (unlikely(!skb)) {
					stats->rx_errors++;
					page_pool_put_page(priv->rx_pool[q],
							   rx_buff->page, 0,
							   true);
					goto refill;
				}
				skb_mark_for_recycle(skb);
				skb_put(skb, desc_len);

				/* Save this skb if the packet spans multiple
				 * descriptors.
				 */
				if (die_dt == DT_FSTART)
					priv->rx_1st_skb = skb;
				break;

			case DT_FMID:
			case DT_FEND:
				/* Continuing a packet: Add this buffer as an RX
				 * frag.
				 */

				/* rx_1st_skb will be NULL if napi_build_skb()
				 * failed for the first descriptor of a
				 * multi-descriptor packet.
				 */
				if (unlikely(!priv->rx_1st_skb)) {
					stats->rx_errors++;
					page_pool_put_page(priv->rx_pool[q],
							   rx_buff->page, 0,
							   true);

					/* We may find a DT_FSINGLE or DT_FSTART
					 * descriptor in the queue which we can
					 * process, so don't give up yet.
					 */
					continue;
				}
				skb_add_rx_frag(priv->rx_1st_skb,
						skb_shinfo(priv->rx_1st_skb)->nr_frags,
						rx_buff->page, rx_buff->offset,
						desc_len, info->rx_buffer_size);

				/* Set skb to point at the whole packet so that
				 * we only need one code path for finishing a
				 * packet.
				 */
				skb = priv->rx_1st_skb;
			}

			switch (die_dt) {
			case DT_FSINGLE:
			case DT_FEND:
				/* Finishing a packet: Determine protocol &
				 * checksum, hand off to NAPI and update our
				 * stats.
				 */
				skb->protocol = eth_type_trans(skb, ndev);
				if (ndev->features & NETIF_F_RXCSUM)
					ravb_rx_csum_gbeth(skb);
				stats->rx_bytes += skb->len;
				napi_gro_receive(&priv->napi[q], skb);
				rx_packets++;

				/* Clear rx_1st_skb so that it will only be
				 * non-NULL when valid.
				 */
				priv->rx_1st_skb = NULL;
			}

			/* Mark this RX buffer as consumed. */
			rx_buff->page = NULL;
		}
	}

refill:
	/* Refill the RX ring buffers. */
	priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
						 priv->cur_rx[q] - priv->dirty_rx[q],
						 GFP_ATOMIC);

	stats->rx_packets += rx_packets;
	return rx_packets;
}

static void ravb_rx_rcar_hwstamp(struct ravb_private *priv, int q,
				 struct ravb_ex_rx_desc *desc,
				 struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct timespec64 ts;
	bool get_ts;

	if (q == RAVB_NC)
		get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
	else
		get_ts = priv->tstamp_rx_ctrl == HWTSTAMP_FILTER_ALL;

	if (!get_ts)
		return;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	ts.tv_sec = ((u64)le16_to_cpu(desc->ts_sh) << 32)
		    | le32_to_cpu(desc->ts_sl);
	ts.tv_nsec = le32_to_cpu(desc->ts_n);
	shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
}

/* Packet receive function for Ethernet AVB */
static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	unsigned int limit, i;
	struct sk_buff *skb;
	int rx_packets = 0;
	u8 desc_status;
	u16 pkt_len;
	int entry;

	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q].ex_desc[entry];
		if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
			break;

		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			struct ravb_rx_buffer *rx_buff;
			void *rx_addr;

			rx_buff = &priv->rx_buffers[q][entry];
			rx_addr = page_address(rx_buff->page) + rx_buff->offset;
			dma_sync_single_for_cpu(ndev->dev.parent,
						le32_to_cpu(desc->dptr),
						pkt_len, DMA_FROM_DEVICE);

			skb = napi_build_skb(rx_addr, info->rx_buffer_size);
			if (unlikely(!skb)) {
				stats->rx_errors++;
				page_pool_put_page(priv->rx_pool[q],
						   rx_buff->page, 0, true);
				break;
			}
			skb_mark_for_recycle(skb);

			ravb_rx_rcar_hwstamp(priv, q, desc, skb);

			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				ravb_rx_csum(skb);
			napi_gro_receive(&priv->napi[q], skb);
			rx_packets++;
			stats->rx_bytes += pkt_len;

			/* Mark this RX buffer as consumed. */
			rx_buff->page = NULL;
		}
	}

	/* Refill the RX ring buffers. */
	priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
						 priv->cur_rx[q] - priv->dirty_rx[q],
						 GFP_ATOMIC);

	stats->rx_packets += rx_packets;
	return rx_packets;
}

/* Packet receive function for Ethernet AVB */
static int ravb_rx(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	return info->receive(ndev, budget, q);
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* Function to wait until the DMA process has finished */
static int ravb_stop_dma(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);

	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Request for transmission suspension */
	ravb_modify(ndev, CCC, CCC_DTSR, CCC_DTSR);
	/* Access to URAM will not be suspended if WoL is enabled. */
	if (!priv->wol_enabled) {
		error = ravb_wait(ndev, CSR, CSR_DTS, CSR_DTS);
		if (error)
			netdev_err(ndev, "failed to stop AXI BUS\n");
	}

	/* Stop AVB-DMAC process */
	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */

	if (ecsr & ECSR_MPD)
		pm_wakeup_event(&priv->pdev->dev, 0);
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}

static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_HANDLED;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev))) {
		result = IRQ_NONE;
		goto out_rpm_put;
	}

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
			   RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}

static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis = ravb_read(ndev, TIS);
	u32 tic = ravb_read(ndev, TIC);

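	/* Only handle the queue if its RX or TX interrupt is both pending
	 * and currently enabled.
	 */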
	if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
			if (!info->irq_en_dis) {
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    " tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}

static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}

static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		if (info->nc_queues) {
			for (q = RAVB_NC; q >= RAVB_BE; q--) {
				if (ravb_queue_interrupt(ndev, q))
					result = IRQ_HANDLED;
			}
		} else {
			if (ravb_queue_interrupt(ndev, RAVB_BE))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}

static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned long flags;
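	/* The queue index is recovered from this NAPI context's position in
	 * the per-queue array.
	 */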
1392 int q = napi - priv->napi;
1393 int mask = BIT(q);
1394 int work_done;
1395
1396 /* Processing RX Descriptor Ring */
1397 /* Clear RX interrupt */
1398 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
1399 work_done = ravb_rx(ndev, budget, q);
1400
1401 /* Processing TX Descriptor Ring */
1402 spin_lock_irqsave(&priv->lock, flags);
1403 /* Clear TX interrupt */
1404 ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
1405 ravb_tx_free(ndev, q, true);
1406 netif_wake_subqueue(ndev, q);
1407 spin_unlock_irqrestore(&priv->lock, flags);
1408
1409 /* Receive error message handling */
1410 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
1411 if (info->nc_queues)
1412 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
1413 if (priv->rx_over_errors != ndev->stats.rx_over_errors)
1414 ndev->stats.rx_over_errors = priv->rx_over_errors;
1415 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
1416 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
1417
1418 if (work_done < budget && napi_complete_done(napi, work_done)) {
1419 /* Re-enable RX/TX interrupts */
1420 spin_lock_irqsave(&priv->lock, flags);
1421 if (!info->irq_en_dis) {
1422 ravb_modify(ndev, RIC0, mask, mask);
1423 ravb_modify(ndev, TIC, mask, mask);
1424 } else {
1425 ravb_write(ndev, mask, RIE0);
1426 ravb_write(ndev, mask, TIE);
1427 }
1428 spin_unlock_irqrestore(&priv->lock, flags);
1429 }
1430
1431 return work_done;
1432 }
1433
ravb_set_duplex_gbeth(struct net_device * ndev)1434 static void ravb_set_duplex_gbeth(struct net_device *ndev)
1435 {
1436 struct ravb_private *priv = netdev_priv(ndev);
1437
1438 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
1439 }
1440
1441 /* PHY state control function */
ravb_adjust_link(struct net_device * ndev)1442 static void ravb_adjust_link(struct net_device *ndev)
1443 {
1444 struct ravb_private *priv = netdev_priv(ndev);
1445 const struct ravb_hw_info *info = priv->info;
1446 struct phy_device *phydev = ndev->phydev;
1447 bool new_state = false;
1448 unsigned long flags;
1449
1450 spin_lock_irqsave(&priv->lock, flags);
1451
1452 /* Disable TX and RX right over here, if E-MAC change is ignored */
1453 if (priv->no_avb_link)
1454 ravb_rcv_snd_disable(ndev);
1455
1456 if (phydev->link) {
1457 if (info->half_duplex && phydev->duplex != priv->duplex) {
1458 new_state = true;
1459 priv->duplex = phydev->duplex;
1460 ravb_set_duplex_gbeth(ndev);
1461 }
1462
1463 if (phydev->speed != priv->speed) {
1464 new_state = true;
1465 priv->speed = phydev->speed;
1466 info->set_rate(ndev);
1467 }
1468 if (!priv->link) {
1469 ravb_modify(ndev, ECMR, ECMR_TXF, 0);
1470 new_state = true;
1471 priv->link = phydev->link;
1472 }
1473 } else if (priv->link) {
1474 new_state = true;
1475 priv->link = 0;
1476 priv->speed = 0;
1477 if (info->half_duplex)
1478 priv->duplex = -1;
1479 }
1480
1481 /* Enable TX and RX right over here, if E-MAC change is ignored */
1482 if (priv->no_avb_link && phydev->link)
1483 ravb_rcv_snd_enable(ndev);
1484
1485 spin_unlock_irqrestore(&priv->lock, flags);
1486
1487 if (new_state && netif_msg_link(priv))
1488 phy_print_status(phydev);
1489 }
1490
1491 /* PHY init function */
ravb_phy_init(struct net_device * ndev)1492 static int ravb_phy_init(struct net_device *ndev)
1493 {
1494 struct device_node *np = ndev->dev.parent->of_node;
1495 struct ravb_private *priv = netdev_priv(ndev);
1496 const struct ravb_hw_info *info = priv->info;
1497 struct phy_device *phydev;
1498 struct device_node *pn;
1499 phy_interface_t iface;
1500 int err;
1501
1502 priv->link = 0;
1503 priv->speed = 0;
1504 priv->duplex = -1;
1505
1506 /* Try connecting to PHY */
1507 pn = of_parse_phandle(np, "phy-handle", 0);
1508 if (!pn) {
1509 /* In the case of a fixed PHY, the DT node associated
1510 * to the PHY is the Ethernet MAC DT node.
1511 */
1512 if (of_phy_is_fixed_link(np)) {
1513 err = of_phy_register_fixed_link(np);
1514 if (err)
1515 return err;
1516 }
1517 pn = of_node_get(np);
1518 }
1519
1520 iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1521 : priv->phy_interface;
1522 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1523 of_node_put(pn);
1524 if (!phydev) {
1525 netdev_err(ndev, "failed to connect PHY\n");
1526 err = -ENOENT;
1527 goto err_deregister_fixed_link;
1528 }
1529
1530 if (!info->half_duplex) {
1531 /* 10BASE, Pause and Asym Pause is not supported */
1532 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1533 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1534 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1535 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
1536
1537 /* Half Duplex is not supported */
1538 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1539 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1540 }
1541
1542 phy_attached_info(phydev);
1543
1544 return 0;
1545
1546 err_deregister_fixed_link:
1547 if (of_phy_is_fixed_link(np))
1548 of_phy_deregister_fixed_link(np);
1549
1550 return err;
1551 }
1552
1553 /* PHY control start function */
ravb_phy_start(struct net_device * ndev)1554 static int ravb_phy_start(struct net_device *ndev)
1555 {
1556 int error;
1557
1558 error = ravb_phy_init(ndev);
1559 if (error)
1560 return error;
1561
1562 phy_start(ndev->phydev);
1563
1564 return 0;
1565 }
1566
ravb_get_msglevel(struct net_device * ndev)1567 static u32 ravb_get_msglevel(struct net_device *ndev)
1568 {
1569 struct ravb_private *priv = netdev_priv(ndev);
1570
1571 return priv->msg_enable;
1572 }
1573
ravb_set_msglevel(struct net_device * ndev,u32 value)1574 static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1575 {
1576 struct ravb_private *priv = netdev_priv(ndev);
1577
1578 priv->msg_enable = value;
1579 }
1580
1581 static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
1582 "rx_queue_0_current",
1583 "tx_queue_0_current",
1584 "rx_queue_0_dirty",
1585 "tx_queue_0_dirty",
1586 "rx_queue_0_packets",
1587 "tx_queue_0_packets",
1588 "rx_queue_0_bytes",
1589 "tx_queue_0_bytes",
1590 "rx_queue_0_mcast_packets",
1591 "rx_queue_0_errors",
1592 "rx_queue_0_crc_errors",
1593 "rx_queue_0_frame_errors",
1594 "rx_queue_0_length_errors",
1595 "rx_queue_0_csum_offload_errors",
1596 "rx_queue_0_over_errors",
1597 };
1598
1599 static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1600 "rx_queue_0_current",
1601 "tx_queue_0_current",
1602 "rx_queue_0_dirty",
1603 "tx_queue_0_dirty",
1604 "rx_queue_0_packets",
1605 "tx_queue_0_packets",
1606 "rx_queue_0_bytes",
1607 "tx_queue_0_bytes",
1608 "rx_queue_0_mcast_packets",
1609 "rx_queue_0_errors",
1610 "rx_queue_0_crc_errors",
1611 "rx_queue_0_frame_errors",
1612 "rx_queue_0_length_errors",
1613 "rx_queue_0_missed_errors",
1614 "rx_queue_0_over_errors",
1615
1616 "rx_queue_1_current",
1617 "tx_queue_1_current",
1618 "rx_queue_1_dirty",
1619 "tx_queue_1_dirty",
1620 "rx_queue_1_packets",
1621 "tx_queue_1_packets",
1622 "rx_queue_1_bytes",
1623 "tx_queue_1_bytes",
1624 "rx_queue_1_mcast_packets",
1625 "rx_queue_1_errors",
1626 "rx_queue_1_crc_errors",
1627 "rx_queue_1_frame_errors",
1628 "rx_queue_1_length_errors",
1629 "rx_queue_1_missed_errors",
1630 "rx_queue_1_over_errors",
1631 };
1632
ravb_get_sset_count(struct net_device * netdev,int sset)1633 static int ravb_get_sset_count(struct net_device *netdev, int sset)
1634 {
1635 struct ravb_private *priv = netdev_priv(netdev);
1636 const struct ravb_hw_info *info = priv->info;
1637
1638 switch (sset) {
1639 case ETH_SS_STATS:
1640 return info->stats_len;
1641 default:
1642 return -EOPNOTSUPP;
1643 }
1644 }
1645
ravb_get_ethtool_stats(struct net_device * ndev,struct ethtool_stats * estats,u64 * data)1646 static void ravb_get_ethtool_stats(struct net_device *ndev,
1647 struct ethtool_stats *estats, u64 *data)
1648 {
1649 struct ravb_private *priv = netdev_priv(ndev);
1650 const struct ravb_hw_info *info = priv->info;
1651 int num_rx_q;
1652 int i = 0;
1653 int q;
1654
1655 num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
1656 /* Device-specific stats */
1657 for (q = RAVB_BE; q < num_rx_q; q++) {
1658 struct net_device_stats *stats = &priv->stats[q];
1659
1660 data[i++] = priv->cur_rx[q];
1661 data[i++] = priv->cur_tx[q];
1662 data[i++] = priv->dirty_rx[q];
1663 data[i++] = priv->dirty_tx[q];
1664 data[i++] = stats->rx_packets;
1665 data[i++] = stats->tx_packets;
1666 data[i++] = stats->rx_bytes;
1667 data[i++] = stats->tx_bytes;
1668 data[i++] = stats->multicast;
1669 data[i++] = stats->rx_errors;
1670 data[i++] = stats->rx_crc_errors;
1671 data[i++] = stats->rx_frame_errors;
1672 data[i++] = stats->rx_length_errors;
1673 data[i++] = stats->rx_missed_errors;
1674 data[i++] = stats->rx_over_errors;
1675 }
1676 }
1677
ravb_get_strings(struct net_device * ndev,u32 stringset,u8 * data)1678 static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1679 {
1680 struct ravb_private *priv = netdev_priv(ndev);
1681 const struct ravb_hw_info *info = priv->info;
1682
1683 switch (stringset) {
1684 case ETH_SS_STATS:
1685 memcpy(data, info->gstrings_stats, info->gstrings_size);
1686 break;
1687 }
1688 }
1689
ravb_get_ringparam(struct net_device * ndev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)1690 static void ravb_get_ringparam(struct net_device *ndev,
1691 struct ethtool_ringparam *ring,
1692 struct kernel_ethtool_ringparam *kernel_ring,
1693 struct netlink_ext_ack *extack)
1694 {
1695 struct ravb_private *priv = netdev_priv(ndev);
1696
1697 ring->rx_max_pending = BE_RX_RING_MAX;
1698 ring->tx_max_pending = BE_TX_RING_MAX;
1699 ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1700 ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1701 }
1702
ravb_set_ringparam(struct net_device * ndev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)1703 static int ravb_set_ringparam(struct net_device *ndev,
1704 struct ethtool_ringparam *ring,
1705 struct kernel_ethtool_ringparam *kernel_ring,
1706 struct netlink_ext_ack *extack)
1707 {
1708 struct ravb_private *priv = netdev_priv(ndev);
1709 const struct ravb_hw_info *info = priv->info;
1710 int error;
1711
1712 if (ring->tx_pending > BE_TX_RING_MAX ||
1713 ring->rx_pending > BE_RX_RING_MAX ||
1714 ring->tx_pending < BE_TX_RING_MIN ||
1715 ring->rx_pending < BE_RX_RING_MIN)
1716 return -EINVAL;
1717 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1718 return -EINVAL;
1719
1720 if (netif_running(ndev)) {
1721 netif_device_detach(ndev);
1722 /* Stop PTP Clock driver */
1723 if (info->gptp)
1724 ravb_ptp_stop(ndev);
1725 /* Wait for DMA stopping */
1726 error = ravb_stop_dma(ndev);
1727 if (error) {
1728 netdev_err(ndev,
1729 "cannot set ringparam! Any AVB processes are still running?\n");
1730 return error;
1731 }
1732 synchronize_irq(ndev->irq);
1733
1734 /* Free all the skb's in the RX queue and the DMA buffers. */
1735 ravb_ring_free(ndev, RAVB_BE);
1736 if (info->nc_queues)
1737 ravb_ring_free(ndev, RAVB_NC);
1738 }
1739
1740 /* Set new parameters */
1741 priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1742 priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1743
1744 if (netif_running(ndev)) {
1745 error = ravb_dmac_init(ndev);
1746 if (error) {
1747 netdev_err(ndev,
1748 "%s: ravb_dmac_init() failed, error %d\n",
1749 __func__, error);
1750 return error;
1751 }
1752
1753 ravb_emac_init(ndev);
1754
1755 /* Initialise PTP Clock driver */
1756 if (info->gptp)
1757 ravb_ptp_init(ndev, priv->pdev);
1758
1759 netif_device_attach(ndev);
1760 }
1761
1762 return 0;
1763 }
1764
ravb_get_ts_info(struct net_device * ndev,struct kernel_ethtool_ts_info * info)1765 static int ravb_get_ts_info(struct net_device *ndev,
1766 struct kernel_ethtool_ts_info *info)
1767 {
1768 struct ravb_private *priv = netdev_priv(ndev);
1769 const struct ravb_hw_info *hw_info = priv->info;
1770
1771 if (hw_info->gptp || hw_info->ccc_gac) {
1772 info->so_timestamping =
1773 SOF_TIMESTAMPING_TX_SOFTWARE |
1774 SOF_TIMESTAMPING_TX_HARDWARE |
1775 SOF_TIMESTAMPING_RX_HARDWARE |
1776 SOF_TIMESTAMPING_RAW_HARDWARE;
1777 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1778 info->rx_filters =
1779 (1 << HWTSTAMP_FILTER_NONE) |
1780 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1781 (1 << HWTSTAMP_FILTER_ALL);
1782 info->phc_index = ptp_clock_index(priv->ptp.clock);
1783 }
1784
1785 return 0;
1786 }
1787
ravb_get_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)1788 static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1789 {
1790 struct ravb_private *priv = netdev_priv(ndev);
1791
1792 wol->supported = WAKE_MAGIC;
1793 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1794 }
1795
ravb_set_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)1796 static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1797 {
1798 struct ravb_private *priv = netdev_priv(ndev);
1799 const struct ravb_hw_info *info = priv->info;
1800
1801 if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
1802 return -EOPNOTSUPP;
1803
1804 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1805
1806 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1807
1808 return 0;
1809 }
1810
1811 static const struct ethtool_ops ravb_ethtool_ops = {
1812 .nway_reset = phy_ethtool_nway_reset,
1813 .get_msglevel = ravb_get_msglevel,
1814 .set_msglevel = ravb_set_msglevel,
1815 .get_link = ethtool_op_get_link,
1816 .get_strings = ravb_get_strings,
1817 .get_ethtool_stats = ravb_get_ethtool_stats,
1818 .get_sset_count = ravb_get_sset_count,
1819 .get_ringparam = ravb_get_ringparam,
1820 .set_ringparam = ravb_set_ringparam,
1821 .get_ts_info = ravb_get_ts_info,
1822 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1823 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1824 .get_wol = ravb_get_wol,
1825 .set_wol = ravb_set_wol,
1826 };
1827
ravb_set_config_mode(struct net_device * ndev)1828 static int ravb_set_config_mode(struct net_device *ndev)
1829 {
1830 struct ravb_private *priv = netdev_priv(ndev);
1831 const struct ravb_hw_info *info = priv->info;
1832 int error;
1833
1834 if (info->gptp) {
1835 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
1836 if (error)
1837 return error;
1838 /* Set CSEL value */
1839 ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
1840 } else if (info->ccc_gac) {
1841 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
1842 } else {
1843 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
1844 }
1845
1846 return error;
1847 }
1848
ravb_set_gti(struct net_device * ndev)1849 static void ravb_set_gti(struct net_device *ndev)
1850 {
1851 struct ravb_private *priv = netdev_priv(ndev);
1852 const struct ravb_hw_info *info = priv->info;
1853
1854 if (!(info->gptp || info->ccc_gac))
1855 return;
1856
1857 ravb_write(ndev, priv->gti_tiv, GTI);
1858
1859 /* Request GTI loading */
1860 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
1861 }
1862
1863 static int ravb_compute_gti(struct net_device *ndev)
1864 {
1865 struct ravb_private *priv = netdev_priv(ndev);
1866 const struct ravb_hw_info *info = priv->info;
1867 struct device *dev = ndev->dev.parent;
1868 unsigned long rate;
1869 u64 inc;
1870
1871 if (!(info->gptp || info->ccc_gac))
1872 return 0;
1873
1874 if (info->gptp_ref_clk)
1875 rate = clk_get_rate(priv->gptp_clk);
1876 else
1877 rate = clk_get_rate(priv->clk);
1878 if (!rate)
1879 return -EINVAL;
1880
1881 inc = div64_ul(1000000000ULL << 20, rate);
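/* Worked example (illustrative): a 125 MHz gPTP clock gives
 * inc = (10^9 << 20) / 125000000 = 0x800000, i.e. 8 ns per clock
 * cycle in the 20-fractional-bit fixed-point format written to
 * GTI.TIV, which must fall within the range checked below.
 */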
1882
1883 if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
1884 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
1885 inc, GTI_TIV_MIN, GTI_TIV_MAX);
1886 return -EINVAL;
1887 }
1888 priv->gti_tiv = inc;
1889
1890 return 0;
1891 }
1892
1893 /* Set tx and rx clock internal delay modes */
1894 static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
1895 {
1896 struct ravb_private *priv = netdev_priv(ndev);
1897 bool explicit_delay = false;
1898 u32 delay;
1899
1900 if (!priv->info->internal_delay)
1901 return;
1902
1903 if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
1904 /* Valid values are 0 and 1800, according to DT bindings */
1905 priv->rxcidm = !!delay;
1906 explicit_delay = true;
1907 }
1908 if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
1909 /* Valid values are 0 and 2000, according to DT bindings */
1910 priv->txcidm = !!delay;
1911 explicit_delay = true;
1912 }
1913
1914 if (explicit_delay)
1915 return;
1916
1917 /* Fall back to legacy rgmii-*id behavior */
1918 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1919 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1920 priv->rxcidm = 1;
1921 priv->rgmii_override = 1;
1922 }
1923
1924 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1925 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1926 priv->txcidm = 1;
1927 priv->rgmii_override = 1;
1928 }
1929 }
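/* Example (illustrative) device tree fragment that would make the
 * parser above enable both internal delays explicitly, rather than
 * falling back to the legacy rgmii-*id handling:
 *
 *	&avb {
 *		phy-mode = "rgmii";
 *		rx-internal-delay-ps = <1800>;
 *		tx-internal-delay-ps = <2000>;
 *	};
 */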
1930
1931 static void ravb_set_delay_mode(struct net_device *ndev)
1932 {
1933 struct ravb_private *priv = netdev_priv(ndev);
1934 u32 set = 0;
1935
1936 if (!priv->info->internal_delay)
1937 return;
1938
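/* APSR_RDM and APSR_TDM enable the MAC-internal RX and TX clock
 * delay lines chosen by ravb_parse_delay_mode() above.
 */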
1939 if (priv->rxcidm)
1940 set |= APSR_RDM;
1941 if (priv->txcidm)
1942 set |= APSR_TDM;
1943 ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
1944 }
1945
1946 /* Network device open function for Ethernet AVB */
1947 static int ravb_open(struct net_device *ndev)
1948 {
1949 struct ravb_private *priv = netdev_priv(ndev);
1950 const struct ravb_hw_info *info = priv->info;
1951 struct device *dev = &priv->pdev->dev;
1952 int error;
1953
1954 napi_enable(&priv->napi[RAVB_BE]);
1955 if (info->nc_queues)
1956 napi_enable(&priv->napi[RAVB_NC]);
1957
1958 error = pm_runtime_resume_and_get(dev);
1959 if (error < 0)
1960 goto out_napi_off;
1961
1962 /* Set AVB config mode */
1963 error = ravb_set_config_mode(ndev);
1964 if (error)
1965 goto out_rpm_put;
1966
1967 ravb_set_delay_mode(ndev);
1968 ravb_write(ndev, priv->desc_bat_dma, DBAT);
1969
1970 /* Device init */
1971 error = ravb_dmac_init(ndev);
1972 if (error)
1973 goto out_set_reset;
1974
1975 ravb_emac_init(ndev);
1976
1977 ravb_set_gti(ndev);
1978
1979 /* Initialise PTP Clock driver */
1980 if (info->gptp || info->ccc_gac)
1981 ravb_ptp_init(ndev, priv->pdev);
1982
1983 /* PHY control start */
1984 error = ravb_phy_start(ndev);
1985 if (error)
1986 goto out_ptp_stop;
1987
1988 netif_tx_start_all_queues(ndev);
1989
1990 return 0;
1991
1992 out_ptp_stop:
1993 /* Stop PTP Clock driver */
1994 if (info->gptp || info->ccc_gac)
1995 ravb_ptp_stop(ndev);
1996 ravb_stop_dma(ndev);
1997 out_set_reset:
1998 ravb_set_opmode(ndev, CCC_OPC_RESET);
1999 out_rpm_put:
2000 pm_runtime_put_autosuspend(dev);
2001 out_napi_off:
2002 if (info->nc_queues)
2003 napi_disable(&priv->napi[RAVB_NC]);
2004 napi_disable(&priv->napi[RAVB_BE]);
2005 return error;
2006 }
2007
2008 /* Timeout function for Ethernet AVB */
2009 static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
2010 {
2011 struct ravb_private *priv = netdev_priv(ndev);
2012
2013 netif_err(priv, tx_err, ndev,
2014 "transmit timed out, status %08x, resetting...\n",
2015 ravb_read(ndev, ISS));
2016
2017 /* Bump the tx_errors counter */
2018 ndev->stats.tx_errors++;
2019
2020 schedule_work(&priv->work);
2021 }
2022
2023 static void ravb_tx_timeout_work(struct work_struct *work)
2024 {
2025 struct ravb_private *priv = container_of(work, struct ravb_private,
2026 work);
2027 const struct ravb_hw_info *info = priv->info;
2028 struct net_device *ndev = priv->ndev;
2029 int error;
2030
2031 if (!rtnl_trylock()) {
2032 usleep_range(1000, 2000);
2033 schedule_work(&priv->work);
2034 return;
2035 }
2036
2037 netif_tx_stop_all_queues(ndev);
2038
2039 /* Stop PTP Clock driver */
2040 if (info->gptp)
2041 ravb_ptp_stop(ndev);
2042
2043 /* Wait for the DMA to stop */
2044 if (ravb_stop_dma(ndev)) {
2045 /* If ravb_stop_dma() fails, the hardware is still running
2046 * TX and/or RX. So, the functions below must not be called,
2047 * because ravb_dmac_init() might fail too.
2048 * Also, ravb_stop_dma() must not be retried again and again
2049 * here, because that could wait forever. So, this just
2050 * re-enables TX and RX and skips the following
2051 * re-initialization procedure.
2052 */
2053 ravb_rcv_snd_enable(ndev);
2054 goto out;
2055 }
2056
2057 ravb_ring_free(ndev, RAVB_BE);
2058 if (info->nc_queues)
2059 ravb_ring_free(ndev, RAVB_NC);
2060
2061 /* Device init */
2062 error = ravb_dmac_init(ndev);
2063 if (error) {
2064 /* If ravb_dmac_init() fails, descriptors are freed. So, this
2065 * should return here to avoid re-enabling the TX and RX in
2066 * ravb_emac_init().
2067 */
2068 netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
2069 __func__, error);
2070 goto out_unlock;
2071 }
2072 ravb_emac_init(ndev);
2073
2074 out:
2075 /* Initialise PTP Clock driver */
2076 if (info->gptp)
2077 ravb_ptp_init(ndev, priv->pdev);
2078
2079 netif_tx_start_all_queues(ndev);
2080
2081 out_unlock:
2082 rtnl_unlock();
2083 }
2084
2085 static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
2086 {
2087 u16 net_protocol = ntohs(skb->protocol);
2088 u8 inner_protocol;
2089
2090 /* GbEth IP can calculate the checksum if:
2091 * - there are zero or one VLAN headers with TPID=0x8100
2092 * - the network protocol is IPv4 or IPv6
2093 * - the transport protocol is TCP, UDP or ICMP
2094 * - the packet is not fragmented
2095 */
2096
2097 if (net_protocol == ETH_P_8021Q) {
2098 struct vlan_hdr vhdr, *vh;
2099
2100 vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr);
2101 if (!vh)
2102 return false;
2103
2104 net_protocol = ntohs(vh->h_vlan_encapsulated_proto);
2105 }
2106
2107 switch (net_protocol) {
2108 case ETH_P_IP:
2109 inner_protocol = ip_hdr(skb)->protocol;
2110 break;
2111 case ETH_P_IPV6:
2112 inner_protocol = ipv6_hdr(skb)->nexthdr;
2113 break;
2114 default:
2115 return false;
2116 }
2117
2118 switch (inner_protocol) {
2119 case IPPROTO_TCP:
2120 case IPPROTO_UDP:
2121 return true;
2122 default:
2123 return false;
2124 }
2125 }
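/* Note that this helper is stricter than the hardware capability
 * described above: only TCP and UDP are approved, so e.g. an ICMP
 * packet with CHECKSUM_PARTIAL is punted to skb_checksum_help() in
 * ravb_start_xmit() below.
 */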
2126
2127 /* Packet transmit function for Ethernet AVB */
2128 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2129 {
2130 struct ravb_private *priv = netdev_priv(ndev);
2131 const struct ravb_hw_info *info = priv->info;
2132 unsigned int num_tx_desc = priv->num_tx_desc;
2133 u16 q = skb_get_queue_mapping(skb);
2134 struct ravb_tstamp_skb *ts_skb;
2135 struct ravb_tx_desc *desc;
2136 unsigned long flags;
2137 dma_addr_t dma_addr;
2138 void *buffer;
2139 u32 entry;
2140 u32 len;
2141
2142 if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
2143 skb_checksum_help(skb);
2144
2145 spin_lock_irqsave(&priv->lock, flags);
2146 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
2147 num_tx_desc) {
2148 netif_err(priv, tx_queued, ndev,
2149 "still transmitting with the full ring!\n");
2150 netif_stop_subqueue(ndev, q);
2151 spin_unlock_irqrestore(&priv->lock, flags);
2152 return NETDEV_TX_BUSY;
2153 }
2154
2155 if (skb_put_padto(skb, ETH_ZLEN))
2156 goto exit;
2157
2158 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
2159 priv->tx_skb[q][entry / num_tx_desc] = skb;
2160
2161 if (num_tx_desc > 1) {
2162 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
2163 entry / num_tx_desc * DPTR_ALIGN;
2164 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
2165
2166 /* Zero length DMA descriptors are problematic as they seem
2167 * to terminate DMA transfers. Avoid them by simply using a
2168 * length of DPTR_ALIGN (4) when skb data is aligned to
2169 * DPTR_ALIGN.
2170 *
2171 * As skb is guaranteed to have at least ETH_ZLEN (60)
2172 * bytes of data by the call to skb_put_padto() above this
2173 * is safe with respect to both the length of the first DMA
2174 * descriptor (len) overflowing the available data and the
2175 * length of the second DMA descriptor (skb->len - len)
2176 * being negative.
2177 */
2178 if (len == 0)
2179 len = DPTR_ALIGN;
2180
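/* Worked example (illustrative): if skb->data ends in 0x02, then
 * len = 2, so the first descriptor sends those two bytes from the
 * aligned bounce buffer and the second descriptor sends the
 * remaining skb->len - 2 bytes, which now start on a DPTR_ALIGN
 * boundary.
 */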
2181 memcpy(buffer, skb->data, len);
2182 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2183 DMA_TO_DEVICE);
2184 if (dma_mapping_error(ndev->dev.parent, dma_addr))
2185 goto drop;
2186
2187 desc = &priv->tx_ring[q][entry];
2188 desc->ds_tagl = cpu_to_le16(len);
2189 desc->dptr = cpu_to_le32(dma_addr);
2190
2191 buffer = skb->data + len;
2192 len = skb->len - len;
2193 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2194 DMA_TO_DEVICE);
2195 if (dma_mapping_error(ndev->dev.parent, dma_addr))
2196 goto unmap;
2197
2198 desc++;
2199 } else {
2200 desc = &priv->tx_ring[q][entry];
2201 len = skb->len;
2202 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
2203 DMA_TO_DEVICE);
2204 if (dma_mapping_error(ndev->dev.parent, dma_addr))
2205 goto drop;
2206 }
2207 desc->ds_tagl = cpu_to_le16(len);
2208 desc->dptr = cpu_to_le32(dma_addr);
2209
2210 /* TX timestamp required */
2211 if (info->gptp || info->ccc_gac) {
2212 if (q == RAVB_NC) {
2213 ts_skb = kmalloc_obj(*ts_skb, GFP_ATOMIC);
2214 if (!ts_skb) {
2215 if (num_tx_desc > 1) {
2216 desc--;
2217 dma_unmap_single(ndev->dev.parent, dma_addr,
2218 len, DMA_TO_DEVICE);
2219 }
2220 goto unmap;
2221 }
2222 ts_skb->skb = skb_get(skb);
2223 ts_skb->tag = priv->ts_skb_tag++;
2224 priv->ts_skb_tag &= 0x3ff;
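/* The tag is 10 bits wide; bits 9:4 will go into tagh_tsr and
 * bits 3:0 into the top nibble of ds_tagl below, which lets the
 * timestamp interrupt handler match the hardware timestamp back
 * to this skb via ts_skb_list.
 */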
2225 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
2226
2227 /* TAG and timestamp required flag */
2228 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2229 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2230 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
2231 }
2232
2233 skb_tx_timestamp(skb);
2234 }
2235
2236 if (num_tx_desc > 1) {
2237 desc->die_dt = DT_FEND;
2238 desc--;
2239 /* When using multi-descriptors, DT_FEND must be written
2240 * before DT_FSTART, but the compiler may reorder the memory
2241 * writes in an attempt to optimize the code.
2242 * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
2243 * are written exactly in the order shown in the code.
2244 * This is particularly important for cases where the DMA engine
2245 * is already running when this code runs. If the DMA engine
2246 * sees DT_FSTART without the corresponding DT_FEND, it will
2247 * enter an error condition.
2248 */
2249 dma_wmb();
2250 desc->die_dt = DT_FSTART;
2251 } else {
2252 /* Descriptor type must be set after all the above writes */
2253 dma_wmb();
2254 desc->die_dt = DT_FSINGLE;
2255 }
2256
2257 /* Before ringing the doorbell we need to make sure that the latest
2258 * writes have been committed to memory, otherwise transmission could
2259 * be delayed until the doorbell is rung again.
2260 * This replaces the read operation mentioned in the hardware
2261 * manuals.
2262 */
2263 dma_wmb();
2264 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2265
2266 priv->cur_tx[q] += num_tx_desc;
2267 if (priv->cur_tx[q] - priv->dirty_tx[q] >
2268 (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2269 !ravb_tx_free(ndev, q, true))
2270 netif_stop_subqueue(ndev, q);
2271
2272 exit:
2273 spin_unlock_irqrestore(&priv->lock, flags);
2274 return NETDEV_TX_OK;
2275
2276 unmap:
2277 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2278 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2279 drop:
2280 dev_kfree_skb_any(skb);
2281 priv->tx_skb[q][entry / num_tx_desc] = NULL;
2282 goto exit;
2283 }
2284
2285 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2286 struct net_device *sb_dev)
2287 {
2288 /* If skb needs TX timestamp, it is handled in network control queue */
2289 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2290 RAVB_BE;
2292 }
2293
2294 static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2295 {
2296 struct ravb_private *priv = netdev_priv(ndev);
2297 const struct ravb_hw_info *info = priv->info;
2298 struct net_device_stats *nstats, *stats0, *stats1;
2299 struct device *dev = &priv->pdev->dev;
2300
2301 nstats = &ndev->stats;
2302
2303 pm_runtime_get_noresume(dev);
2304
2305 if (!pm_runtime_active(dev))
2306 goto out_rpm_put;
2307
2308 stats0 = &priv->stats[RAVB_BE];
2309
2310 if (info->tx_counters) {
2311 nstats->tx_dropped += ravb_read(ndev, TROCR);
2312 ravb_write(ndev, 0, TROCR); /* (write clear) */
2313 }
2314
2315 if (info->carrier_counters) {
2316 nstats->collisions += ravb_read(ndev, CXR41);
2317 ravb_write(ndev, 0, CXR41); /* (write clear) */
2318 nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2319 ravb_write(ndev, 0, CXR42); /* (write clear) */
2320 }
2321
2322 nstats->rx_packets = stats0->rx_packets;
2323 nstats->tx_packets = stats0->tx_packets;
2324 nstats->rx_bytes = stats0->rx_bytes;
2325 nstats->tx_bytes = stats0->tx_bytes;
2326 nstats->multicast = stats0->multicast;
2327 nstats->rx_errors = stats0->rx_errors;
2328 nstats->rx_crc_errors = stats0->rx_crc_errors;
2329 nstats->rx_frame_errors = stats0->rx_frame_errors;
2330 nstats->rx_length_errors = stats0->rx_length_errors;
2331 nstats->rx_missed_errors = stats0->rx_missed_errors;
2332 nstats->rx_over_errors = stats0->rx_over_errors;
2333 if (info->nc_queues) {
2334 stats1 = &priv->stats[RAVB_NC];
2335
2336 nstats->rx_packets += stats1->rx_packets;
2337 nstats->tx_packets += stats1->tx_packets;
2338 nstats->rx_bytes += stats1->rx_bytes;
2339 nstats->tx_bytes += stats1->tx_bytes;
2340 nstats->multicast += stats1->multicast;
2341 nstats->rx_errors += stats1->rx_errors;
2342 nstats->rx_crc_errors += stats1->rx_crc_errors;
2343 nstats->rx_frame_errors += stats1->rx_frame_errors;
2344 nstats->rx_length_errors += stats1->rx_length_errors;
2345 nstats->rx_missed_errors += stats1->rx_missed_errors;
2346 nstats->rx_over_errors += stats1->rx_over_errors;
2347 }
2348
2349 out_rpm_put:
2350 pm_runtime_put_noidle(dev);
2351 return nstats;
2352 }
2353
2354 /* Update promiscuous bit */
2355 static void ravb_set_rx_mode(struct net_device *ndev)
2356 {
2357 struct ravb_private *priv = netdev_priv(ndev);
2358 unsigned long flags;
2359
2360 spin_lock_irqsave(&priv->lock, flags);
2361 ravb_modify(ndev, ECMR, ECMR_PRM,
2362 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2363 spin_unlock_irqrestore(&priv->lock, flags);
2364 }
2365
2366 /* Device close function for Ethernet AVB */
2367 static int ravb_close(struct net_device *ndev)
2368 {
2369 struct device_node *np = ndev->dev.parent->of_node;
2370 struct ravb_private *priv = netdev_priv(ndev);
2371 const struct ravb_hw_info *info = priv->info;
2372 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
2373 struct device *dev = &priv->pdev->dev;
2374 int error;
2375
2376 netif_tx_stop_all_queues(ndev);
2377
2378 /* Disable interrupts by clearing the interrupt masks. */
2379 ravb_write(ndev, 0, RIC0);
2380 ravb_write(ndev, 0, RIC2);
2381 ravb_write(ndev, 0, TIC);
2382 ravb_write(ndev, 0, ECSIPR);
2383
2384 /* PHY disconnect */
2385 if (ndev->phydev) {
2386 phy_stop(ndev->phydev);
2387 phy_disconnect(ndev->phydev);
2388 if (of_phy_is_fixed_link(np))
2389 of_phy_deregister_fixed_link(np);
2390 }
2391
2392 /* Stop PTP Clock driver */
2393 if (info->gptp || info->ccc_gac)
2394 ravb_ptp_stop(ndev);
2395
2396 /* Set the config mode to stop the AVB-DMAC's processes */
2397 if (ravb_stop_dma(ndev) < 0)
2398 netdev_err(ndev,
2399 "device will be stopped after h/w processes are done.\n");
2400
2401 /* Clear the timestamp list */
2402 if (info->gptp || info->ccc_gac) {
2403 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
2404 list_del(&ts_skb->list);
2405 kfree_skb(ts_skb->skb);
2406 kfree(ts_skb);
2407 }
2408 }
2409
2410 cancel_work_sync(&priv->work);
2411
2412 if (info->nc_queues)
2413 napi_disable(&priv->napi[RAVB_NC]);
2414 napi_disable(&priv->napi[RAVB_BE]);
2415
2416 /* Free all the skb's in the RX queue and the DMA buffers. */
2417 ravb_ring_free(ndev, RAVB_BE);
2418 if (info->nc_queues)
2419 ravb_ring_free(ndev, RAVB_NC);
2420
2421 /* Update statistics. */
2422 ravb_get_stats(ndev);
2423
2424 /* Set reset mode. */
2425 error = ravb_set_opmode(ndev, CCC_OPC_RESET);
2426 if (error)
2427 return error;
2428
2429 pm_runtime_put_autosuspend(dev);
2430
2431 return 0;
2432 }
2433
2434 static int ravb_hwtstamp_get(struct net_device *ndev,
2435 struct kernel_hwtstamp_config *config)
2436 {
2437 struct ravb_private *priv = netdev_priv(ndev);
2438
2439 config->flags = 0;
2440 config->tx_type = priv->tstamp_tx_ctrl;
2441 config->rx_filter = priv->tstamp_rx_ctrl;
2442
2443 return 0;
2444 }
2445
2446 /* Control hardware time stamping */
2447 static int ravb_hwtstamp_set(struct net_device *ndev,
2448 struct kernel_hwtstamp_config *config,
2449 struct netlink_ext_ack *extack)
2450 {
2451 struct ravb_private *priv = netdev_priv(ndev);
2452 enum hwtstamp_rx_filters tstamp_rx_ctrl;
2453 enum hwtstamp_tx_types tstamp_tx_ctrl;
2454
2455 switch (config->tx_type) {
2456 case HWTSTAMP_TX_OFF:
2457 case HWTSTAMP_TX_ON:
2458 tstamp_tx_ctrl = config->tx_type;
2459 break;
2460 default:
2461 return -ERANGE;
2462 }
2463
2464 switch (config->rx_filter) {
2465 case HWTSTAMP_FILTER_NONE:
2466 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2467 tstamp_rx_ctrl = config->rx_filter;
2468 break;
2469 default:
2470 config->rx_filter = HWTSTAMP_FILTER_ALL;
2471 tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
2472 }
2473
2474 priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
2475 priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
2476
2477 return 0;
2478 }
2479
2480 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
2481 {
2482 struct ravb_private *priv = netdev_priv(ndev);
2483
2484 WRITE_ONCE(ndev->mtu, new_mtu);
2485
2486 if (netif_running(ndev)) {
2487 synchronize_irq(priv->emac_irq);
2488 ravb_emac_init(ndev);
2489 }
2490
2491 netdev_update_features(ndev);
2492
2493 return 0;
2494 }
2495
2496 static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
2497 {
2498 struct ravb_private *priv = netdev_priv(ndev);
2499 unsigned long flags;
2500
2501 spin_lock_irqsave(&priv->lock, flags);
2502
2503 /* Disable TX and RX */
2504 ravb_rcv_snd_disable(ndev);
2505
2506 /* Modify RX Checksum setting */
2507 ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2508
2509 /* Enable TX and RX */
2510 ravb_rcv_snd_enable(ndev);
2511
2512 spin_unlock_irqrestore(&priv->lock, flags);
2513 }
2514
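/* Helper for toggling GbEth checksum offload: temporarily clear the
 * CSR0 enable bit given in @mask, wait for the module to become idle,
 * write @val to @reg while it is stopped, then restore CSR0. If the
 * stop times out, @reg is left untouched and the error is returned.
 */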
2515 static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
2516 u32 val, u32 mask)
2517 {
2518 u32 csr0 = CSR0_TPE | CSR0_RPE;
2519 int ret;
2520
2521 ravb_write(ndev, csr0 & ~mask, CSR0);
2522 ret = ravb_wait(ndev, CSR0, mask, 0);
2523 if (!ret)
2524 ravb_write(ndev, val, reg);
2525
2526 ravb_write(ndev, csr0, CSR0);
2527
2528 return ret;
2529 }
2530
2531 static int ravb_set_features_gbeth(struct net_device *ndev,
2532 netdev_features_t features)
2533 {
2534 netdev_features_t changed = ndev->features ^ features;
2535 struct ravb_private *priv = netdev_priv(ndev);
2536 unsigned long flags;
2537 int ret = 0;
2538 u32 val;
2539
2540 spin_lock_irqsave(&priv->lock, flags);
2541 if (changed & NETIF_F_RXCSUM) {
2542 if (features & NETIF_F_RXCSUM)
2543 val = CSR2_CSUM_ENABLE;
2544 else
2545 val = 0;
2546
2547 ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
2548 if (ret)
2549 goto done;
2550 }
2551
2552 if (changed & NETIF_F_HW_CSUM) {
2553 if (features & NETIF_F_HW_CSUM)
2554 val = CSR1_CSUM_ENABLE;
2555 else
2556 val = 0;
2557
2558 ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
2559 if (ret)
2560 goto done;
2561 }
2562
2563 done:
2564 spin_unlock_irqrestore(&priv->lock, flags);
2565
2566 return ret;
2567 }
2568
2569 static int ravb_set_features_rcar(struct net_device *ndev,
2570 netdev_features_t features)
2571 {
2572 netdev_features_t changed = ndev->features ^ features;
2573
2574 if (changed & NETIF_F_RXCSUM)
2575 ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2576
2577 return 0;
2578 }
2579
2580 static int ravb_set_features(struct net_device *ndev,
2581 netdev_features_t features)
2582 {
2583 struct ravb_private *priv = netdev_priv(ndev);
2584 const struct ravb_hw_info *info = priv->info;
2585 struct device *dev = &priv->pdev->dev;
2586 int ret;
2587
2588 pm_runtime_get_noresume(dev);
2589
2590 if (pm_runtime_active(dev))
2591 ret = info->set_feature(ndev, features);
2592 else
2593 ret = 0;
2594
2595 pm_runtime_put_noidle(dev);
2596
2597 if (ret)
2598 return ret;
2599
2600 ndev->features = features;
2601
2602 return 0;
2603 }
2604
2605 static const struct net_device_ops ravb_netdev_ops = {
2606 .ndo_open = ravb_open,
2607 .ndo_stop = ravb_close,
2608 .ndo_start_xmit = ravb_start_xmit,
2609 .ndo_select_queue = ravb_select_queue,
2610 .ndo_get_stats = ravb_get_stats,
2611 .ndo_set_rx_mode = ravb_set_rx_mode,
2612 .ndo_tx_timeout = ravb_tx_timeout,
2613 .ndo_eth_ioctl = phy_do_ioctl_running,
2614 .ndo_change_mtu = ravb_change_mtu,
2615 .ndo_validate_addr = eth_validate_addr,
2616 .ndo_set_mac_address = eth_mac_addr,
2617 .ndo_set_features = ravb_set_features,
2618 .ndo_hwtstamp_get = ravb_hwtstamp_get,
2619 .ndo_hwtstamp_set = ravb_hwtstamp_set,
2620 };
2621
2622 /* MDIO bus init function */
2623 static int ravb_mdio_init(struct ravb_private *priv)
2624 {
2625 struct platform_device *pdev = priv->pdev;
2626 struct device *dev = &pdev->dev;
2627 struct device_node *mdio_node;
2628 struct phy_device *phydev;
2629 struct device_node *pn;
2630 int error;
2631
2632 /* Bitbang init */
2633 priv->mdiobb.ops = &bb_ops;
2634
2635 /* MII controller setting */
2636 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
2637 if (!priv->mii_bus)
2638 return -ENOMEM;
2639
2640 /* Hook up MII support for ethtool */
2641 priv->mii_bus->name = "ravb_mii";
2642 priv->mii_bus->parent = dev;
2643 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2644 pdev->name, pdev->id);
2645
2646 /* Register MDIO bus */
2647 mdio_node = of_get_child_by_name(dev->of_node, "mdio");
2648 if (!mdio_node) {
2649 /* backwards compatibility for DTs lacking an mdio subnode */
2650 mdio_node = of_node_get(dev->of_node);
2651 }
2652 error = of_mdiobus_register(priv->mii_bus, mdio_node);
2653 of_node_put(mdio_node);
2654 if (error)
2655 goto out_free_bus;
2656
2657 pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
2658 phydev = of_phy_find_device(pn);
2659 if (phydev) {
2660 phydev->mac_managed_pm = true;
2661 put_device(&phydev->mdio.dev);
2662 }
2663 of_node_put(pn);
2664
2665 return 0;
2666
2667 out_free_bus:
2668 free_mdio_bitbang(priv->mii_bus);
2669 return error;
2670 }
2671
2672 /* MDIO bus release function */
2673 static int ravb_mdio_release(struct ravb_private *priv)
2674 {
2675 /* Unregister mdio bus */
2676 mdiobus_unregister(priv->mii_bus);
2677
2678 /* Free bitbang info */
2679 free_mdio_bitbang(priv->mii_bus);
2680
2681 return 0;
2682 }
2683
2684 static const struct ravb_hw_info ravb_gen2_hw_info = {
2685 .receive = ravb_rx_rcar,
2686 .set_rate = ravb_set_rate_rcar,
2687 .set_feature = ravb_set_features_rcar,
2688 .dmac_init = ravb_dmac_init_rcar,
2689 .emac_init = ravb_emac_init_rcar,
2690 .gstrings_stats = ravb_gstrings_stats,
2691 .gstrings_size = sizeof(ravb_gstrings_stats),
2692 .net_hw_features = NETIF_F_RXCSUM,
2693 .net_features = NETIF_F_RXCSUM,
2694 .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2695 .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2696 .tx_max_frame_size = SZ_2K,
2697 .rx_max_frame_size = SZ_2K,
2698 .rx_buffer_size = SZ_2K +
2699 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2700 .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2701 .dbat_entry_num = 22,
2702 .aligned_tx = 1,
2703 .gptp = 1,
2704 .nc_queues = 1,
2705 .magic_pkt = 1,
2706 };
2707
2708 static const struct ravb_hw_info ravb_gen3_hw_info = {
2709 .receive = ravb_rx_rcar,
2710 .set_rate = ravb_set_rate_rcar,
2711 .set_feature = ravb_set_features_rcar,
2712 .dmac_init = ravb_dmac_init_rcar,
2713 .emac_init = ravb_emac_init_rcar,
2714 .gstrings_stats = ravb_gstrings_stats,
2715 .gstrings_size = sizeof(ravb_gstrings_stats),
2716 .net_hw_features = NETIF_F_RXCSUM,
2717 .net_features = NETIF_F_RXCSUM,
2718 .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2719 .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2720 .tx_max_frame_size = SZ_2K,
2721 .rx_max_frame_size = SZ_2K,
2722 .rx_buffer_size = SZ_2K +
2723 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2724 .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2725 .dbat_entry_num = 22,
2726 .internal_delay = 1,
2727 .tx_counters = 1,
2728 .multi_irqs = 1,
2729 .irq_en_dis = 1,
2730 .ccc_gac = 1,
2731 .nc_queues = 1,
2732 .magic_pkt = 1,
2733 };
2734
2735 static const struct ravb_hw_info ravb_gen4_hw_info = {
2736 .receive = ravb_rx_rcar,
2737 .set_rate = ravb_set_rate_rcar,
2738 .set_feature = ravb_set_features_rcar,
2739 .dmac_init = ravb_dmac_init_rcar,
2740 .emac_init = ravb_emac_init_rcar_gen4,
2741 .gstrings_stats = ravb_gstrings_stats,
2742 .gstrings_size = sizeof(ravb_gstrings_stats),
2743 .net_hw_features = NETIF_F_RXCSUM,
2744 .net_features = NETIF_F_RXCSUM,
2745 .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2746 .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2747 .tx_max_frame_size = SZ_2K,
2748 .rx_max_frame_size = SZ_2K,
2749 .rx_buffer_size = SZ_2K +
2750 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2751 .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2752 .dbat_entry_num = 22,
2753 .internal_delay = 1,
2754 .tx_counters = 1,
2755 .multi_irqs = 1,
2756 .irq_en_dis = 1,
2757 .ccc_gac = 1,
2758 .nc_queues = 1,
2759 .magic_pkt = 1,
2760 };
2761
2762 static const struct ravb_hw_info ravb_rzv2m_hw_info = {
2763 .receive = ravb_rx_rcar,
2764 .set_rate = ravb_set_rate_rcar,
2765 .set_feature = ravb_set_features_rcar,
2766 .dmac_init = ravb_dmac_init_rcar,
2767 .emac_init = ravb_emac_init_rcar,
2768 .gstrings_stats = ravb_gstrings_stats,
2769 .gstrings_size = sizeof(ravb_gstrings_stats),
2770 .net_hw_features = NETIF_F_RXCSUM,
2771 .net_features = NETIF_F_RXCSUM,
2772 .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2773 .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2774 .tx_max_frame_size = SZ_2K,
2775 .rx_max_frame_size = SZ_2K,
2776 .rx_buffer_size = SZ_2K +
2777 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2778 .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2779 .dbat_entry_num = 22,
2780 .multi_irqs = 1,
2781 .err_mgmt_irqs = 1,
2782 .gptp = 1,
2783 .gptp_ref_clk = 1,
2784 .nc_queues = 1,
2785 .magic_pkt = 1,
2786 };
2787
2788 static const struct ravb_hw_info gbeth_hw_info = {
2789 .receive = ravb_rx_gbeth,
2790 .set_rate = ravb_set_rate_gbeth,
2791 .set_feature = ravb_set_features_gbeth,
2792 .dmac_init = ravb_dmac_init_gbeth,
2793 .emac_init = ravb_emac_init_gbeth,
2794 .gstrings_stats = ravb_gstrings_stats_gbeth,
2795 .gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
2796 .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2797 .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2798 .vlan_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2799 .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
2800 .tccr_mask = TCCR_TSRQ0,
2801 .tx_max_frame_size = 1522,
2802 .rx_max_frame_size = SZ_8K,
2803 .rx_buffer_size = SZ_2K,
2804 .rx_desc_size = sizeof(struct ravb_rx_desc),
2805 .dbat_entry_num = 2,
2806 .aligned_tx = 1,
2807 .coalesce_irqs = 1,
2808 .tx_counters = 1,
2809 .carrier_counters = 1,
2810 .half_duplex = 1,
2811 };
2812
2813 static const struct of_device_id ravb_match_table[] = {
2814 { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2815 { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2816 { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2817 { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2818 { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2819 { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
2820 { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2821 { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2822 { }
2823 };
2824 MODULE_DEVICE_TABLE(of, ravb_match_table);
2825
2826 static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
2827 const char *ch, int *irq, irq_handler_t handler)
2828 {
2829 struct platform_device *pdev = priv->pdev;
2830 struct net_device *ndev = priv->ndev;
2831 struct device *dev = &pdev->dev;
2832 const char *devname = dev_name(dev);
2833 unsigned long flags;
2834 int error, irq_num;
2835
2836 if (irq_name) {
2837 devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
2838 if (!devname)
2839 return -ENOMEM;
2840
2841 irq_num = platform_get_irq_byname(pdev, irq_name);
2842 flags = 0;
2843 } else {
2844 irq_num = platform_get_irq(pdev, 0);
2845 flags = IRQF_SHARED;
2846 }
2847 if (irq_num < 0)
2848 return irq_num;
2849
2850 if (irq)
2851 *irq = irq_num;
2852
2853 error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
2854 if (error)
2855 netdev_err(ndev, "cannot request IRQ %s\n", devname);
2856
2857 return error;
2858 }
2859
2860 static int ravb_setup_irqs(struct ravb_private *priv)
2861 {
2862 const struct ravb_hw_info *info = priv->info;
2863 struct net_device *ndev = priv->ndev;
2864 const char *irq_name, *emac_irq_name;
2865 int error;
2866
2867 if (!info->multi_irqs)
2868 return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
2869
2870 if (info->err_mgmt_irqs) {
2871 irq_name = "dia";
2872 emac_irq_name = "line3";
2873 } else {
2874 irq_name = "ch22";
2875 emac_irq_name = "ch24";
2876 }
2877
2878 error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
2879 if (error)
2880 return error;
2881
2882 error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
2883 ravb_emac_interrupt);
2884 if (error)
2885 return error;
2886
2887 if (info->err_mgmt_irqs) {
2888 error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
2889 if (error)
2890 return error;
2891
2892 error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
2893 if (error)
2894 return error;
2895 }
2896
2897 error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
2898 if (error)
2899 return error;
2900
2901 error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
2902 if (error)
2903 return error;
2904
2905 error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
2906 if (error)
2907 return error;
2908
2909 return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
2910 }
2911
2912 static int ravb_probe(struct platform_device *pdev)
2913 {
2914 struct device_node *np = pdev->dev.of_node;
2915 const struct ravb_hw_info *info;
2916 struct reset_control *rstc;
2917 struct ravb_private *priv;
2918 struct net_device *ndev;
2919 struct resource *res;
2920 int error, q;
2921
2922 if (!np) {
2923 dev_err(&pdev->dev,
2924 "this driver is required to be instantiated from device tree\n");
2925 return -EINVAL;
2926 }
2927
2928 rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
2929 if (IS_ERR(rstc))
2930 return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
2931 "failed to get cpg reset\n");
2932
2933 info = of_device_get_match_data(&pdev->dev);
2934
2935 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2936 info->nc_queues ? NUM_TX_QUEUE : 1,
2937 info->nc_queues ? NUM_RX_QUEUE : 1);
2938 if (!ndev)
2939 return -ENOMEM;
2940
2941 ndev->features = info->net_features;
2942 ndev->hw_features = info->net_hw_features;
2943 ndev->vlan_features = info->vlan_features;
2944
2945 error = reset_control_deassert(rstc);
2946 if (error)
2947 goto out_free_netdev;
2948
2949 SET_NETDEV_DEV(ndev, &pdev->dev);
2950
2951 priv = netdev_priv(ndev);
2952 priv->info = info;
2953 priv->rstc = rstc;
2954 priv->ndev = ndev;
2955 priv->pdev = pdev;
2956 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2957 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2958 if (info->nc_queues) {
2959 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2960 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2961 }
2962
2963 error = ravb_setup_irqs(priv);
2964 if (error)
2965 goto out_reset_assert;
2966
2967 priv->clk = devm_clk_get(&pdev->dev, NULL);
2968 if (IS_ERR(priv->clk)) {
2969 error = PTR_ERR(priv->clk);
2970 goto out_reset_assert;
2971 }
2972
2973 if (info->gptp_ref_clk) {
2974 priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
2975 if (IS_ERR(priv->gptp_clk)) {
2976 error = PTR_ERR(priv->gptp_clk);
2977 goto out_reset_assert;
2978 }
2979 }
2980
2981 priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
2982 if (IS_ERR(priv->refclk)) {
2983 error = PTR_ERR(priv->refclk);
2984 goto out_reset_assert;
2985 }
2986 clk_prepare(priv->refclk);
2987
2988 platform_set_drvdata(pdev, ndev);
2989 pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
2990 pm_runtime_use_autosuspend(&pdev->dev);
2991 pm_runtime_enable(&pdev->dev);
2992 error = pm_runtime_resume_and_get(&pdev->dev);
2993 if (error < 0)
2994 goto out_rpm_disable;
2995
2996 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2997 if (IS_ERR(priv->addr)) {
2998 error = PTR_ERR(priv->addr);
2999 goto out_rpm_put;
3000 }
3001
3002 /* The Ether-specific entries in the device structure. */
3003 ndev->base_addr = res->start;
3004
3005 spin_lock_init(&priv->lock);
3006 INIT_WORK(&priv->work, ravb_tx_timeout_work);
3007
3008 error = of_get_phy_mode(np, &priv->phy_interface);
3009 if (error && error != -ENODEV)
3010 goto out_rpm_put;
3011
3012 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
3013 priv->avb_link_active_low =
3014 of_property_read_bool(np, "renesas,ether-link-active-low");
3015
3016 ndev->max_mtu = info->tx_max_frame_size -
3017 (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
3018 ndev->min_mtu = ETH_MIN_MTU;
3019
3020 /* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX buffer.
3021 * Use two descriptors to handle this situation: the first covers the
3022 * aligned part of the data buffer and the second covers the remainder
3023 * that spills over because of the alignment.
3024 */
3025 priv->num_tx_desc = info->aligned_tx ? 2 : 1;
3026
3027 /* Set function */
3028 ndev->netdev_ops = &ravb_netdev_ops;
3029 ndev->ethtool_ops = &ravb_ethtool_ops;
3030
3031 error = ravb_compute_gti(ndev);
3032 if (error)
3033 goto out_rpm_put;
3034
3035 ravb_parse_delay_mode(np, ndev);
3036
3037 /* Allocate descriptor base address table */
3038 priv->desc_bat_size = sizeof(struct ravb_desc) * info->dbat_entry_num;
3039 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
3040 &priv->desc_bat_dma, GFP_KERNEL);
3041 if (!priv->desc_bat) {
3042 dev_err(&pdev->dev,
3043 "Cannot allocate desc base address table (size %d bytes)\n",
3044 priv->desc_bat_size);
3045 error = -ENOMEM;
3046 goto out_rpm_put;
3047 }
3048 for (q = RAVB_BE; q < info->dbat_entry_num; q++)
3049 priv->desc_bat[q].die_dt = DT_EOS;
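/* Presumably DT_EOS ("end of set") keeps unused descriptor base
 * address table entries inert until a queue is actually configured.
 */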
3050
3051 /* Initialise HW timestamp list */
3052 INIT_LIST_HEAD(&priv->ts_skb_list);
3053
3054 /* Debug message level */
3055 priv->msg_enable = RAVB_DEF_MSG_ENABLE;
3056
3057 /* Set config mode as this is needed for PHY initialization. */
3058 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
3059 if (error)
3060 goto out_rpm_put;
3061
3062 /* Read and set MAC address */
3063 ravb_read_mac_address(np, ndev);
3064 if (!is_valid_ether_addr(ndev->dev_addr)) {
3065 dev_warn(&pdev->dev,
3066 "no valid MAC address supplied, using a random one\n");
3067 eth_hw_addr_random(ndev);
3068 }
3069
3070 /* MDIO bus init */
3071 error = ravb_mdio_init(priv);
3072 if (error) {
3073 dev_err(&pdev->dev, "failed to initialize MDIO\n");
3074 goto out_reset_mode;
3075 }
3076
3077 /* Undo previous switch to config opmode. */
3078 error = ravb_set_opmode(ndev, CCC_OPC_RESET);
3079 if (error)
3080 goto out_mdio_release;
3081
3082 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
3083 if (info->nc_queues)
3084 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
3085
3086 if (info->coalesce_irqs) {
3087 netdev_sw_irq_coalesce_default_on(ndev);
3088 if (num_present_cpus() == 1)
3089 netif_threaded_enable(ndev);
3090 }
3091
3092 /* Network device register */
3093 error = register_netdev(ndev);
3094 if (error)
3095 goto out_napi_del;
3096
3097 device_set_wakeup_capable(&pdev->dev, 1);
3098
3099 /* Print device information */
3100 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
3101 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3102
3103 pm_runtime_put_autosuspend(&pdev->dev);
3104
3105 return 0;
3106
3107 out_napi_del:
3108 if (info->nc_queues)
3109 netif_napi_del(&priv->napi[RAVB_NC]);
3110
3111 netif_napi_del(&priv->napi[RAVB_BE]);
3112 out_mdio_release:
3113 ravb_mdio_release(priv);
3114 out_reset_mode:
3115 ravb_set_opmode(ndev, CCC_OPC_RESET);
3116 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
3117 priv->desc_bat_dma);
3118 out_rpm_put:
3119 pm_runtime_put(&pdev->dev);
3120 out_rpm_disable:
3121 pm_runtime_disable(&pdev->dev);
3122 pm_runtime_dont_use_autosuspend(&pdev->dev);
3123 clk_unprepare(priv->refclk);
3124 out_reset_assert:
3125 reset_control_assert(rstc);
3126 out_free_netdev:
3127 free_netdev(ndev);
3128 return error;
3129 }
3130
3131 static void ravb_remove(struct platform_device *pdev)
3132 {
3133 struct net_device *ndev = platform_get_drvdata(pdev);
3134 struct ravb_private *priv = netdev_priv(ndev);
3135 const struct ravb_hw_info *info = priv->info;
3136 struct device *dev = &priv->pdev->dev;
3137 int error;
3138
3139 error = pm_runtime_resume_and_get(dev);
3140 if (error < 0)
3141 return;
3142
3143 unregister_netdev(ndev);
3144 if (info->nc_queues)
3145 netif_napi_del(&priv->napi[RAVB_NC]);
3146 netif_napi_del(&priv->napi[RAVB_BE]);
3147
3148 ravb_mdio_release(priv);
3149
3150 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
3151 priv->desc_bat_dma);
3152
3153 pm_runtime_put_sync_suspend(&pdev->dev);
3154 pm_runtime_disable(&pdev->dev);
3155 pm_runtime_dont_use_autosuspend(dev);
3156 clk_unprepare(priv->refclk);
3157 reset_control_assert(priv->rstc);
3158 free_netdev(ndev);
3159 platform_set_drvdata(pdev, NULL);
3160 }
3161
3162 static int ravb_wol_setup(struct net_device *ndev)
3163 {
3164 struct ravb_private *priv = netdev_priv(ndev);
3165 const struct ravb_hw_info *info = priv->info;
3166
3167 /* Disable interrupts by clearing the interrupt masks. */
3168 ravb_write(ndev, 0, RIC0);
3169 ravb_write(ndev, 0, RIC2);
3170 ravb_write(ndev, 0, TIC);
3171
3172 /* Only allow ECI interrupts */
3173 synchronize_irq(priv->emac_irq);
3174 if (info->nc_queues)
3175 napi_disable(&priv->napi[RAVB_NC]);
3176 napi_disable(&priv->napi[RAVB_BE]);
3177 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
3178
3179 /* Enable MagicPacket */
3180 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3181
3182 if (priv->info->ccc_gac)
3183 ravb_ptp_stop(ndev);
3184
3185 return enable_irq_wake(priv->emac_irq);
3186 }
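/* In this WoL configuration only the MagicPacket-detection interrupt
 * (ECSIPR_MPDIP) stays unmasked and ECMR_MPDE arms the detector, so
 * the EMAC IRQ, flagged as a wake source above, is what wakes the
 * system from suspend.
 */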
3187
3188 static int ravb_wol_restore(struct net_device *ndev)
3189 {
3190 struct ravb_private *priv = netdev_priv(ndev);
3191 const struct ravb_hw_info *info = priv->info;
3192 int error;
3193
3194 /* Set reset mode to rearm the WoL logic. */
3195 error = ravb_set_opmode(ndev, CCC_OPC_RESET);
3196 if (error)
3197 return error;
3198
3199 /* Set AVB config mode. */
3200 error = ravb_set_config_mode(ndev);
3201 if (error)
3202 return error;
3203
3204 if (priv->info->ccc_gac)
3205 ravb_ptp_init(ndev, priv->pdev);
3206
3207 if (info->nc_queues)
3208 napi_enable(&priv->napi[RAVB_NC]);
3209 napi_enable(&priv->napi[RAVB_BE]);
3210
3211 /* Disable MagicPacket */
3212 ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
3213
3214 ravb_close(ndev);
3215
3216 return disable_irq_wake(priv->emac_irq);
3217 }
3218
3219 static int ravb_suspend(struct device *dev)
3220 {
3221 struct net_device *ndev = dev_get_drvdata(dev);
3222 struct ravb_private *priv = netdev_priv(ndev);
3223 int ret;
3224
3225 if (!netif_running(ndev))
3226 goto reset_assert;
3227
3228 netif_device_detach(ndev);
3229
3230 rtnl_lock();
3231 if (priv->wol_enabled) {
3232 ret = ravb_wol_setup(ndev);
3233 rtnl_unlock();
3234 return ret;
3235 }
3236
3237 ret = ravb_close(ndev);
3238 rtnl_unlock();
3239 if (ret)
3240 return ret;
3241
3242 ret = pm_runtime_force_suspend(&priv->pdev->dev);
3243 if (ret)
3244 return ret;
3245
3246 reset_assert:
3247 return reset_control_assert(priv->rstc);
3248 }
3249
3250 static int ravb_resume(struct device *dev)
3251 {
3252 struct net_device *ndev = dev_get_drvdata(dev);
3253 struct ravb_private *priv = netdev_priv(ndev);
3254 int ret;
3255
3256 ret = reset_control_deassert(priv->rstc);
3257 if (ret)
3258 return ret;
3259
3260 if (!netif_running(ndev))
3261 return 0;
3262
3263 rtnl_lock();
3264 /* If WoL is enabled, restore the interface. */
3265 if (priv->wol_enabled)
3266 ret = ravb_wol_restore(ndev);
3267 else
3268 ret = pm_runtime_force_resume(dev);
3269 if (ret) {
3270 rtnl_unlock();
3271 return ret;
3272 }
3273
3274 /* Reopening the interface will restore the device to the working state. */
3275 ret = ravb_open(ndev);
3276 rtnl_unlock();
3277 if (ret < 0)
3278 goto out_rpm_put;
3279
3280 ravb_set_rx_mode(ndev);
3281 netif_device_attach(ndev);
3282
3283 return 0;
3284
3285 out_rpm_put:
3286 if (!priv->wol_enabled)
3287 pm_runtime_put_autosuspend(dev);
3288
3289 return ret;
3290 }
3291
3292 static int ravb_runtime_suspend(struct device *dev)
3293 {
3294 struct net_device *ndev = dev_get_drvdata(dev);
3295 struct ravb_private *priv = netdev_priv(ndev);
3296
3297 clk_disable(priv->refclk);
3298
3299 return 0;
3300 }
3301
3302 static int ravb_runtime_resume(struct device *dev)
3303 {
3304 struct net_device *ndev = dev_get_drvdata(dev);
3305 struct ravb_private *priv = netdev_priv(ndev);
3306
3307 return clk_enable(priv->refclk);
3308 }
3309
3310 static const struct dev_pm_ops ravb_dev_pm_ops = {
3311 SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
3312 RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
3313 };
3314
3315 static struct platform_driver ravb_driver = {
3316 .probe = ravb_probe,
3317 .remove = ravb_remove,
3318 .driver = {
3319 .name = "ravb",
3320 .pm = pm_ptr(&ravb_dev_pm_ops),
3321 .of_match_table = ravb_match_table,
3322 },
3323 };
3324
3325 module_platform_driver(ravb_driver);
3326
3327 MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
3328 MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
3329 MODULE_LICENSE("GPL v2");
3330